diff --git a/.github/actions/run-faucet-tests/action.yaml b/.github/actions/run-faucet-tests/action.yaml
index 22fc9134978ae..f416024073266 100644
--- a/.github/actions/run-faucet-tests/action.yaml
+++ b/.github/actions/run-faucet-tests/action.yaml
@@ -3,7 +3,10 @@ description: |
   Runs the tests for the Aptos faucet against a local testnet built from a particular release branch
 inputs:
   NETWORK:
-    description: "The network branch for running the local testnet: devnet or testnet."
+    description: "The network branch for running the local testnet: devnet, testnet, or custom for a specific tag."
+    required: true
+  IMAGE_TAG:
+    description: "The docker image tag to use for the local testnet if NETWORK=custom."
     required: true
   GCP_DOCKER_ARTIFACT_REPO:
     description: "The GCP Docker artifact repository."
@@ -35,7 +38,7 @@ runs:
     # testnet, moving the mint key where the tests expect it to be, and running the
     # integration tests.
     - name: Run integration tests
-      run: poetry run python main.py --base-network ${{ inputs.NETWORK }} --external-test-dir ${{ runner.temp }}/testnet --image-repo-with-project ${{ inputs.GCP_DOCKER_ARTIFACT_REPO }}
+      run: poetry run python main.py --base-network ${{ inputs.NETWORK }} --tag ${{ inputs.IMAGE_TAG }} --external-test-dir ${{ runner.temp }}/testnet --image-repo-with-project ${{ inputs.GCP_DOCKER_ARTIFACT_REPO }}
       working-directory: crates/aptos-faucet/integration-tests
       shell: bash
 
diff --git a/.github/actions/rust-setup/action.yaml b/.github/actions/rust-setup/action.yaml
index 29b215ccffaf9..0cfd56b94999e 100644
--- a/.github/actions/rust-setup/action.yaml
+++ b/.github/actions/rust-setup/action.yaml
@@ -9,10 +9,7 @@ runs:
     - run: sudo apt-get update && sudo apt-get install build-essential ca-certificates clang curl git libpq-dev libssl-dev pkg-config lsof lld --no-install-recommends --assume-yes
       shell: bash
 
-    - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # pin@v1
-      with:
-        override: true
-        components: rustfmt, clippy
+    - uses: dsherret/rust-toolchain-file@v1
 
     # rust-cache action will cache ~/.cargo and ./target
     # https://github.com/Swatinem/rust-cache#cache-details
@@ -32,3 +29,8 @@ runs:
       run: |
         git config --global credential.helper store
         echo "${{ inputs.GIT_CREDENTIALS }}" > ~/.git-credentials
+
+    # Display the rust toolchain version being installed
+    - name: Setup rust toolchain
+      shell: bash
+      run: rustup show
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000000..e65d190b5399b
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,13 @@
+version: 2
+updates:
+  - package-ecosystem: "cargo"
+    directory: "/"
+    schedule:
+      interval: "daily"
+    reviewers:
+      - "aptos-labs/security"
+      - "aptos-labs/prod-eng"
+    labels:
+      - "CICD:run-e2e-tests"
+      - "dependencies"
+      - "rust"
diff --git a/.github/workflows/adhoc-forge.yaml b/.github/workflows/adhoc-forge.yaml
index b1541de19a65a..3a3b0bd184000 100644
--- a/.github/workflows/adhoc-forge.yaml
+++ b/.github/workflows/adhoc-forge.yaml
@@ -2,14 +2,18 @@ name: "Ad-hoc Forge Run"
 on:
   workflow_dispatch:
     inputs:
-      IMAGE_TAG:
+      GIT_SHA:
         required: true
         type: string
-        description: The docker image tag to test. If not specified, falls back on GIT_SHA, and then to the latest commits on the current branch
+        description: The git SHA1 to checkout and test
+      IMAGE_TAG:
+        required: false
+        type: string
+        description: The docker image tag to test. If not specified, falls back on GIT_SHA
       FORGE_IMAGE_TAG:
-        required: true
+        required: false
         type: string
-        description: The docker image tag to use for forge runner. If not specified, falls back on GIT_SHA, and then to the latest commits on the current branch
+        description: The docker image tag to use for forge runner. If not specified, falls back on GIT_SHA
       FORGE_RUNNER_DURATION_SECS:
         required: false
         type: string
@@ -25,9 +29,6 @@ on:
       type: string
       description: The Forge k8s cluster to be used for test
 
-env:
-  GIT_SHA: ${{ github.sha }}
-
 permissions:
   contents: read
   id-token: write #required for GCP Workload Identity federation which we use to login into Google Artifact Registry
@@ -40,14 +41,14 @@ jobs:
     steps:
       - name: collect metadata
         run: |
-          echo "GIT_SHA: ${{ env.GIT_SHA }}"
+          echo "GIT_SHA: ${{ inputs.GIT_SHA }}"
           echo "IMAGE_TAG: ${{ inputs.IMAGE_TAG }}"
           echo "FORGE_IMAGE_TAG: ${{ inputs.FORGE_IMAGE_TAG }}"
           echo "FORGE_RUNNER_DURATION_SECS: ${{ inputs.FORGE_RUNNER_DURATION_SECS }}"
           echo "FORGE_TEST_SUITE: ${{ inputs.FORGE_TEST_SUITE }}"
           echo "FORGE_CLUSTER_NAME: ${{ inputs.FORGE_CLUSTER_NAME }}"
     outputs:
-      gitSha: ${{ env.GIT_SHA }}
+      gitSha: ${{ inputs.GIT_SHA }}
       imageTag: ${{ inputs.IMAGE_TAG }}
       forgeImageTag: ${{ inputs.FORGE_IMAGE_TAG }}
       forgeRunnerDurationSecs: ${{ inputs.FORGE_RUNNER_DURATION_SECS }}
diff --git a/.github/workflows/cargo-metadata-upload.yaml b/.github/workflows/cargo-metadata-upload.yaml
index 80af1a940a4c2..7013fdf3e5a35 100644
--- a/.github/workflows/cargo-metadata-upload.yaml
+++ b/.github/workflows/cargo-metadata-upload.yaml
@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # pin@v1
+      - uses: dsherret/rust-toolchain-file@v1
       - id: auth
         uses: "google-github-actions/auth@v1"
        with:
diff --git a/.github/workflows/docker-build-test.yaml b/.github/workflows/docker-build-test.yaml
index 0d5b4407d9c73..7bcbdabc4bc4e 100644
--- a/.github/workflows/docker-build-test.yaml
+++ b/.github/workflows/docker-build-test.yaml
@@ -229,6 +229,22 @@ jobs:
       GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }}
       SKIP_JOB: ${{ needs.file_change_determinator.outputs.only_docs_changed == 'true' }}
 
+  faucet-tests-main:
+    needs: [permission-check, rust-images, determine-docker-build-metadata, file_change_determinator] # runs with the default release docker build variant "rust-images"
+    if: |
+      (
+        github.event_name == 'push' ||
+        github.event_name == 'workflow_dispatch' ||
+        contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') ||
+        github.event.pull_request.auto_merge != null) ||
+        contains(github.event.pull_request.body, '#e2e'
+      )
+    uses: ./.github/workflows/faucet-tests-main.yaml
+    secrets: inherit
+    with:
+      GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }}
+      SKIP_JOB: ${{ needs.file_change_determinator.outputs.only_docs_changed == 'true' }}
+
   indexer-grpc-e2e-tests:
     needs: [permission-check, rust-images, determine-docker-build-metadata] # runs with the default release docker build variant "rust-images"
     if: |
diff --git a/.github/workflows/faucet-tests-main.yaml b/.github/workflows/faucet-tests-main.yaml
new file mode 100644
index 0000000000000..2995c50d34400
--- /dev/null
+++ b/.github/workflows/faucet-tests-main.yaml
@@ -0,0 +1,74 @@
+## IMPORTANT NOTE TO EDITORS OF THIS FILE ##
+
+## If you are trying to change how this CI works, you MUST go read the important
+## note at the top of docker-build-test.yaml. In short, to test this, you must temporarily
+## change docker-build-test to use the pull_request trigger instead of pull_request_target.
+
+## Make sure to add the CICD:build-images and CICD:run-e2e-tests labels to test
+## this within an in-review PR.
+
+## If the above approach is too slow (since you have to wait for the rust images
+## to build), you can cut the iteration time dramatically by changing the envs
+##   - Replace env.IMAGE_TAG with a known image tag
+##   - env.GIT_SHA will resolve to that of your PR branch
+
+# These tests ensure that changes to the node don't break compatibility with the faucet.
+#
+# For tests that prevent changes to the faucet breaking compatibility with the production
+# networks, see faucet-tests-prod.
+#
+# This test uses the node image built from the previous step, hence the workflow_call
+# trigger.
+
+name: "Faucet Integration Tests: Main"
+on:
+  # This is called from within the docker-build-test.yaml workflow since we depend
+  # on the images having been built before this workflow runs.
+  workflow_call:
+    inputs:
+      GIT_SHA:
+        required: true
+        type: string
+        description: Use this to override the git SHA1, branch name (e.g. devnet) or tag to checkout and test
+      SKIP_JOB:
+        required: false
+        default: false
+        type: boolean
+        description: Set to true to skip this job. Useful for PRs that don't require this workflow.
+
+env:
+  # This is the docker image tag that will be used for these tests.
+  # It is used to pull the docker images for the CI.
+  IMAGE_TAG: ${{ inputs.GIT_SHA || 'devnet' }} # default to "devnet" tag when not running on workflow_call
+  GIT_SHA: ${{ inputs.GIT_SHA || github.event.pull_request.head.sha || github.sha }} # default to PR branch sha when not running on workflow_call
+
+jobs:
+  # These tests ensure that the faucet works with a node built from main. As long as we
+  # only upgrade devnet when this job is green, and we always update the faucet image
+  # alongside the image for the faucet private fullnode, the faucet and fullnode should
+  # always be compatible in production.
+  run-tests-main:
+    if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests')
+    runs-on: high-perf-docker
+    steps:
+      - uses: actions/checkout@v3
+        if: ${{ !inputs.SKIP_JOB }}
+        with:
+          ref: ${{ env.GIT_SHA }}
+
+      - uses: aptos-labs/aptos-core/.github/actions/docker-setup@main
+        if: ${{ !inputs.SKIP_JOB }}
+        with:
+          GCP_WORKLOAD_IDENTITY_PROVIDER: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
+          GCP_SERVICE_ACCOUNT_EMAIL: ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }}
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          AWS_DOCKER_ARTIFACT_REPO: ${{ secrets.AWS_DOCKER_ARTIFACT_REPO }}
+          GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }}
+
+      - uses: ./.github/actions/run-faucet-tests
+        if: ${{ !inputs.SKIP_JOB }}
+        with:
+          NETWORK: custom
+          IMAGE_TAG: ${{ env.IMAGE_TAG }}
+          GCP_DOCKER_ARTIFACT_REPO: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}
diff --git a/.github/workflows/faucet-tests.yaml b/.github/workflows/faucet-tests-prod.yaml
similarity index 81%
rename from .github/workflows/faucet-tests.yaml
rename to .github/workflows/faucet-tests-prod.yaml
index bc5656c155ffa..d56211b779d2d 100644
--- a/.github/workflows/faucet-tests.yaml
+++ b/.github/workflows/faucet-tests-prod.yaml
@@ -1,4 +1,13 @@
-name: "Faucet Integration Tests"
+# These tests ensure that changes to the faucet itself or its code dependencies don't
+# cause the faucet to stop working with the production (devnet and testnet) networks.
+#
+# For tests that prevent the node from breaking compatibility with the faucet, see
+# faucet-tests-main.
+#
+# These tests use node images built from devnet / testnet so we don't need to use
+# workflow_call to wait for the image build to finish.
+
+name: "Faucet Integration Tests: Prod"
 on:
   pull_request_target:
     types: [labeled, opened, synchronize, reopened, auto_merge_enabled]
@@ -24,6 +33,7 @@ jobs:
       required-permission: write
       comment-not-permitted: Sorry, you don't have permission to trigger this workflow.
 
+  # These tests ensure that the faucet works with the nodes running on devnet.
   run-tests-devnet:
     if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests')
     needs: [permission-check]
@@ -43,6 +53,7 @@ jobs:
           NETWORK: devnet
           GCP_DOCKER_ARTIFACT_REPO: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }}
 
+  # These tests ensure that the faucet works with the nodes running on testnet.
   run-tests-testnet:
     if: contains(github.event.pull_request.labels.*.name, 'CICD:non-required-tests')
     needs: [permission-check]
diff --git a/.github/workflows/forge-pfn.yaml b/.github/workflows/forge-pfn.yaml
index d897a61d20889..7db14dd04ab1d 100644
--- a/.github/workflows/forge-pfn.yaml
+++ b/.github/workflows/forge-pfn.yaml
@@ -23,10 +23,6 @@ on:
   pull_request:
     paths:
       - ".github/workflows/forge-pfn.yaml"
-      - "testsuite/find_latest_image.py"
-  push:
-    branches:
-      - aptos-release-v* # The aptos release branches
 
 env:
   AWS_ACCOUNT_NUM: ${{ secrets.ENV_ECR_AWS_ACCOUNT_NUM }}
diff --git a/.github/workflows/forge-state-sync.yaml b/.github/workflows/forge-state-sync.yaml
index 976baf489089a..c9b6e2afc72b5 100644
--- a/.github/workflows/forge-state-sync.yaml
+++ b/.github/workflows/forge-state-sync.yaml
@@ -23,10 +23,6 @@ on:
   pull_request:
     paths:
       - ".github/workflows/forge-state-sync.yaml"
-      - "testsuite/find_latest_image.py"
-  push:
-    branches:
-      - aptos-release-v* # The aptos release branches
 
 env:
   AWS_ACCOUNT_NUM: ${{ secrets.ENV_ECR_AWS_ACCOUNT_NUM }}
diff --git a/.github/workflows/replay-verify.yaml b/.github/workflows/replay-verify.yaml
index 89aaf537d4c1c..0c86e0026d69f 100644
--- a/.github/workflows/replay-verify.yaml
+++ b/.github/workflows/replay-verify.yaml
@@ -69,7 +69,7 @@ jobs:
     if: |
       github.event_name == 'schedule' ||
       github.event_name == 'push' ||
-      github.event_name == 'workflow_dispatch' && (inputs.CHAIN_NAME == 'testnet' || inputs.CHAIN_NAME == 'all' )
+      github.event_name == 'workflow_dispatch' && (inputs.CHAIN_NAME == 'mainnet' || inputs.CHAIN_NAME == 'all' )
     needs: determine-test-metadata
     uses: aptos-labs/aptos-core/.github/workflows/workflow-run-replay-verify.yaml@main
     secrets: inherit
@@ -83,7 +83,7 @@ jobs:
       BACKUP_CONFIG_TEMPLATE_PATH: terraform/helm/fullnode/files/backup/s3-public.yaml
       # workflow config
       RUNS_ON: "high-perf-docker-with-local-ssd"
-      TIMEOUT_MINUTES: 240
+      TIMEOUT_MINUTES: 300
 
   test-replay:
     if: ${{ github.event_name == 'pull_request' }}
diff --git a/.gitignore b/.gitignore
index 3770a20fe3eb6..d17443bf41bc0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -76,6 +76,10 @@ aptos-move/move-examples/scripts/minter/build/Minter/*
 # Docker incremental build temporary files and directories
 target-out-docker
 **/Dockerfile.tmp
+# Indexer grpc temporary certificates.
+docker/compose/indexer-grpc/data-service-grpc-server.crt +docker/compose/indexer-grpc/data-service-grpc-server.key + # Doc generation output *.md.old @@ -90,8 +94,9 @@ target-out-docker .*\# \#*\# -# Aptos CLI files +# Aptos CLI / local testnet files .aptos +**/*.rdb # VSCode settings .vscode/ diff --git a/Cargo.lock b/Cargo.lock index 9a28b8c130bbf..a5b99e45f6cc8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -244,7 +244,6 @@ dependencies = [ "aptos-storage-interface", "aptos-telemetry", "aptos-temppath", - "aptos-transactional-test-harness", "aptos-types", "aptos-vm", "aptos-vm-genesis", @@ -576,8 +575,10 @@ dependencies = [ "aptos-state-view", "aptos-types", "aptos-vm-logging", + "aptos-vm-types", "arc-swap", "bcs 0.1.4", + "bytes", "claims", "criterion", "crossbeam", @@ -592,6 +593,7 @@ dependencies = [ "proptest-derive", "rand 0.7.3", "rayon", + "serde", ] [[package]] @@ -913,9 +915,11 @@ dependencies = [ "aptos-storage-service-types", "aptos-time-service", "aptos-types", + "arc-swap", "async-trait", "bcs 0.1.4", "claims", + "dashmap", "futures", "itertools", "maplit", @@ -985,6 +989,7 @@ dependencies = [ "claims", "clap 4.3.21", "dashmap", + "either", "itertools", "lru 0.7.8", "move-core-types", @@ -1041,6 +1046,7 @@ dependencies = [ "aptos-vm", "bcs 0.1.4", "byteorder", + "bytes", "move-core-types", "move-resource-viewer", "num-derive", @@ -1188,6 +1194,7 @@ dependencies = [ "aptos-vm-genesis", "arr_macro", "bcs 0.1.4", + "bytes", "dashmap", "fail 0.5.0", "itertools", @@ -1266,6 +1273,7 @@ dependencies = [ "bcs 0.1.4", "clap 4.3.21", "crossbeam-channel", + "dashmap", "itertools", "num_cpus", "rand 0.7.3", @@ -1567,6 +1575,7 @@ dependencies = [ "aptos-table-natives", "aptos-types", "aptos-vm", + "aptos-vm-types", "ark-bls12-381", "ark-ec", "ark-ff", @@ -1590,6 +1599,7 @@ dependencies = [ "itertools", "libsecp256k1", "log", + "lru 0.7.8", "merlin", "move-binary-format", "move-cli", @@ -1879,7 +1889,7 @@ dependencies = [ "futures", "futures-core", "once_cell", - "prost", + "prost 0.12.1", "redis", "reqwest", "serde", @@ -1887,8 +1897,9 @@ dependencies = [ "serde_yaml 0.8.26", "tempfile", "tokio", - "tonic 0.8.3", + "tonic 0.10.0", "tracing", + "url", ] [[package]] @@ -1909,13 +1920,13 @@ dependencies = [ "cloud-storage", "futures", "once_cell", - "prost", + "prost 0.12.1", "redis", "serde", "serde_json", "tokio", "tokio-stream", - "tonic 0.8.3", + "tonic 0.10.0", "tonic-reflection", "tracing", "uuid", @@ -1994,7 +2005,7 @@ dependencies = [ "serde_json", "tokio", "tokio-stream", - "tonic 0.8.3", + "tonic 0.10.0", ] [[package]] @@ -2023,8 +2034,9 @@ dependencies = [ "futures-core", "futures-util", "itertools", + "once_cell", "prometheus", - "prost", + "prost 0.12.1", "redis", "regex", "reqwest", @@ -2033,7 +2045,7 @@ dependencies = [ "serde_yaml 0.8.26", "tempfile", "tokio", - "tonic 0.8.3", + "tonic 0.10.0", "tracing", "url", "warp", @@ -2057,13 +2069,13 @@ dependencies = [ "futures", "hostname", "once_cell", - "prost", + "prost 0.12.1", "reqwest", "serde", "serde_json", "tokio", "toml 0.7.4", - "tonic 0.8.3", + "tonic 0.10.0", "tracing", "tracing-subscriber", ] @@ -2109,7 +2121,7 @@ dependencies = [ "itertools", "once_cell", "prometheus", - "prost", + "prost 0.12.1", "redis", "redis-test", "serde", @@ -2117,9 +2129,10 @@ dependencies = [ "serde_yaml 0.8.26", "tokio", "toml 0.7.4", - "tonic 0.8.3", + "tonic 0.10.0", "tracing", "tracing-subscriber", + "url", "warp", ] @@ -2196,6 +2209,7 @@ version = "0.1.0" dependencies = [ "anyhow", "aptos-abstract-gas-usage", + "aptos-aggregator", 
"aptos-bitvec", "aptos-block-executor", "aptos-cached-packages", @@ -2216,6 +2230,7 @@ dependencies = [ "aptos-vm-logging", "aptos-vm-types", "bcs 0.1.4", + "bytes", "goldenfile", "hex", "move-binary-format", @@ -2434,12 +2449,15 @@ dependencies = [ "aptos-infallible", "aptos-types", "bcs 0.1.4", + "bytes", "claims", "crossbeam", "dashmap", "proptest", "proptest-derive", "rayon", + "serde", + "test-case", ] [[package]] @@ -2519,6 +2537,26 @@ dependencies = [ "tokio-util 0.7.3", ] +[[package]] +name = "aptos-network-benchmark" +version = "0.1.0" +dependencies = [ + "aptos-config", + "aptos-logger", + "aptos-metrics-core", + "aptos-network", + "aptos-runtimes", + "aptos-time-service", + "aptos-types", + "async-channel", + "bytes", + "futures", + "once_cell", + "rand 0.7.3", + "serde", + "tokio", +] + [[package]] name = "aptos-network-builder" version = "0.1.0" @@ -2649,6 +2687,7 @@ dependencies = [ "aptos-mempool", "aptos-mempool-notifications", "aptos-network", + "aptos-network-benchmark", "aptos-network-builder", "aptos-node-identity", "aptos-peer-monitoring-service-client", @@ -2670,6 +2709,7 @@ dependencies = [ "aptos-vm", "bcs 0.1.4", "clap 4.3.21", + "either", "fail 0.5.0", "futures", "hex", @@ -2886,12 +2926,13 @@ dependencies = [ [[package]] name = "aptos-protos" -version = "1.0.0" +version = "1.0.1" dependencies = [ + "futures-core", "pbjson", - "prost", + "prost 0.12.1", "serde", - "tonic 0.8.3", + "tonic 0.10.0", ] [[package]] @@ -3319,6 +3360,7 @@ dependencies = [ "aptos-crypto", "aptos-types", "bcs 0.1.4", + "bytes", "serde", "serde_bytes", "serde_json", @@ -3340,6 +3382,7 @@ dependencies = [ "arr_macro", "assert_unordered", "bcs 0.1.4", + "bytes", "crossbeam-channel", "dashmap", "itertools", @@ -3421,6 +3464,7 @@ dependencies = [ "aptos-compression", "aptos-config", "aptos-crypto", + "aptos-time-service", "aptos-types", "bcs 0.1.4", "claims", @@ -3548,6 +3592,7 @@ version = "0.0.0" dependencies = [ "anyhow", "aptos", + "aptos-config", "aptos-forge", "aptos-genesis", "aptos-global-constants", @@ -3672,38 +3717,6 @@ dependencies = [ "serde", ] -[[package]] -name = "aptos-transactional-test-harness" -version = "0.1.0" -dependencies = [ - "anyhow", - "aptos-api-types", - "aptos-cached-packages", - "aptos-crypto", - "aptos-framework", - "aptos-gas-schedule", - "aptos-language-e2e-tests", - "aptos-state-view", - "aptos-storage-interface", - "aptos-types", - "aptos-vm", - "aptos-vm-genesis", - "bcs 0.1.4", - "clap 4.3.21", - "datatest-stable", - "hex", - "move-binary-format", - "move-command-line-common", - "move-compiler", - "move-core-types", - "move-resource-viewer", - "move-transactional-test-runner", - "move-vm-runtime", - "once_cell", - "serde", - "serde_json", -] - [[package]] name = "aptos-types" version = "0.0.3" @@ -3714,6 +3727,7 @@ dependencies = [ "aptos-crypto-derive", "arr_macro", "bcs 0.1.4", + "bytes", "chrono", "claims", "derivative", @@ -3808,6 +3822,7 @@ dependencies = [ "aptos-vm-logging", "aptos-vm-types", "bcs 0.1.4", + "bytes", "claims", "crossbeam-channel", "dashmap", @@ -3864,6 +3879,7 @@ dependencies = [ "aptos-vm", "aptos-vm-types", "bcs 0.1.4", + "bytes", "move-core-types", "move-vm-types", "once_cell", @@ -3926,7 +3942,9 @@ dependencies = [ "aptos-language-e2e-tests", "aptos-state-view", "aptos-types", + "aptos-vm", "bcs 0.1.4", + "bytes", "claims", "either", "move-binary-format", @@ -4232,7 +4250,7 @@ checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965" dependencies = [ "async-task", "concurrent-queue", - "fastrand", + "fastrand 
1.8.0", "futures-lite", "once_cell", "slab", @@ -4693,9 +4711,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.0.2" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487f1e0fcbe47deb8b0574e646def1c903389d95241dd1bbcc6ce4a715dfc0c1" +checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" [[package]] name = "bitmaps" @@ -4804,7 +4822,7 @@ dependencies = [ "async-channel", "async-task", "atomic-waker", - "fastrand", + "fastrand 1.8.0", "futures-lite", "once_cell", ] @@ -4855,9 +4873,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.11.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "byte-slice-cast" @@ -4918,9 +4936,12 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +dependencies = [ + "serde", +] [[package]] name = "bzip2-sys" @@ -5137,9 +5158,9 @@ dependencies = [ [[package]] name = "clap_complete" -version = "4.3.1" +version = "4.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6b5c519bab3ea61843a7923d074b04245624bb84a64a8c150f5deb014e388b" +checksum = "4110a1e6af615a9e6d0a36f805d5c99099f8bab9b8042f5bc1fa220a4a89e36f" dependencies = [ "clap 4.3.21", ] @@ -5208,9 +5229,10 @@ dependencies = [ "hex", "jsonwebtoken 7.2.0", "lazy_static", - "openssl", + "pem 0.8.3", "percent-encoding", "reqwest", + "ring", "serde", "serde_json", "tokio", @@ -5303,8 +5325,8 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e57ff02e8ad8e06ab9731d5dc72dc23bef9200778eae1a89d555d8c42e5d4a86" dependencies = [ - "prost", - "prost-types", + "prost 0.11.9", + "prost-types 0.11.9", "tonic 0.8.3", "tracing-core", ] @@ -5321,7 +5343,7 @@ dependencies = [ "futures", "hdrhistogram", "humantime", - "prost-types", + "prost-types 0.11.9", "serde", "serde_json", "thread_local", @@ -5946,7 +5968,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7a532c1f99a0f596f6960a60d1e119e91582b24b39e2d83a190e61262c3ef0c" dependencies = [ "bigdecimal", - "bitflags 2.0.2", + "bitflags 2.4.0", "byteorder", "chrono", "diesel_derives", @@ -6109,9 +6131,9 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "dunce" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd4b30a6560bbd9b4620f4de34c3f14f60848e58a9b7216801afcb4c7b31c3c" +checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" [[package]] name = "e2e-move-tests" @@ -6134,7 +6156,6 @@ dependencies = [ "aptos-types", "aptos-vm", "aptos-vm-genesis", - "aptos-writeset-generator", "bcs 0.1.4", "hex", "itertools", @@ -6269,6 +6290,17 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "errno" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" 
+dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "errno-dragonfly" version = "0.1.2" @@ -6306,7 +6338,7 @@ dependencies = [ "fixed-hash", "impl-codec 0.5.1", "impl-rlp", - "scale-info", + "scale-info 1.0.0", "tiny-keccak", ] @@ -6318,8 +6350,10 @@ checksum = "11da94e443c60508eb62cf256243a64da87304c2802ac2528847f79d750007ef" dependencies = [ "crunchy", "fixed-hash", + "impl-codec 0.6.0", "impl-rlp", "impl-serde", + "scale-info 1.0.0", "tiny-keccak", ] @@ -6336,12 +6370,31 @@ dependencies = [ "parity-scale-codec 2.3.1", "rlp", "rlp-derive", - "scale-info", + "scale-info 1.0.0", "serde", "sha3 0.9.1", "triehash", ] +[[package]] +name = "ethereum" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23750149fe8834c0e24bb9adcbacbe06c45b9861f15df53e09f26cb7c4ab91ef" +dependencies = [ + "bytes", + "ethereum-types 0.13.1", + "hash-db", + "hash256-std-hasher", + "parity-scale-codec 3.4.0", + "rlp", + "rlp-derive", + "scale-info 2.9.0", + "serde", + "sha3 0.10.6", + "triehash", +] + [[package]] name = "ethereum-types" version = "0.12.1" @@ -6353,7 +6406,7 @@ dependencies = [ "impl-codec 0.5.1", "impl-rlp", "primitive-types 0.10.1", - "scale-info", + "scale-info 1.0.0", "uint", ] @@ -6365,9 +6418,11 @@ checksum = "b2827b94c556145446fcce834ca86b7abf0c39a805883fe20e72c5bfdb5a0dc6" dependencies = [ "ethbloom 0.12.1", "fixed-hash", + "impl-codec 0.6.0", "impl-rlp", "impl-serde", "primitive-types 0.11.1", + "scale-info 1.0.0", "uint", ] @@ -6391,19 +6446,40 @@ checksum = "408ffdd509e16de15ea9b51f5333748f6086601f29d445d2ba53dd7e95565574" dependencies = [ "auto_impl", "environmental", - "ethereum", - "evm-core", - "evm-gasometer", - "evm-runtime", + "ethereum 0.11.1", + "evm-core 0.33.0", + "evm-gasometer 0.33.0", + "evm-runtime 0.33.0", "log", "parity-scale-codec 2.3.1", "primitive-types 0.10.1", "rlp", - "scale-info", + "scale-info 1.0.0", "serde", "sha3 0.8.2", ] +[[package]] +name = "evm" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d388bbd18050623b996cc4ba0643971e2978693ad56ca8b7603080cfa5eaf738" +dependencies = [ + "auto_impl", + "environmental", + "ethereum 0.12.0", + "evm-core 0.36.0", + "evm-gasometer 0.36.0", + "evm-runtime 0.36.0", + "log", + "parity-scale-codec 3.4.0", + "primitive-types 0.11.1", + "rlp", + "scale-info 2.9.0", + "serde", + "sha3 0.10.6", +] + [[package]] name = "evm-core" version = "0.33.0" @@ -6413,7 +6489,19 @@ dependencies = [ "funty 1.1.0", "parity-scale-codec 2.3.1", "primitive-types 0.10.1", - "scale-info", + "scale-info 1.0.0", + "serde", +] + +[[package]] +name = "evm-core" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5fb3a449a544a67c879d2f74e1c3d9022de3ec31c9a20817015816f687aa2af" +dependencies = [ + "parity-scale-codec 3.4.0", + "primitive-types 0.11.1", + "scale-info 2.9.0", "serde", ] @@ -6422,8 +6510,8 @@ name = "evm-exec-utils" version = "0.1.0" dependencies = [ "anyhow", - "evm", - "evm-runtime", + "evm 0.33.1", + "evm-runtime 0.33.0", "hex", "move-command-line-common", "primitive-types 0.10.1", @@ -6438,11 +6526,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c446679607eacac4e8c8738e20c97ea9b3c86eddd8b43666744b05f416037bd9" dependencies = [ "environmental", - "evm-core", - "evm-runtime", + "evm-core 0.33.0", + "evm-runtime 0.33.0", "primitive-types 0.10.1", ] +[[package]] +name = "evm-gasometer" +version = "0.36.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "170a27b6e49b8279016afffcdc6ebae9225d5acff3a546ad8589929b091e7ac5" +dependencies = [ + "environmental", + "evm-core 0.36.0", + "evm-runtime 0.36.0", + "primitive-types 0.11.1", +] + [[package]] name = "evm-runtime" version = "0.33.0" @@ -6450,11 +6550,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "419e8434ac6e850a8a4bc09a19406264582d1940913b2920be2af948f4ffc49b" dependencies = [ "environmental", - "evm-core", + "evm-core 0.33.0", "primitive-types 0.10.1", "sha3 0.8.2", ] +[[package]] +name = "evm-runtime" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d48c2545a02e3a4d1a5184a96af11037334dce947b6bdb389b3503b3a6f8dcd" +dependencies = [ + "auto_impl", + "environmental", + "evm-core 0.36.0", + "primitive-types 0.11.1", + "sha3 0.10.6", +] + [[package]] name = "exr" version = "1.5.3" @@ -6511,6 +6624,12 @@ dependencies = [ "instant", ] +[[package]] +name = "fastrand" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" + [[package]] name = "field_count" version = "0.1.1" @@ -6712,7 +6831,7 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" dependencies = [ - "fastrand", + "fastrand 1.8.0", "futures-core", "futures-io", "memchr", @@ -7003,9 +7122,9 @@ dependencies = [ [[package]] name = "goldenfile" -version = "1.4.3" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03bd0e9c2ea26ce269d37016d6b95556bbfa544cbbbdeff40102ac54121c990b" +checksum = "86342e69ffaa1cd5450d6bad08a80da96c441d891a0e07c72c62c4abdd281713" dependencies = [ "similar-asserts", "tempfile", @@ -7055,8 +7174,8 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a3b24a3f57be08afc02344e693afb55e48172c9c2ab86ff3fdb8efff550e4b9" dependencies = [ - "prost", - "prost-types", + "prost 0.11.9", + "prost-types 0.11.9", "tonic 0.9.2", ] @@ -7083,7 +7202,7 @@ dependencies = [ "google-cloud-gax", "google-cloud-googleapis", "google-cloud-token", - "prost-types", + "prost-types 0.11.9", "thiserror", "tokio", "tokio-util 0.7.3", @@ -8152,7 +8271,6 @@ dependencies = [ "aptos-vm-genesis", "aptos-vm-logging", "aptos-vm-types", - "aptos-writeset-generator", "bcs 0.1.4", "fail 0.5.0", "itertools", @@ -8404,6 +8522,12 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +[[package]] +name = "linux-raw-sys" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" + [[package]] name = "listener" version = "0.1.0" @@ -8801,6 +8925,7 @@ dependencies = [ "anyhow", "bcs 0.1.4", "better_any", + "bytes", "datatest-stable", "itertools", "move-binary-format", @@ -8901,6 +9026,7 @@ version = "0.1.0" dependencies = [ "anyhow", "bcs 0.1.4", + "bytes", "clap 4.3.21", "codespan-reporting", "colored", @@ -9039,6 +9165,7 @@ dependencies = [ "anyhow", "arbitrary", "bcs 0.1.4", + "bytes", "ethnum", "hex", "num 0.4.0", @@ -9504,6 +9631,7 @@ dependencies = [ "anyhow", "bcs 0.1.4", "better_any", + "bytes", "move-binary-format", "move-cli", "move-core-types", @@ -9529,7 +9657,7 @@ dependencies 
= [ "codespan-reporting", "datatest-stable", "ethnum", - "evm", + "evm 0.33.1", "evm-exec-utils", "hex", "itertools", @@ -9606,7 +9734,7 @@ dependencies = [ "colored", "datatest-stable", "difference", - "evm", + "evm 0.36.0", "evm-exec-utils", "itertools", "move-binary-format", @@ -9635,6 +9763,7 @@ name = "move-vm-integration-tests" version = "0.1.0" dependencies = [ "anyhow", + "bytes", "memory-stats", "move-binary-format", "move-bytecode-verifier", @@ -9663,6 +9792,7 @@ version = "0.1.0" dependencies = [ "anyhow", "better_any", + "bytes", "fail 0.4.0", "hex", "move-binary-format", @@ -9683,6 +9813,7 @@ name = "move-vm-test-utils" version = "0.1.0" dependencies = [ "anyhow", + "bytes", "move-binary-format", "move-core-types", "move-table-extension", @@ -10135,9 +10266,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.50" +version = "0.10.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e30d8bc91859781f0a943411186324d580f2bbeb71b452fe91ae344806af3f1" +checksum = "729b745ad4a5575dd06a3e1af1414bd330ee561c01b3899eb584baeaa8def17e" dependencies = [ "bitflags 1.3.2", "cfg-if", @@ -10167,9 +10298,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.85" +version = "0.9.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d3d193fb1488ad46ffe3aaabc912cc931d02ee8518fe2959aea8ef52718b0c0" +checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" dependencies = [ "cc", "libc", @@ -10353,7 +10484,7 @@ dependencies = [ "cfg-if", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "winapi 0.3.9", ] @@ -10366,7 +10497,7 @@ checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "windows-sys 0.36.1", ] @@ -10388,9 +10519,9 @@ checksum = "9423e2b32f7a043629287a536f21951e8c6a82482d0acb1eeebfc90bc2225b22" [[package]] name = "pbjson" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "599fe9aefc2ca0df4a96179b3075faee2cacb89d4cf947a00b9a89152dfffc9d" +checksum = "048f9ac93c1eab514f9470c4bc8d97ca2a0a236b84f45cc19d69a59fc11467f6" dependencies = [ "base64 0.13.0", "serde", @@ -10933,7 +11064,7 @@ dependencies = [ "impl-codec 0.5.1", "impl-rlp", "impl-serde", - "scale-info", + "scale-info 1.0.0", "uint", ] @@ -10947,6 +11078,7 @@ dependencies = [ "impl-codec 0.6.0", "impl-rlp", "impl-serde", + "scale-info 1.0.0", "uint", ] @@ -11038,15 +11170,15 @@ checksum = "8bccbff07d5ed689c4087d20d7307a52ab6141edeedf487c3876a55b86cf63df" [[package]] name = "prometheus" -version = "0.13.0" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f64969ffd5dd8f39bd57a68ac53c163a095ed9d0fb707146da1b27025a3504" +checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" dependencies = [ "cfg-if", "fnv", "lazy_static", "memchr", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "thiserror", ] @@ -11113,7 +11245,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.11.9", +] + +[[package]] +name = "prost" +version = "0.12.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d" +dependencies = [ + "bytes", + "prost-derive 0.12.1", ] [[package]] @@ -11129,13 +11271,35 @@ dependencies = [ "syn 1.0.105", ] +[[package]] +name = "prost-derive" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2 1.0.64", + "quote 1.0.29", + "syn 2.0.25", +] + [[package]] name = "prost-types" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ - "prost", + "prost 0.11.9", +] + +[[package]] +name = "prost-types" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e081b29f63d83a4bc75cfc9f3fe424f9156cf92d8a4f0c9407cce9a1b67327cf" +dependencies = [ + "prost 0.12.1", ] [[package]] @@ -11216,15 +11380,6 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" -[[package]] -name = "quick-xml" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8533f14c8382aaad0d592c812ac3b826162128b65662331e1127b45c3d18536b" -dependencies = [ - "memchr", -] - [[package]] name = "quick-xml" version = "0.23.1" @@ -11475,6 +11630,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.3" @@ -11482,7 +11646,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom 0.2.7", - "redox_syscall", + "redox_syscall 0.2.16", "thiserror", ] @@ -11550,15 +11714,6 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "reqwest" version = "0.11.11" @@ -11810,7 +11965,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72c825b8aa8010eb9ee99b75f05e10180b9278d161583034d7574c9d617aeada" dependencies = [ "bitflags 1.3.2", - "errno", + "errno 0.2.8", "io-lifetimes 0.7.3", "libc", "linux-raw-sys 0.0.46", @@ -11824,13 +11979,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db4165c9963ab29e422d6c26fbc1d37f15bace6b2810221f9d925023480fcf0e" dependencies = [ "bitflags 1.3.2", - "errno", + "errno 0.2.8", "io-lifetimes 1.0.9", "libc", "linux-raw-sys 0.1.4", "windows-sys 0.45.0", ] +[[package]] +name = "rustix" +version = "0.38.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" +dependencies = [ + "bitflags 2.4.0", + "errno 0.3.3", + "libc", + "linux-raw-sys 0.4.7", + "windows-sys 0.48.0", +] + [[package]] 
name = "rustls" version = "0.20.6" @@ -11857,9 +12025,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", "rustls-pemfile 1.0.1", @@ -11896,9 +12064,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.100.1" +version = "0.100.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3" dependencies = [ "ring", "untrusted", @@ -11966,7 +12134,20 @@ dependencies = [ "cfg-if", "derive_more", "parity-scale-codec 2.3.1", - "scale-info-derive", + "scale-info-derive 1.0.0", +] + +[[package]] +name = "scale-info" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35c0a159d0c45c12b20c5a844feb1fe4bea86e28f17b92a5f0c42193634d3782" +dependencies = [ + "bitvec 1.0.1", + "cfg-if", + "derive_more", + "parity-scale-codec 3.4.0", + "scale-info-derive 2.9.0", ] [[package]] @@ -11981,6 +12162,18 @@ dependencies = [ "syn 1.0.105", ] +[[package]] +name = "scale-info-derive" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "912e55f6d20e0e80d63733872b40e1227c0bce1e1ab81ba67d696339bfd7fd29" +dependencies = [ + "proc-macro-crate", + "proc-macro2 1.0.64", + "quote 1.0.29", + "syn 1.0.105", +] + [[package]] name = "schannel" version = "0.1.20" @@ -12057,21 +12250,33 @@ dependencies = [ "libc", ] +[[package]] +name = "self-replace" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66083111de8877b8c588e3a37255f59e20a870eeb9b586473e3bd70b925a9397" +dependencies = [ + "tempfile", + "windows-sys 0.48.0", +] + [[package]] name = "self_update" -version = "0.34.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b28d58e73c427061f46c801176f54349be3c1a2818cf549e1d9bcac37eef7bca" +checksum = "2b3c585a1ced6b97ac13bd5e56f66559e5a75f477da5913f70df98e114518446" dependencies = [ "hyper", "indicatif 0.17.3", "log", - "quick-xml 0.22.0", + "quick-xml 0.23.1", "regex", "reqwest", + "self-replace", "semver", "serde_json", "tempfile", + "urlencoding", "zip", ] @@ -12602,7 +12807,6 @@ dependencies = [ "aptos-vault-client", "aptos-vm", "aptos-vm-genesis", - "aptos-writeset-generator", "async-trait", "base64 0.13.0", "bcs 0.1.4", @@ -12912,16 +13116,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.3.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi 0.3.9", + "fastrand 2.0.0", + "redox_syscall 0.3.5", + "rustix 0.38.13", + "windows-sys 0.48.0", ] [[package]] @@ -12982,6 +13185,41 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "507e9898683b6c43a9aa55b64259b721b52ba226e0f3779137e50ad114a4c90b" +[[package]] +name = "test-case" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "2a1d6e7bde536b0412f20765b76e921028059adfd1b90d8974d33fd3c91b25df" +dependencies = [ + "test-case-macros", +] + +[[package]] +name = "test-case-core" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d10394d5d1e27794f772b6fc854c7e91a2dc26e2cbf807ad523370c2a59c0cee" +dependencies = [ + "cfg-if", + "proc-macro-error", + "proc-macro2 1.0.64", + "quote 1.0.29", + "syn 1.0.105", +] + +[[package]] +name = "test-case-macros" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eeb9a44b1c6a54c1ba58b152797739dba2a83ca74e18168a68c980eb142f9404" +dependencies = [ + "proc-macro-error", + "proc-macro2 1.0.64", + "quote 1.0.29", + "syn 1.0.105", + "test-case-core", +] + [[package]] name = "test-generation" version = "0.1.0" @@ -13412,7 +13650,6 @@ dependencies = [ "axum 0.6.18", "base64 0.13.0", "bytes", - "flate2", "futures-core", "futures-util", "h2", @@ -13422,12 +13659,9 @@ dependencies = [ "hyper-timeout", "percent-encoding", "pin-project", - "prost", - "prost-derive", - "rustls-native-certs", - "rustls-pemfile 1.0.1", + "prost 0.11.9", + "prost-derive 0.11.9", "tokio", - "tokio-rustls 0.23.4", "tokio-stream", "tokio-util 0.7.3", "tower", @@ -13458,7 +13692,7 @@ dependencies = [ "hyper-timeout", "percent-encoding", "pin-project", - "prost", + "prost 0.11.9", "rustls-pemfile 1.0.1", "tokio", "tokio-rustls 0.24.1", @@ -13471,17 +13705,47 @@ dependencies = [ ] [[package]] -name = "tonic-reflection" -version = "0.6.0" +name = "tonic" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67494bad4dda4c9bffae901dfe14e2b2c0f760adb4706dc10beeb81799f7f7b2" +checksum = "5469afaf78a11265c343a88969045c1568aa8ecc6c787dbf756e92e70f199861" dependencies = [ + "async-stream", + "async-trait", + "axum 0.6.18", + "base64 0.21.2", "bytes", - "prost", - "prost-types", + "flate2", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost 0.12.1", + "rustls-native-certs", + "rustls-pemfile 1.0.1", "tokio", + "tokio-rustls 0.24.1", "tokio-stream", - "tonic 0.8.3", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-reflection" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e61add39c1426d5f21eae2cc196e97e1f5a5ea7bcf491df3885797992a86eb" +dependencies = [ + "prost 0.12.1", + "prost-types 0.12.1", + "tokio", + "tokio-stream", + "tonic 0.10.0", ] [[package]] @@ -13962,7 +14226,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80b49db848e09c50db9e7d15aee89030b6ebb8c55e77aff2cef22aeb6844c8b5" dependencies = [ "const_fn", - "errno", + "errno 0.2.8", "js-sys", "libc", "rustix 0.35.9", @@ -14229,9 +14493,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" dependencies = [ "ring", "untrusted", @@ -14252,7 +14516,7 @@ version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" dependencies = [ - "rustls-webpki 0.100.1", + "rustls-webpki 0.100.3", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 40622c1641495..72a4f8fa2ac32 100644 --- a/Cargo.toml 
+++ b/Cargo.toml @@ -21,7 +21,6 @@ members = [ "aptos-move/aptos-resource-viewer", "aptos-move/aptos-sdk-builder", "aptos-move/aptos-transaction-benchmarks", - "aptos-move/aptos-transactional-test-harness", "aptos-move/aptos-validator-interface", "aptos-move/aptos-vm", "aptos-move/aptos-vm-benchmarks", @@ -74,6 +73,7 @@ members = [ "crates/aptos-openapi", "crates/aptos-profiler", "crates/aptos-proptest-helpers", + "crates/aptos-protos", "crates/aptos-push-metrics", "crates/aptos-rate-limiter", "crates/aptos-rest-client", @@ -122,6 +122,7 @@ members = [ "experimental/execution/ptx-executor", "experimental/runtimes", "mempool", + "network/benchmark", "network/builder", "network/discovery", "network/framework", @@ -135,12 +136,12 @@ members = [ "secure/storage", "secure/storage/vault", "state-sync/aptos-data-client", + "state-sync/data-streaming-service", "state-sync/inter-component/consensus-notifications", "state-sync/inter-component/event-notifications", "state-sync/inter-component/mempool-notifications", "state-sync/inter-component/storage-service-notifications", - "state-sync/state-sync-v2/data-streaming-service", - "state-sync/state-sync-v2/state-sync-driver", + "state-sync/state-sync-driver", "state-sync/storage-service/client", "state-sync/storage-service/server", "state-sync/storage-service/types", @@ -229,8 +230,6 @@ members = [ "vm-validator", ] -exclude = ["crates/aptos-protos"] - # NOTE: default-members is the complete list of binaries that form the "production Aptos codebase". These members should # never include crates that require fuzzing features or test features. These are the crates we want built with no extra # test-only code included. @@ -289,7 +288,7 @@ aptos-crash-handler = { path = "crates/crash-handler" } aptos-crypto = { path = "crates/aptos-crypto" } aptos-crypto-derive = { path = "crates/aptos-crypto-derive" } aptos-data-client = { path = "state-sync/aptos-data-client" } -aptos-data-streaming-service = { path = "state-sync/state-sync-v2/data-streaming-service" } +aptos-data-streaming-service = { path = "state-sync/data-streaming-service" } aptos-db = { path = "storage/aptosdb" } aptos-db-indexer = { path = "storage/indexer" } aptos-db-tool = { path = "storage/db-tool" } @@ -351,6 +350,7 @@ aptos-mvhashmap = { path = "aptos-move/mvhashmap" } aptos-native-interface = { path = "aptos-move/aptos-native-interface" } aptos-netcore = { path = "network/netcore" } aptos-network = { path = "network/framework" } +aptos-network-benchmark = { path = "network/benchmark" } aptos-network-builder = { path = "network/builder" } aptos-network-checker = { path = "crates/aptos-network-checker" } aptos-network-discovery = { path = "network/discovery" } @@ -388,7 +388,7 @@ aptos-secure-net = { path = "secure/net" } aptos-secure-storage = { path = "secure/storage" } aptos-short-hex-str = { path = "crates/short-hex-str" } aptos-speculative-state-helper = { path = "crates/aptos-speculative-state-helper" } -aptos-state-sync-driver = { path = "state-sync/state-sync-v2/state-sync-driver" } +aptos-state-sync-driver = { path = "state-sync/state-sync-driver" } aptos-state-view = { path = "storage/state-view" } aptos-storage-interface = { path = "storage/storage-interface" } aptos-storage-service-client = { path = "state-sync/storage-service/client" } @@ -404,7 +404,6 @@ aptos-time-service = { path = "crates/aptos-time-service", features = [ ] } aptos-transaction-emitter-lib = { path = "crates/transaction-emitter-lib" } aptos-transaction-generator-lib = { path = "crates/transaction-generator-lib" 
} -aptos-transactional-test-harness = { path = "aptos-move/aptos-transactional-test-harness" } aptos-types = { path = "types" } aptos-utils = { path = "aptos-utils" } aptos-validator-interface = { path = "aptos-move/aptos-validator-interface" } @@ -433,6 +432,7 @@ ark-serialize = "0.4.0" ark-std = { version = "0.4.0", features = ["getrandom"] } assert_approx_eq = "1.1.0" assert_unordered = "0.3.5" +async-channel = "1.7.1" async-stream = "0.3" async-trait = "0.1.53" axum = "0.5.16" @@ -448,14 +448,14 @@ blake2-rfc = "0.2.18" blst = "0.3.7" bulletproofs = { version = "4.0.0" } byteorder = "1.4.3" -bytes = "1.1.0" +bytes = { version = "1.4.0", features = ["serde"] } chrono = { version = "0.4.19", features = ["clock", "serde"] } cfg_block = "0.1.1" cfg-if = "1.0.0" claims = "0.7" clap = { version = "4.3.9", features = ["derive", "unstable-styles"] } -clap_complete = "4.3.1" -cloud-storage = { version = "0.11.1", features = ["global-client"] } +clap_complete = "4.4.1" +cloud-storage = { version = "0.11.1", features = ["global-client", "rustls-tls"], default-features = false } codespan-reporting = "0.11.1" console-subscriber = "0.1.8" const_format = "0.2.26" @@ -500,7 +500,7 @@ gcp-bigquery-client = "0.13.0" get_if_addrs = "0.5.3" git2 = "0.16.1" glob = "0.3.0" -goldenfile = "1.1.0" +goldenfile = "1.5.2" google-cloud-pubsub = "0.18.0" google-cloud-storage = "0.13.0" handlebars = "4.2.2" @@ -547,6 +547,7 @@ ouroboros = "0.15.6" owo-colors = "3.5.0" parking_lot = "0.12.0" paste = "1.0.7" +pbjson = "0.5.1" percent-encoding = "2.1.0" pin-project = "1.0.10" poem = { version = "=1.3.55", features = ["anyhow", "rustls"] } @@ -557,13 +558,13 @@ pretty_assertions = "1.2.1" procfs = "0.14.1" proc-macro2 = "1.0.38" project-root = "0.2.2" -prometheus = { version = "0.13.0", default-features = false } +prometheus = { version = "0.13.3", default-features = false } prometheus-http-query = "0.5.2" prometheus-parse = "0.2.4" proptest = "1.0.0" proptest-derive = "0.3.0" -prost = "0.11.3" -prost-types = "0.11.3" +prost = "0.12.1" +prost-types = "0.12.1" quanta = "0.10.1" quote = "1.0.18" rand = "0.7.3" @@ -611,6 +612,7 @@ syn = { version = "1.0.92", features = ["derive", "extra-traits"] } sysinfo = "0.28.4" tempfile = "3.3.0" termcolor = "1.1.2" +test-case = "3.1.0" textwrap = "0.15.0" thiserror = "1.0.37" time = { version = "0.3.24", features = ["serde"] } @@ -627,14 +629,14 @@ tokio-stream = "0.1.8" tokio-test = "0.4.1" tokio-util = { version = "0.7.2", features = ["compat", "codec"] } toml = "0.7.4" -tonic = { version = "0.8.3", features = [ +tonic = { version = "0.10.0", features = [ "tls-roots", "transport", "prost", "gzip", "codegen", ] } -tonic-reflection = "0.6.0" +tonic-reflection = "0.10.0" ureq = { version = "1.5.4", features = [ "json", "native-tls", diff --git a/api/openapi-spec-generator/Cargo.toml b/api/openapi-spec-generator/Cargo.toml index 181dde43df4c5..9e7a4c5a93ec2 100644 --- a/api/openapi-spec-generator/Cargo.toml +++ b/api/openapi-spec-generator/Cargo.toml @@ -17,6 +17,6 @@ anyhow = { workspace = true } aptos-api = { workspace = true } aptos-config = { workspace = true } aptos-mempool = { workspace = true, features = ["fuzzing"] } -aptos-storage-interface = { workspace = true, features = ["fuzzing"] } +aptos-storage-interface = { workspace = true } aptos-types = { workspace = true } clap = { workspace = true } diff --git a/api/src/accounts.rs b/api/src/accounts.rs index d21d11343552b..125e44752d814 100644 --- a/api/src/accounts.rs +++ b/api/src/accounts.rs @@ -537,7 +537,7 @@ impl Account { 
resolver .as_converter(self.context.db.clone()) - .move_struct_fields(resource_type, bytes.as_slice()) + .move_struct_fields(resource_type, &bytes) .context("Failed to convert move structs from storage") .map_err(|err| { BasicErrorWith404::internal_with_code( diff --git a/api/src/context.rs b/api/src/context.rs index c46b16ba0c4b0..ff5495d98d1ad 100644 --- a/api/src/context.rs +++ b/api/src/context.rs @@ -44,12 +44,12 @@ use aptos_types::{ transaction::{SignedTransaction, TransactionWithProof, Version}, }; use aptos_utils::aptos_try; -use aptos_vm::{ - data_cache::{AsMoveResolver, StorageAdapter}, - move_vm_ext::AptosMoveResolver, -}; +use aptos_vm::data_cache::AsMoveResolver; use futures::{channel::oneshot, SinkExt}; -use move_core_types::language_storage::{ModuleId, StructTag}; +use move_core_types::{ + language_storage::{ModuleId, StructTag}, + resolver::ModuleResolver, +}; use std::{ collections::{BTreeMap, HashMap}, ops::{Bound::Included, Deref}, @@ -259,9 +259,11 @@ impl Context { } pub fn get_state_value(&self, state_key: &StateKey, version: u64) -> Result>> { - self.db + Ok(self + .db .state_view_at_version(Some(version))? - .get_state_value_bytes(state_key) + .get_state_value_bytes(state_key)? + .map(|val| val.to_vec())) } pub fn get_state_value_poem( @@ -318,11 +320,11 @@ impl Context { StateKeyInner::AccessPath(AccessPath { address: _, path }) => { match Path::try_from(path.as_slice()) { Ok(Path::Resource(struct_tag)) => { - Some(Ok((struct_tag, v.into_bytes()))) + Some(Ok((struct_tag, v.bytes().to_vec()))) } // TODO: Consider expanding to Path::Resource Ok(Path::ResourceGroup(struct_tag)) => { - Some(Ok((struct_tag, v.into_bytes()))) + Some(Ok((struct_tag, v.bytes().to_vec()))) } Ok(Path::Code(_)) => None, Err(e) => Some(Err(anyhow::Error::from(e))), @@ -349,7 +351,7 @@ impl Context { .into_iter() .map(|(key, value)| { let is_resource_group = - |resolver: &dyn AptosMoveResolver, struct_tag: &StructTag| -> bool { + |resolver: &dyn ModuleResolver, struct_tag: &StructTag| -> bool { aptos_try!({ let md = aptos_framework::get_metadata( &resolver.get_module_metadata(&struct_tag.module_id()), @@ -410,7 +412,7 @@ impl Context { Ok((k, v)) => match k.inner() { StateKeyInner::AccessPath(AccessPath { address: _, path }) => { match Path::try_from(path.as_slice()) { - Ok(Path::Code(module_id)) => Some(Ok((module_id, v.into_bytes()))), + Ok(Path::Code(module_id)) => Some(Ok((module_id, v.bytes().to_vec()))), Ok(Path::Resource(_)) | Ok(Path::ResourceGroup(_)) => None, Err(e) => Some(Err(anyhow::Error::from(e))), } @@ -1153,17 +1155,17 @@ impl Context { .map_err(|e| { E::internal_with_code(e, AptosErrorCode::InternalError, ledger_info) })?; - let storage_adapter = StorageAdapter::new(&state_view); + let resolver = state_view.as_move_resolver(); let gas_schedule_params = - match GasScheduleV2::fetch_config(&storage_adapter).and_then(|gas_schedule| { + match GasScheduleV2::fetch_config(&resolver).and_then(|gas_schedule| { let feature_version = gas_schedule.feature_version; let gas_schedule = gas_schedule.to_btree_map(); AptosGasParameters::from_on_chain_gas_schedule(&gas_schedule, feature_version) .ok() }) { Some(gas_schedule) => Ok(gas_schedule), - None => GasSchedule::fetch_config(&storage_adapter) + None => GasSchedule::fetch_config(&resolver) .and_then(|gas_schedule| { let gas_schedule = gas_schedule.to_btree_map(); AptosGasParameters::from_on_chain_gas_schedule(&gas_schedule, 0).ok() @@ -1218,9 +1220,9 @@ impl Context { .map_err(|e| { E::internal_with_code(e, AptosErrorCode::InternalError, 
ledger_info) })?; - let storage_adapter = StorageAdapter::new(&state_view); + let resolver = state_view.as_move_resolver(); - let block_gas_limit = OnChainExecutionConfig::fetch_config(&storage_adapter) + let block_gas_limit = OnChainExecutionConfig::fetch_config(&resolver) .and_then(|config| config.block_gas_limit()); // Update the cache diff --git a/api/src/state.rs b/api/src/state.rs index c841e48710c55..a15ad3be80785 100644 --- a/api/src/state.rs +++ b/api/src/state.rs @@ -309,9 +309,11 @@ impl StateApi { BasicResponse::try_from_json((resource, &ledger_info, BasicResponseStatus::Ok)) }, - AcceptType::Bcs => { - BasicResponse::try_from_encoded((bytes, &ledger_info, BasicResponseStatus::Ok)) - }, + AcceptType::Bcs => BasicResponse::try_from_encoded(( + bytes.to_vec(), + &ledger_info, + BasicResponseStatus::Ok, + )), } } @@ -348,7 +350,7 @@ impl StateApi { match accept_type { AcceptType::Json => { - let module = MoveModuleBytecode::new(bytes) + let module = MoveModuleBytecode::new(bytes.to_vec()) .try_parse_abi() .context("Failed to parse move module ABI from bytes retrieved from storage") .map_err(|err| { @@ -361,9 +363,11 @@ impl StateApi { BasicResponse::try_from_json((module, &ledger_info, BasicResponseStatus::Ok)) }, - AcceptType::Bcs => { - BasicResponse::try_from_encoded((bytes, &ledger_info, BasicResponseStatus::Ok)) - }, + AcceptType::Bcs => BasicResponse::try_from_encoded(( + bytes.to_vec(), + &ledger_info, + BasicResponseStatus::Ok, + )), } } @@ -452,9 +456,11 @@ impl StateApi { BasicResponse::try_from_json((move_value, &ledger_info, BasicResponseStatus::Ok)) }, - AcceptType::Bcs => { - BasicResponse::try_from_encoded((bytes, &ledger_info, BasicResponseStatus::Ok)) - }, + AcceptType::Bcs => BasicResponse::try_from_encoded(( + bytes.to_vec(), + &ledger_info, + BasicResponseStatus::Ok, + )), } } @@ -505,9 +511,11 @@ impl StateApi { "Get raw table item", "Please use get table item instead.", )), - AcceptType::Bcs => { - BasicResponse::try_from_encoded((bytes, &ledger_info, BasicResponseStatus::Ok)) - }, + AcceptType::Bcs => BasicResponse::try_from_encoded(( + bytes.to_vec(), + &ledger_info, + BasicResponseStatus::Ok, + )), } } diff --git a/api/src/tests/converter_test.rs b/api/src/tests/converter_test.rs index 5b14f866162d9..935175e0cedbd 100644 --- a/api/src/tests/converter_test.rs +++ b/api/src/tests/converter_test.rs @@ -4,9 +4,10 @@ use super::new_test_context; use aptos_api_test_context::current_function_name; use aptos_api_types::{new_vm_utf8_string, AsConverter, HexEncodedBytes, MoveConverter, MoveType}; -use aptos_vm::{data_cache::AsMoveResolver, move_vm_ext::MoveResolverExt}; +use aptos_vm::data_cache::AsMoveResolver; use move_core_types::{ account_address::AccountAddress, + resolver::MoveResolver, value::{MoveStruct, MoveValue as VmMoveValue}, }; use serde::Serialize; @@ -57,7 +58,7 @@ async fn test_value_conversion() { ); } -fn assert_value_conversion( +fn assert_value_conversion( converter: &MoveConverter<'_, R>, json_move_type: &str, json_value: V, @@ -76,7 +77,7 @@ fn assert_value_conversion( assert_eq!(json_value_back, json!(json_value)); } -fn assert_value_conversion_bytes( +fn assert_value_conversion_bytes( converter: &MoveConverter<'_, R>, json_move_type: &str, vm_bytes: &[u8], diff --git a/api/src/transactions.rs b/api/src/transactions.rs index d01e61fa61bfd..8dd60091e1a81 100644 --- a/api/src/transactions.rs +++ b/api/src/transactions.rs @@ -31,7 +31,7 @@ use aptos_types::{ }, vm_status::StatusCode, }; -use aptos_vm::{data_cache::AsMoveResolver, AptosVM}; +use 
aptos_vm::{data_cache::AsMoveResolver, storage_adapter::AsExecutorView, AptosVM}; use poem_openapi::{ param::{Path, Query}, payload::Json, @@ -1317,8 +1317,8 @@ impl TransactionsApi { // Simulate transaction let state_view = self.context.latest_state_view_poem(&ledger_info)?; - let move_resolver = state_view.as_move_resolver(); - let (_, output) = AptosVM::simulate_signed_transaction(&txn, &move_resolver); + let (_, output) = + AptosVM::simulate_signed_transaction(&txn, &state_view.as_executor_view()); let version = ledger_info.version(); // Ensure that all known statuses return their values in the output (even if they aren't supposed to) diff --git a/api/types/src/convert.rs b/api/types/src/convert.rs index 33e42902f161d..3a624b3d811e3 100644 --- a/api/types/src/convert.rs +++ b/api/types/src/convert.rs @@ -34,13 +34,13 @@ use aptos_types::{ vm_status::AbortLocation, write_set::WriteOp, }; -use aptos_vm::move_vm_ext::MoveResolverExt; use move_binary_format::file_format::FunctionHandleIndex; use move_core_types::{ account_address::AccountAddress, ident_str, identifier::{IdentStr, Identifier}, language_storage::{ModuleId, StructTag, TypeTag}, + resolver::MoveResolver, value::{MoveStructLayout, MoveTypeLayout}, }; use move_resource_viewer::MoveValueAnnotator; @@ -64,7 +64,7 @@ pub struct MoveConverter<'a, R: ?Sized> { db: Arc, } -impl<'a, R: MoveResolverExt + ?Sized> MoveConverter<'a, R> { +impl<'a, R: MoveResolver + ?Sized> MoveConverter<'a, R> { pub fn new(inner: &'a R, db: Arc) -> Self { Self { inner: MoveValueAnnotator::new(inner), @@ -333,7 +333,7 @@ impl<'a, R: MoveResolverExt + ?Sized> MoveConverter<'a, R> { access_path: AccessPath, op: WriteOp, ) -> Result> { - let ret = match op.into_bytes() { + let ret = match op.bytes() { None => match access_path.get_path() { Path::Code(module_id) => vec![WriteSetChange::DeleteModule(DeleteModule { address: access_path.address.into(), @@ -355,15 +355,15 @@ impl<'a, R: MoveResolverExt + ?Sized> MoveConverter<'a, R> { Path::Code(_) => vec![WriteSetChange::WriteModule(WriteModule { address: access_path.address.into(), state_key_hash, - data: MoveModuleBytecode::new(bytes).try_parse_abi()?, + data: MoveModuleBytecode::new(bytes.to_vec()).try_parse_abi()?, })], Path::Resource(typ) => vec![WriteSetChange::WriteResource(WriteResource { address: access_path.address.into(), state_key_hash, - data: self.try_into_resource(&typ, &bytes)?, + data: self.try_into_resource(&typ, bytes)?, })], Path::ResourceGroup(_) => self - .try_into_resources_from_resource_group(&bytes)? + .try_into_resources_from_resource_group(bytes)? 
.into_iter() .map(|data| { WriteSetChange::WriteResource(WriteResource { @@ -387,7 +387,7 @@ impl<'a, R: MoveResolverExt + ?Sized> MoveConverter<'a, R> { ) -> Result { let hex_handle = handle.0.to_vec().into(); let key: HexEncodedBytes = key.into(); - let ret = match op.into_bytes() { + let ret = match op.bytes() { None => { let data = self.try_delete_table_item_into_deleted_table_data(handle, &key.0)?; @@ -400,13 +400,13 @@ impl<'a, R: MoveResolverExt + ?Sized> MoveConverter<'a, R> { }, Some(bytes) => { let data = - self.try_write_table_item_into_decoded_table_data(handle, &key.0, &bytes)?; + self.try_write_table_item_into_decoded_table_data(handle, &key.0, bytes)?; WriteSetChange::WriteTableItem(WriteTableItem { state_key_hash, handle: hex_handle, key, - value: bytes.into(), + value: bytes.to_vec().into(), data, }) }, @@ -919,7 +919,7 @@ impl<'a, R: MoveResolverExt + ?Sized> MoveConverter<'a, R> { } } -impl<'a, R: MoveResolverExt + ?Sized> ExplainVMStatus for MoveConverter<'a, R> { +impl<'a, R: MoveResolver + ?Sized> ExplainVMStatus for MoveConverter<'a, R> { fn get_module_bytecode(&self, module_id: &ModuleId) -> Result> { self.inner .get_module(module_id) @@ -930,7 +930,7 @@ pub trait AsConverter { fn as_converter(&self, db: Arc) -> MoveConverter; } -impl AsConverter for R { +impl AsConverter for R { fn as_converter(&self, db: Arc) -> MoveConverter { MoveConverter::new(self, db) } diff --git a/aptos-move/aptos-aggregator/src/aggregator_extension.rs b/aptos-move/aptos-aggregator/src/aggregator_extension.rs index 77d47a6e7b995..475146ce3fed3 100644 --- a/aptos-move/aptos-aggregator/src/aggregator_extension.rs +++ b/aptos-move/aptos-aggregator/src/aggregator_extension.rs @@ -5,7 +5,10 @@ use crate::{ delta_change_set::{addition, subtraction}, resolver::{AggregatorReadMode, AggregatorResolver}, }; -use aptos_types::{state_store::table::TableHandle, vm_status::StatusCode}; +use aptos_types::{ + state_store::{state_key::StateKey, table::TableHandle}, + vm_status::StatusCode, +}; use move_binary_format::errors::{PartialVMError, PartialVMResult}; use move_core_types::account_address::AccountAddress; use std::collections::{BTreeMap, BTreeSet}; @@ -21,25 +24,25 @@ pub enum AggregatorState { NegativeDelta, } -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq, Ord, PartialOrd)] pub struct AggregatorHandle(pub AccountAddress); /// Uniquely identifies each aggregator instance in storage. -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -pub struct AggregatorID { - // A handle that is shared across all aggregator instances created by the - // same `AggregatorFactory` and which is used for fine-grained storage - // access. - pub handle: TableHandle, - // Unique key associated with each aggregator instance. Generated by - // taking the hash of transaction which creates an aggregator and the - // number of aggregators that were created by this transaction so far. - pub key: AggregatorHandle, -} +#[derive(Debug, Clone, Hash, Eq, PartialEq, Ord, PartialOrd)] +pub struct AggregatorID(StateKey); impl AggregatorID { pub fn new(handle: TableHandle, key: AggregatorHandle) -> Self { - AggregatorID { handle, key } + let state_key = StateKey::table_item(handle, key.0.to_vec()); + AggregatorID(state_key) + } + + pub fn as_state_key(&self) -> &StateKey { + &self.0 + } + + pub fn into_state_key(self) -> StateKey { + self.0 } } @@ -242,9 +245,15 @@ impl Aggregator { // something may go wrong, so we guard by throwing an error in // extension. 
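A minimal sketch of the new AggregatorID shape introduced just above: after this change the ID is nothing more than the StateKey of the aggregator's table item, derived from the table handle and the key's address bytes. Illustrative only, and it assumes the aptos-aggregator, aptos-types and move-core-types workspace crates as dependencies.

    use aptos_aggregator::aggregator_extension::{AggregatorHandle, AggregatorID};
    use aptos_types::state_store::{state_key::StateKey, table::TableHandle};
    use move_core_types::account_address::AccountAddress;

    fn main() {
        // A V1 aggregator lives in a table: it is identified by the table handle plus a key.
        let handle = TableHandle(AccountAddress::ZERO);
        let key = AggregatorHandle(AccountAddress::ONE);

        // `new` now just builds the table-item state key from the handle and key bytes.
        let id = AggregatorID::new(handle, key);
        assert_eq!(
            id.as_state_key(),
            &StateKey::table_item(handle, key.0.to_vec())
        );

        // Ownership of the key can be taken once the ID itself is no longer needed.
        let _state_key: StateKey = id.into_state_key();
    }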
let value_from_storage = resolver - .resolve_aggregator_value(id, AggregatorReadMode::Precise) + .get_aggregator_v1_value(id.as_state_key(), AggregatorReadMode::Precise) .map_err(|e| { extension_error(format!("Could not find the value of the aggregator: {}", e)) + })? + .ok_or_else(|| { + extension_error(format!( + "Could not read from deleted aggregator at {:?}", + id + )) })?; // Validate history and apply the delta. @@ -324,7 +333,7 @@ impl AggregatorData { limit, history: None, }; - self.aggregators.insert(id, aggregator); + self.aggregators.insert(id.clone(), aggregator); self.new_aggregators.insert(id); } diff --git a/aptos-move/aptos-aggregator/src/delta_change_set.rs b/aptos-move/aptos-aggregator/src/delta_change_set.rs index 9cbdb4e310270..b24233a3cd840 100644 --- a/aptos-move/aptos-aggregator/src/delta_change_set.rs +++ b/aptos-move/aptos-aggregator/src/delta_change_set.rs @@ -5,14 +5,8 @@ //! (for accessing the storage) and an operation: a partial function with a //! postcondition. -use crate::module::AGGREGATOR_MODULE; -use aptos_state_view::StateView; -use aptos_types::{ - state_store::state_key::StateKey, - vm_status::{StatusCode, VMStatus}, - write_set::WriteOp, -}; -use move_binary_format::errors::{Location, PartialVMError, PartialVMResult}; +use aptos_types::vm_status::StatusCode; +use move_binary_format::errors::{PartialVMError, PartialVMResult}; /// When `Addition` operation overflows the `limit`. const EADD_OVERFLOW: u64 = 0x02_0001; @@ -176,41 +170,6 @@ impl DeltaOp { self.merge_with_previous_delta(previous_delta)?; Ok(()) } - - /// Consumes a single delta and tries to materialize it with a given state - /// key. If materialization succeeds, a write op is produced. Otherwise, an - /// error VM status is returned. - pub fn try_into_write_op( - self, - state_view: &dyn StateView, - state_key: &StateKey, - ) -> anyhow::Result { - // In case storage fails to fetch the value, return immediately. - let maybe_value = state_view - .get_state_value_u128(state_key) - .map_err(|e| VMStatus::error(StatusCode::STORAGE_ERROR, Some(e.to_string())))?; - - // Otherwise we have to apply delta to the storage value. - match maybe_value { - Some(base) => { - self.apply_to(base) - .map_err(|partial_error| { - // If delta application fails, transform partial VM - // error into an appropriate VM status. - partial_error - .finish(Location::Module(AGGREGATOR_MODULE.clone())) - .into_vm_status() - }) - .map(|result| WriteOp::Modification(serialize(&result))) - }, - // Something is wrong, the value to which we apply delta should - // always exist. Guard anyway. - None => Err(VMStatus::error( - StatusCode::STORAGE_ERROR, - Some("Aggregator value does not exist in storage.".to_string()), - )), - } - } } /// Implements application of `Addition` to `base`. @@ -271,12 +230,12 @@ pub fn serialize(value: &u128) -> Vec { bcs::to_bytes(value).expect("unexpected serialization error in aggregator") } -// Helper for tests, #[cfg(test)] doesn't work for cross-crate. +#[cfg(any(test, feature = "testing"))] pub fn delta_sub(v: u128, limit: u128) -> DeltaOp { DeltaOp::new(DeltaUpdate::Minus(v), limit, 0, v) } -// Helper for tests, #[cfg(test)] doesn't work for cross-crate. 
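The delta_sub helper above, delta_add below, and the materialization tests that follow all exercise one rule: a delta is a signed update applied to a stored base value, and it fails if the result would exceed the aggregator's limit or drop below zero. A self-contained sketch of that rule, using stand-in types rather than the crate's DeltaOp (which additionally tracks a history for validating speculative reads):

    #[derive(Debug, Clone, Copy)]
    enum Update {
        Plus(u128),
        Minus(u128),
    }

    /// Apply a bounded delta to a base value: additions must stay within `limit`,
    /// subtractions must not go below zero.
    fn apply(base: u128, update: Update, limit: u128) -> Result<u128, &'static str> {
        match update {
            Update::Plus(v) => {
                let result = base.checked_add(v).ok_or("u128 overflow")?;
                if result > limit {
                    Err("addition overflows the aggregator limit")
                } else {
                    Ok(result)
                }
            },
            Update::Minus(v) => base.checked_sub(v).ok_or("subtraction underflows zero"),
        }
    }

    fn main() {
        // +100 with limit 200 on top of a stored 100 materializes to 200 ...
        assert_eq!(apply(100, Update::Plus(100), 200), Ok(200));
        // ... but the same delta on top of 150 would exceed the limit,
        assert!(apply(150, Update::Plus(100), 200).is_err());
        // and -101 on top of 100 would go below zero.
        assert!(apply(100, Update::Minus(101), 1000).is_err());
    }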
+#[cfg(any(test, feature = "testing"))] pub fn delta_add(v: u128, limit: u128) -> DeltaOp { DeltaOp::new(DeltaUpdate::Plus(v), limit, v, 0) } @@ -284,12 +243,16 @@ pub fn delta_add(v: u128, limit: u128) -> DeltaOp { #[cfg(test)] mod test { use super::*; - use crate::AggregatorStore; - use aptos_state_view::TStateView; - use aptos_types::state_store::{ - state_storage_usage::StateStorageUsage, state_value::StateValue, + use crate::{ + resolver::{AggregatorReadMode, TAggregatorView}, + AggregatorStore, + }; + use aptos_types::{ + state_store::{state_key::StateKey, state_value::StateValue}, + write_set::WriteOp, }; use claims::{assert_err, assert_matches, assert_ok, assert_ok_eq}; + use move_core_types::vm_status::VMStatus; use once_cell::sync::Lazy; fn delta_add_with_history(v: u128, limit: u128, max: u128, min: u128) -> DeltaOp { @@ -540,7 +503,11 @@ mod test { let state_view = AggregatorStore::default(); let delta_op = delta_add(10, 1000); assert_matches!( - delta_op.try_into_write_op(&state_view, &KEY), + state_view.try_convert_aggregator_v1_delta_into_write_op( + &KEY, + &delta_op, + AggregatorReadMode::Precise + ), Err(VMStatus::Error { status_code: StatusCode::STORAGE_ERROR, message: Some(_), @@ -551,19 +518,20 @@ mod test { struct BadStorage; - impl TStateView for BadStorage { - type Key = StateKey; + impl TAggregatorView for BadStorage { + type IdentifierV1 = StateKey; + type IdentifierV2 = (); - fn get_state_value(&self, _state_key: &Self::Key) -> anyhow::Result> { + fn get_aggregator_v1_state_value( + &self, + _id: &Self::IdentifierV1, + _mode: AggregatorReadMode, + ) -> anyhow::Result> { Err(anyhow::Error::new(VMStatus::error( StatusCode::STORAGE_ERROR, Some("Error message from BadStorage.".to_string()), ))) } - - fn get_usage(&self) -> anyhow::Result { - unreachable!() - } } #[test] @@ -571,7 +539,11 @@ mod test { let state_view = BadStorage; let delta_op = delta_add(10, 1000); assert_matches!( - delta_op.try_into_write_op(&state_view, &KEY), + state_view.try_convert_aggregator_v1_delta_into_write_op( + &KEY, + &delta_op, + AggregatorReadMode::Precise + ), Err(VMStatus::Error { status_code: StatusCode::STORAGE_ERROR, message: Some(_), @@ -589,11 +561,19 @@ mod test { let add_op = delta_add(100, 200); let sub_op = delta_sub(100, 200); - let add_result = add_op.try_into_write_op(&state_view, &KEY); - assert_ok_eq!(add_result, WriteOp::Modification(serialize(&200))); + let add_result = state_view.try_convert_aggregator_v1_delta_into_write_op( + &KEY, + &add_op, + AggregatorReadMode::Precise, + ); + assert_ok_eq!(add_result, WriteOp::Modification(serialize(&200).into())); - let sub_result = sub_op.try_into_write_op(&state_view, &KEY); - assert_ok_eq!(sub_result, WriteOp::Modification(serialize(&0))); + let sub_result = state_view.try_convert_aggregator_v1_delta_into_write_op( + &KEY, + &sub_op, + AggregatorReadMode::Precise, + ); + assert_ok_eq!(sub_result, WriteOp::Modification(serialize(&0).into())); } #[test] @@ -606,11 +586,19 @@ mod test { let sub_op = delta_sub(101, 1000); assert_matches!( - add_op.try_into_write_op(&state_view, &KEY), + state_view.try_convert_aggregator_v1_delta_into_write_op( + &KEY, + &add_op, + AggregatorReadMode::Precise + ), Err(VMStatus::MoveAbort(_, EADD_OVERFLOW)) ); assert_matches!( - sub_op.try_into_write_op(&state_view, &KEY), + state_view.try_convert_aggregator_v1_delta_into_write_op( + &KEY, + &sub_op, + AggregatorReadMode::Precise + ), Err(VMStatus::MoveAbort(_, ESUB_UNDERFLOW)) ); } diff --git a/aptos-move/aptos-aggregator/src/lib.rs 
b/aptos-move/aptos-aggregator/src/lib.rs index 16ff9f5746c19..6d2c932cb5102 100644 --- a/aptos-move/aptos-aggregator/src/lib.rs +++ b/aptos-move/aptos-aggregator/src/lib.rs @@ -5,7 +5,6 @@ pub mod aggregator_extension; pub mod delta_change_set; mod module; pub mod resolver; -pub mod transaction; #[cfg(any(test, feature = "testing"))] pub use resolver::test_utils::{aggregator_id_for_test, AggregatorStore}; diff --git a/aptos-move/aptos-aggregator/src/resolver.rs b/aptos-move/aptos-aggregator/src/resolver.rs index 66b5d6eae2e28..89023ea66f5b3 100644 --- a/aptos-move/aptos-aggregator/src/resolver.rs +++ b/aptos-move/aptos-aggregator/src/resolver.rs @@ -1,51 +1,145 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::aggregator_extension::AggregatorID; +use crate::{ + aggregator_extension::AggregatorID, + delta_change_set::{serialize, DeltaOp}, + module::AGGREGATOR_MODULE, +}; +use aptos_types::{ + state_store::{ + state_key::StateKey, + state_value::{StateValue, StateValueMetadataKind}, + }, + write_set::WriteOp, +}; +use move_binary_format::errors::Location; +use move_core_types::vm_status::{StatusCode, VMStatus}; -/// Defines different ways a value of an aggregator can be resolved in -/// `AggregatorResolver`. The implementation of the trait can use custom -/// logic for different reading modes. +/// Defines different ways `AggregatorResolver` can be used to read its value +/// from the state. pub enum AggregatorReadMode { /// The returned value is guaranteed to be correct. Precise, - /// The returned value is based on speculation or approximation. For - /// example, while reading and accumulating deltas only some of them - /// can be taken into account. + /// The returned value is based on speculation or an approximation. For + /// example, while reading and accumulating deltas only some of them can be + /// taken into account. Speculative, } -/// Returns a value of an aggregator from cache or global storage. -/// - Ok(..) if aggregator value exists -/// - Err(..) otherwise. -pub trait AggregatorResolver { - /// Returns a value of an aggregator. - fn resolve_aggregator_value( +/// Allows to query aggregator values from the state storage. +/// Because there are two types of aggregators in the system, V1 and V2, we use +/// different code paths for each. +pub trait TAggregatorView { + // We differentiate between two possible ways to identify an aggregator in + // storage for now (V1 or V2) so that the APIs are completely separate and + // we can delete all V1 code when necessary. + type IdentifierV1; + type IdentifierV2; + + /// Aggregator V1 is implemented as a state item, and therefore the API has + /// the same pattern as for modules or resources: + /// - Ok(None) if aggregator value is not in storage, + /// - Ok(Some(...)) if aggregator value exists in storage, + /// - Err(...) otherwise (e.g. storage error or failed delta + /// application). + fn get_aggregator_v1_state_value( + &self, + id: &Self::IdentifierV1, + mode: AggregatorReadMode, + ) -> anyhow::Result>; + + fn get_aggregator_v1_value( &self, - id: &AggregatorID, + id: &Self::IdentifierV1, mode: AggregatorReadMode, - ) -> Result; + ) -> anyhow::Result> { + let maybe_state_value = self.get_aggregator_v1_state_value(id, mode)?; + match maybe_state_value { + Some(state_value) => Ok(Some(bcs::from_bytes(state_value.bytes())?)), + None => Ok(None), + } + } + + /// Because aggregator V1 is a state item, it also can have metadata (for + /// example used to calculate storage refunds). 
+ fn get_aggregator_v1_state_value_metadata( + &self, + id: &Self::IdentifierV1, + ) -> anyhow::Result> { + // When getting state value metadata for aggregator V1, we need to do a + // precise read. + let maybe_state_value = + self.get_aggregator_v1_state_value(id, AggregatorReadMode::Precise)?; + Ok(maybe_state_value.map(StateValue::into_metadata)) + } + + fn get_aggregator_v2_value( + &self, + _id: &Self::IdentifierV2, + _mode: AggregatorReadMode, + ) -> anyhow::Result { + unimplemented!("Aggregator V2 is not yet supported") + } /// Returns a unique per-block identifier that can be used when creating a - /// new aggregator. - fn generate_aggregator_id(&self) -> AggregatorID; + /// new aggregator V2. + fn generate_aggregator_v2_id(&self) -> Self::IdentifierV2 { + unimplemented!("ID generation for Aggregator V2 is not yet supported") + } + + /// Consumes a single delta of aggregator V1, and tries to materialize it + /// with a given identifier (state key). If materialization succeeds, a + /// write op is produced. + fn try_convert_aggregator_v1_delta_into_write_op( + &self, + id: &Self::IdentifierV1, + delta_op: &DeltaOp, + mode: AggregatorReadMode, + ) -> anyhow::Result { + let base = self + .get_aggregator_v1_value(id, mode) + .map_err(|e| VMStatus::error(StatusCode::STORAGE_ERROR, Some(e.to_string())))? + .ok_or_else(|| { + VMStatus::error( + StatusCode::STORAGE_ERROR, + Some("Cannot convert delta for deleted aggregator".to_string()), + ) + })?; + delta_op + .apply_to(base) + .map_err(|partial_error| { + partial_error + .finish(Location::Module(AGGREGATOR_MODULE.clone())) + .into_vm_status() + }) + .map(|result| WriteOp::Modification(serialize(&result).into())) + } +} + +pub trait AggregatorResolver: + TAggregatorView +{ +} + +impl> AggregatorResolver + for T +{ } // Utils to store aggregator values in data store. Here, we -// only care about aggregators which are state items. +// only care about aggregators which are state items (V1). #[cfg(any(test, feature = "testing"))] pub mod test_utils { use super::*; use crate::{aggregator_extension::AggregatorHandle, delta_change_set::serialize}; - use aptos_state_view::TStateView; use aptos_types::state_store::{ - state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, - table::TableHandle, + state_key::StateKey, state_value::StateValue, table::TableHandle, }; use move_core_types::account_address::AccountAddress; use std::collections::HashMap; - /// Generates a dummy id for aggregator based on the given key. Only used for testing. + /// Generates a dummy identifier for aggregator V1 based on the given key. 
pub fn aggregator_id_for_test(key: u128) -> AggregatorID { let bytes: Vec = [key.to_le_bytes(), key.to_le_bytes()] .iter() @@ -60,51 +154,25 @@ pub mod test_utils { impl AggregatorStore { pub fn set_from_id(&mut self, id: AggregatorID, value: u128) { - let AggregatorID { handle, key } = id; - let state_key = StateKey::table_item(handle, key.0.to_vec()); - self.set_from_state_key(state_key, value); + self.set_from_state_key(id.into_state_key(), value); } pub fn set_from_state_key(&mut self, state_key: StateKey, value: u128) { self.0 - .insert(state_key, StateValue::new_legacy(serialize(&value))); + .insert(state_key, StateValue::new_legacy(serialize(&value).into())); } } - impl AggregatorResolver for AggregatorStore { - fn resolve_aggregator_value( + impl TAggregatorView for AggregatorStore { + type IdentifierV1 = StateKey; + type IdentifierV2 = AggregatorID; + + fn get_aggregator_v1_state_value( &self, - id: &AggregatorID, + state_key: &Self::IdentifierV1, _mode: AggregatorReadMode, - ) -> Result { - let AggregatorID { handle, key } = id; - let state_key = StateKey::table_item(*handle, key.0.to_vec()); - match self.get_state_value_u128(&state_key)? { - Some(value) => Ok(value), - None => { - anyhow::bail!("Could not find the value of the aggregator") - }, - } - } - - fn generate_aggregator_id(&self) -> AggregatorID { - unimplemented!("Aggregator id generation will be implemented for V2 aggregators.") - } - } - - impl TStateView for AggregatorStore { - type Key = StateKey; - - fn get_state_value(&self, state_key: &Self::Key) -> anyhow::Result> { + ) -> anyhow::Result> { Ok(self.0.get(state_key).cloned()) } - - fn get_usage(&self) -> anyhow::Result { - let mut usage = StateStorageUsage::new_untracked(); - for (k, v) in self.0.iter() { - usage.add_item(k.size() + v.size()) - } - Ok(usage) - } } } diff --git a/aptos-move/aptos-aggregator/src/transaction.rs b/aptos-move/aptos-aggregator/src/transaction.rs deleted file mode 100644 index b44980354f426..0000000000000 --- a/aptos-move/aptos-aggregator/src/transaction.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use aptos_types::write_set::TransactionWrite; - -/// Helpful trait for e.g. extracting u128 value out of TransactionWrite that we know is -/// for aggregator (i.e. if we have seen a DeltaOp for the same access path). -pub struct AggregatorValue(u128); - -impl AggregatorValue { - /// Returns None if the write doesn't contain a value (i.e deletion), and panics if - /// the value raw bytes can't be deserialized into an u128. 
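The AggregatorValue helper being removed below used to pull a u128 out of a write's raw bytes; in the new scheme that decode lives behind TAggregatorView, whose default get_aggregator_v1_value BCS-decodes whatever get_aggregator_v1_state_value returns (Ok(None) when the item is absent, Ok(Some(..)) when it exists, Err(..) on a storage failure). A rough, self-contained mirror of that contract, with stand-in types instead of the real StateKey/StateValue and assuming only the anyhow and bcs crates:

    use std::collections::HashMap;

    // Stand-ins for the real StateKey / StateValue; a value is just its raw bytes.
    type Key = Vec<u8>;
    type Value = Vec<u8>;

    trait AggregatorViewSketch {
        /// Raw read: Ok(None) if absent, Ok(Some(bytes)) if present, Err(..) on failure.
        fn get_v1_state_value(&self, id: &Key) -> anyhow::Result<Option<Value>>;

        /// Default decode step: interpret the stored bytes as a BCS-encoded u128.
        fn get_v1_value(&self, id: &Key) -> anyhow::Result<Option<u128>> {
            Ok(match self.get_v1_state_value(id)? {
                Some(bytes) => Some(bcs::from_bytes(&bytes)?),
                None => None,
            })
        }
    }

    struct InMemoryStore(HashMap<Key, Value>);

    impl AggregatorViewSketch for InMemoryStore {
        fn get_v1_state_value(&self, id: &Key) -> anyhow::Result<Option<Value>> {
            Ok(self.0.get(id).cloned())
        }
    }

    fn main() -> anyhow::Result<()> {
        let key = b"aggregator_item".to_vec();
        let mut store = InMemoryStore(HashMap::new());
        store.0.insert(key.clone(), bcs::to_bytes(&10u128)?);

        assert_eq!(store.get_v1_value(&key)?, Some(10));
        assert_eq!(store.get_v1_value(&b"missing".to_vec())?, None);
        Ok(())
    }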
- pub fn from_write(write: &dyn TransactionWrite) -> Option { - let v = write.extract_raw_bytes(); - v.map(|bytes| { - Self( - bcs::from_bytes(&bytes) - .expect("Deserializing into an aggregator value always succeeds"), - ) - }) - } - - pub fn into(self) -> u128 { - self.0 - } -} diff --git a/aptos-move/aptos-debugger/src/lib.rs b/aptos-move/aptos-debugger/src/lib.rs index 2e91026a82af7..ddad4a0536c1b 100644 --- a/aptos-move/aptos-debugger/src/lib.rs +++ b/aptos-move/aptos-debugger/src/lib.rs @@ -23,7 +23,7 @@ use aptos_validator_interface::{ AptosValidatorInterface, DBDebuggerInterface, DebuggerStateView, RestDebuggerInterface, }; use aptos_vm::{ - data_cache::StorageAdapter, + data_cache::AsMoveResolver, move_vm_ext::{MoveVmExt, SessionExt, SessionId}, AptosVM, VMExecutor, }; @@ -72,33 +72,36 @@ impl AptosDebugger { .check_signature() .map_err(|err| format_err!("Unexpected VM Error: {:?}", err))?; - let (status, output, gas_profiler) = - AptosVM::execute_user_transaction_with_custom_gas_meter( - &state_view, - &txn, - &log_context, - |gas_feature_version, gas_params, storage_gas_params, balance| { - let gas_meter = - MemoryTrackedGasMeter::new(StandardGasMeter::new(StandardGasAlgebra::new( - gas_feature_version, - gas_params, - storage_gas_params, - balance, - ))); - let gas_profiler = match txn.payload() { - TransactionPayload::Script(_) => GasProfiler::new_script(gas_meter), - TransactionPayload::EntryFunction(entry_func) => GasProfiler::new_function( - gas_meter, - entry_func.module().clone(), - entry_func.function().to_owned(), - entry_func.ty_args().to_vec(), - ), - TransactionPayload::ModuleBundle(..) => unreachable!("not supported"), - TransactionPayload::Multisig(..) => unimplemented!("not supported yet"), - }; - Ok(gas_profiler) - }, - )?; + // TODO(Gas): revisit this. + let vm = AptosVM::new_from_state_view(&state_view); + let resolver = state_view.as_move_resolver(); + + let (status, output, gas_profiler) = vm.execute_user_transaction_with_custom_gas_meter( + &resolver, + &txn, + &log_context, + |gas_feature_version, gas_params, storage_gas_params, balance| { + let gas_meter = + MemoryTrackedGasMeter::new(StandardGasMeter::new(StandardGasAlgebra::new( + gas_feature_version, + gas_params, + storage_gas_params, + balance, + ))); + let gas_profiler = match txn.payload() { + TransactionPayload::Script(_) => GasProfiler::new_script(gas_meter), + TransactionPayload::EntryFunction(entry_func) => GasProfiler::new_function( + gas_meter, + entry_func.module().clone(), + entry_func.function().to_owned(), + entry_func.ty_args().to_vec(), + ), + TransactionPayload::ModuleBundle(..) => unreachable!("not supported"), + TransactionPayload::Multisig(..) 
=> unimplemented!("not supported yet"), + }; + Ok(gas_profiler) + }, + )?; Ok((status, output, gas_profiler.finish())) } @@ -175,7 +178,7 @@ impl AptosDebugger { version: Version, ) -> Result> { let state_view = DebuggerStateView::new(self.debugger.clone(), version); - let remote_storage = StorageAdapter::new(&state_view); + let remote_storage = state_view.as_move_resolver(); let annotator = AptosValueAnnotator::new(&remote_storage); Ok( match self @@ -195,7 +198,7 @@ impl AptosDebugger { ) -> Result> { let accounts = self.debugger.get_admin_accounts(version).await?; let state_view = DebuggerStateView::new(self.debugger.clone(), version); - let remote_storage = StorageAdapter::new(&state_view); + let remote_storage = state_view.as_move_resolver(); let annotator = AptosValueAnnotator::new(&remote_storage); let mut result = vec![]; @@ -224,7 +227,7 @@ impl AptosDebugger { F: FnOnce(&mut SessionExt) -> VMResult<()>, { let state_view = DebuggerStateView::new(self.debugger.clone(), version); - let state_view_storage = StorageAdapter::new(&state_view); + let state_view_storage = state_view.as_move_resolver(); let features = Features::fetch_config(&state_view_storage).unwrap_or_default(); let move_vm = MoveVmExt::new( NativeGasParameters::zeros(), diff --git a/aptos-move/aptos-release-builder/data/release.yaml b/aptos-move/aptos-release-builder/data/release.yaml index 4394193507ab3..57bea9ca73ae3 100644 --- a/aptos-move/aptos-release-builder/data/release.yaml +++ b/aptos-move/aptos-release-builder/data/release.yaml @@ -28,16 +28,6 @@ proposals: - Framework: bytecode_version: 6 git_hash: ~ - - name: enable_aggregator_snapshots - metadata: - title: "Enable Aggregator Snapshots" - description: "AIP-43: Aggregator snapshots can be used to postpone the read operations on aggregators to the commit phase and improve concurrency of transaction execution." - discussion_url: "https://github.com/aptos-foundation/AIPs/issues/209" - execution_mode: MultiStep - update_sequence: - - FeatureFlag: - enabled: - - aggregator_snapshots - name: enable_signature_checker_v2_fix metadata: title: "Enable a fix signature checker v2" diff --git a/aptos-move/aptos-release-builder/src/main.rs b/aptos-move/aptos-release-builder/src/main.rs index a1e93f8e2f973..1e6d217433d7d 100644 --- a/aptos-move/aptos-release-builder/src/main.rs +++ b/aptos-move/aptos-release-builder/src/main.rs @@ -3,6 +3,7 @@ use anyhow::Context; use aptos_crypto::{ed25519::Ed25519PrivateKey, ValidCryptoMaterialStringExt}; +use aptos_framework::natives::code::PackageRegistry; use aptos_release_builder::{ components::fetch_config, initialize_aptos_core_path, @@ -61,6 +62,23 @@ pub enum Commands { #[clap(short, long)] print_gas_schedule: bool, }, + /// Print out package metadata. + /// Usage: --endpoint '' + /// --package-address
--package-name [--print-json] + PrintPackageMetadata { + /// Url endpoint for the desired network. e.g: https://fullnode.mainnet.aptoslabs.com/v1. + #[clap(short, long)] + endpoint: url::Url, + /// The address under which the package is published + #[clap(long)] + package_address: String, + /// The name of the package + #[clap(long)] + package_name: String, + /// Whether to print the original data in json + #[clap(long)] + print_json: bool, + }, } #[derive(Subcommand, Debug)] @@ -84,7 +102,7 @@ pub enum InputOptions { } #[tokio::main] -async fn main() { +async fn main() -> anyhow::Result<()> { let args = Argument::parse(); initialize_aptos_core_path(args.aptos_core_path.clone()); @@ -93,15 +111,16 @@ async fn main() { Commands::GenerateProposals { release_config, output_dir, - } => aptos_release_builder::ReleaseConfig::load_config(release_config.as_path()) - .with_context(|| "Failed to load release config".to_string()) - .unwrap() - .generate_release_proposal_scripts(output_dir.as_path()) - .with_context(|| "Failed to generate release proposal scripts".to_string()) - .unwrap(), - Commands::WriteDefault { output_path } => aptos_release_builder::ReleaseConfig::default() - .save_config(output_path.as_path()) - .unwrap(), + } => { + aptos_release_builder::ReleaseConfig::load_config(release_config.as_path()) + .with_context(|| "Failed to load release config".to_string())? + .generate_release_proposal_scripts(output_dir.as_path()) + .with_context(|| "Failed to generate release proposal scripts".to_string())?; + Ok(()) + }, + Commands::WriteDefault { output_path } => { + aptos_release_builder::ReleaseConfig::default().save_config(output_path.as_path()) + }, Commands::ValidateProposals { release_config, input_option, @@ -111,36 +130,31 @@ async fn main() { output_dir, } => { let config = - aptos_release_builder::ReleaseConfig::load_config(release_config.as_path()) - .unwrap(); + aptos_release_builder::ReleaseConfig::load_config(release_config.as_path())?; let root_key_path = aptos_temppath::TempPath::new(); - root_key_path.create_as_file().unwrap(); + root_key_path.create_as_file()?; let mut network_config = match input_option { InputOptions::FromDirectory { test_dir } => { aptos_release_builder::validate::NetworkConfig::new_from_dir( endpoint.clone(), test_dir.as_path(), - ) - .unwrap() + )? }, InputOptions::FromArgs { root_key, validator_address, validator_key, } => { - let root_key = Ed25519PrivateKey::from_encoded_string(&root_key).unwrap(); - let validator_key = - Ed25519PrivateKey::from_encoded_string(&validator_key).unwrap(); - let validator_account = - AccountAddress::from_hex(validator_address.as_bytes()).unwrap(); + let root_key = Ed25519PrivateKey::from_encoded_string(&root_key)?; + let validator_key = Ed25519PrivateKey::from_encoded_string(&validator_key)?; + let validator_account = AccountAddress::from_hex(validator_address.as_bytes())?; let mut root_key_path = root_key_path.path().to_path_buf(); root_key_path.set_extension("key"); - std::fs::write(root_key_path.as_path(), bcs::to_bytes(&root_key).unwrap()) - .unwrap(); + std::fs::write(root_key_path.as_path(), bcs::to_bytes(&root_key)?)?; aptos_release_builder::validate::NetworkConfig { root_key_path, @@ -157,8 +171,7 @@ async fn main() { if mint_to_validator { let chain_id = aptos_rest_client::Client::new(endpoint) .get_ledger_information() - .await - .unwrap() + .await? 
.inner() .chain_id; @@ -166,25 +179,23 @@ async fn main() { panic!("Mint to mainnet/testnet is not allowed"); } - network_config.mint_to_validator().await.unwrap(); + network_config.mint_to_validator().await?; } network_config .set_fast_resolve(FAST_RESOLUTION_TIME) - .await - .unwrap(); + .await?; aptos_release_builder::validate::validate_config_and_generate_release( config, network_config.clone(), output_dir, ) - .await - .unwrap(); + .await?; // Reset resolution time back to normal after resolution network_config .set_fast_resolve(DEFAULT_RESOLUTION_TIME) - .await - .unwrap() + .await?; + Ok(()) }, Commands::PrintConfigs { endpoint, @@ -198,7 +209,7 @@ async fn main() { ($($type:ty), *) => { $( println!("{}", std::any::type_name::<$type>()); - println!("{}", serde_yaml::to_string(&fetch_config::<$type>(&client).unwrap()).unwrap()); + println!("{}", serde_yaml::to_string(&fetch_config::<$type>(&client)?)?); )* } } @@ -210,14 +221,37 @@ async fn main() { } // Print Activated Features - let features = fetch_config::(&client).unwrap(); + let features = fetch_config::(&client)?; println!( "Features\n{}", serde_yaml::to_string( &aptos_release_builder::components::feature_flags::Features::from(&features) - ) - .unwrap() + )? ); + Ok(()) + }, + Commands::PrintPackageMetadata { + endpoint, + package_address, + package_name, + print_json, + } => { + let client = aptos_rest_client::Client::new(endpoint); + let address = AccountAddress::from_str_strict(&package_address)?; + let packages = client + .get_account_resource_bcs::(address, "0x1::code::PackageRegistry") + .await?; + for package in packages.into_inner().packages { + if package.name == package_name { + if print_json { + println!("{}", serde_json::to_string(&package).unwrap()); + } else { + println!("{}", package); + } + break; + } + } + Ok(()) }, } } diff --git a/aptos-move/aptos-resource-viewer/src/lib.rs b/aptos-move/aptos-resource-viewer/src/lib.rs index 6289debc77438..d956510564828 100644 --- a/aptos-move/aptos-resource-viewer/src/lib.rs +++ b/aptos-move/aptos-resource-viewer/src/lib.rs @@ -7,8 +7,7 @@ use aptos_types::{ access_path::AccessPath, account_address::AccountAddress, account_state::AccountState, contract_event::ContractEvent, }; -use aptos_vm::move_vm_ext::MoveResolverExt; -use move_core_types::language_storage::StructTag; +use move_core_types::{language_storage::StructTag, resolver::MoveResolver}; use move_resource_viewer::MoveValueAnnotator; pub use move_resource_viewer::{AnnotatedMoveStruct, AnnotatedMoveValue}; use std::{ @@ -22,7 +21,7 @@ pub struct AptosValueAnnotator<'a, T>(MoveValueAnnotator<'a, T>); #[derive(Debug)] pub struct AnnotatedAccountStateBlob(BTreeMap); -impl<'a, T: MoveResolverExt> AptosValueAnnotator<'a, T> { +impl<'a, T: MoveResolver> AptosValueAnnotator<'a, T> { pub fn new(storage: &'a T) -> Self { Self(MoveValueAnnotator::new(storage)) } diff --git a/aptos-move/aptos-transactional-test-harness/Cargo.toml b/aptos-move/aptos-transactional-test-harness/Cargo.toml deleted file mode 100644 index 058bdf82ac13b..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/Cargo.toml +++ /dev/null @@ -1,47 +0,0 @@ -[package] -name = "aptos-transactional-test-harness" -description = "Transactional testing framework for Move" -version = "0.1.0" - -# Workspace inherited keys -authors = { workspace = true } -edition = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -publish = { workspace = true } -repository = { workspace = true } -rust-version = { workspace = true } - 
-[dependencies] -anyhow = { workspace = true } -aptos-api-types = { workspace = true } -aptos-cached-packages = { workspace = true } -aptos-crypto = { workspace = true } -aptos-framework = { workspace = true } -aptos-gas-schedule = { workspace = true } -aptos-language-e2e-tests = { workspace = true } -aptos-state-view = { workspace = true } -aptos-storage-interface = { workspace = true } -aptos-types = { workspace = true } -aptos-vm = { workspace = true } -aptos-vm-genesis = { workspace = true } -bcs = { workspace = true } -clap = { workspace = true } -hex = { workspace = true } -move-binary-format = { workspace = true, features = [ "fuzzing" ] } -move-command-line-common = { workspace = true } -move-compiler = { workspace = true } -move-core-types = { workspace = true, features = [ "fuzzing" ] } -move-resource-viewer = { workspace = true } -move-transactional-test-runner = { workspace = true } -move-vm-runtime = { workspace = true } -once_cell = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } - -[dev-dependencies] -datatest-stable = { workspace = true } - -[[test]] -name = "tests" -harness = false diff --git a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs deleted file mode 100644 index 87715d43dfe1a..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs +++ /dev/null @@ -1,979 +0,0 @@ -// Copyright © Aptos Foundation -// Parts of the project are originally copyright © Meta Platforms, Inc. -// SPDX-License-Identifier: Apache-2.0 - -use anyhow::{bail, format_err, Result}; -use aptos_api_types::AsConverter; -use aptos_crypto::{ - ed25519::{Ed25519PrivateKey, Ed25519PublicKey}, - hash::HashValue, - ValidCryptoMaterialStringExt, -}; -use aptos_gas_schedule::{InitialGasSchedule, TransactionGasParameters}; -use aptos_language_e2e_tests::data_store::{FakeDataStore, GENESIS_CHANGE_SET_HEAD}; -use aptos_state_view::TStateView; -use aptos_types::{ - access_path::AccessPath, - account_config::{aptos_test_root_address, AccountResource, CoinStoreResource}, - block_metadata::BlockMetadata, - chain_id::ChainId, - contract_event::ContractEvent, - state_store::{state_key::StateKey, table::TableHandle}, - transaction::{ - EntryFunction as TransactionEntryFunction, ExecutionStatus, Module as TransactionModule, - RawTransaction, Script as TransactionScript, Transaction, TransactionOutput, - TransactionStatus, - }, -}; -use aptos_vm::{data_cache::AsMoveResolver, AptosVM, VMExecutor}; -use aptos_vm_genesis::GENESIS_KEYPAIR; -use clap::Parser; -use move_binary_format::file_format::{CompiledModule, CompiledScript}; -use move_command_line_common::{ - address::ParsedAddress, files::verify_and_create_named_address_mapping, -}; -use move_compiler::{self, shared::PackagePaths, FullyCompiledProgram}; -use move_core_types::{ - account_address::AccountAddress, - identifier::{IdentStr, Identifier}, - language_storage::{ModuleId, TypeTag}, - move_resource::MoveStructType, - parser::parse_type_tag, - transaction_argument::{convert_txn_args, TransactionArgument}, - value::{MoveTypeLayout, MoveValue}, -}; -use move_resource_viewer::{AnnotatedMoveValue, MoveValueAnnotator}; -use move_transactional_test_runner::{ - framework::{run_test_impl, CompiledState, MoveTestAdapter}, - tasks::{InitCommand, SyntaxChoice, TaskInput}, - vm_test_harness::{view_resource_in_move_storage, TestRunConfig}, -}; -use move_vm_runtime::session::SerializedReturnValues; -use 
once_cell::sync::Lazy; -use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, - convert::TryFrom, - fmt, - path::Path, - string::String, - sync::Arc, -}; - -/** - * Definitions - */ - -/// The Aptos transaction test adapter. -/// -/// This differs from the SimpleVMTestAdapter in a few ways to ensure that our tests mimics -/// production settings: -/// - It uses a StateView as its storage backend -/// - It executes transactions through AptosVM, instead of MoveVM directly -struct AptosTestAdapter<'a> { - compiled_state: CompiledState<'a>, - storage: FakeDataStore, - default_syntax: SyntaxChoice, - private_key_mapping: BTreeMap, -} - -/// Parameters *required* to create a transaction. -struct TransactionParameters { - pub sequence_number: u64, - pub max_gas_amount: u64, - pub gas_unit_price: u64, - pub expiration_timestamp_secs: u64, -} - -/// Aptos-specific arguments for the publish command. -#[derive(Parser, Debug)] -struct AptosPublishArgs { - #[clap(long = "private-key", value_parser = RawPrivateKey::parse)] - private_key: Option, - - #[clap(long = "expiration")] - expiration_time: Option, - - #[clap(long = "sequence-number")] - sequence_number: Option, - - #[clap(long = "gas-price")] - gas_unit_price: Option, - - #[clap(long = "override-signer", value_parser= ParsedAddress::parse)] - override_signer: Option, -} - -#[derive(Debug, Clone)] -struct SignerAndKeyPair { - address: ParsedAddress, - private_key: Option, -} - -/// Aptos-specifc arguments for the run command. -#[derive(Parser, Debug)] -struct AptosRunArgs { - #[clap(long = "private-key", value_parser = RawPrivateKey::parse)] - private_key: Option, - - #[clap(long = "script")] - script: bool, - - #[clap(long = "expiration")] - expiration_time: Option, - - #[clap(long = "sequence-number")] - sequence_number: Option, - - #[clap(long = "gas-price")] - gas_unit_price: Option, - - #[clap(long = "show-events")] - show_events: bool, - - #[clap(long = "secondary-signers", value_parser = SignerAndKeyPair::parse, num_args = 0..)] - secondary_signers: Option>, -} - -/// Aptos-specifc arguments for the init command. -#[derive(Parser, Debug)] -struct AptosInitArgs { - #[clap(long = "private-keys", value_parser = parse_named_private_key, num_args = 0..)] - private_keys: Option>, - #[clap(long = "initial-coins")] - initial_coins: Option, -} - -/// A raw private key -- either a literal or an unresolved name. -#[derive(Debug, Clone)] -enum RawPrivateKey { - Named(Identifier), - Anonymous(Ed25519PrivateKey), -} - -/// Command to initiate a block metadata transaction. -#[derive(Parser, Debug)] -struct BlockCommand { - #[clap(long = "proposer", value_parser = ParsedAddress::parse)] - proposer: ParsedAddress, - - #[clap(long = "time")] - time: u64, -} - -/// Command to view a table item. -#[derive(Parser, Debug)] -struct ViewTableCommand { - #[clap(long = "table_handle")] - table_handle: AccountAddress, - - #[clap(long = "key_type", value_parser = parse_type_tag)] - key_type: TypeTag, - - #[clap(long = "value_type", value_parser = parse_type_tag)] - value_type: TypeTag, - - #[clap(long = "key_value", value_parser = parse_value)] - key_value: serde_json::Value, -} - -fn parse_value(input: &str) -> Result { - serde_json::from_str(input) -} - -/// Custom commands for the transactional test flow. 
-#[derive(Parser, Debug)] -enum AptosSubCommand { - #[clap(name = "block")] - BlockCommand(BlockCommand), - - #[clap(name = "view_table")] - ViewTableCommand(ViewTableCommand), -} - -/** - * Parsing - */ - -fn parse_ed25519_private_key(s: &str) -> Result { - Ok(Ed25519PrivateKey::from_encoded_string(s)?) -} - -impl RawPrivateKey { - fn parse(s: &str) -> Result { - if let Ok(private_key) = parse_ed25519_private_key(s) { - return Ok(Self::Anonymous(private_key)); - } - let name = Identifier::new(s) - .map_err(|_| format_err!("Failed to parse '{}' as private key.", s))?; - Ok(Self::Named(name)) - } -} - -fn parse_named_private_key(s: &str) -> Result<(Identifier, Ed25519PrivateKey)> { - let before_after = s.split('=').collect::>(); - - if before_after.len() != 2 { - bail!("Invalid named private key assignment. Must be of the form =, but found '{}'", s); - } - - let name = Identifier::new(before_after[0]) - .map_err(|_| format_err!("Invalid private key name '{}'", s))?; - let private_key = parse_ed25519_private_key(before_after[1])?; - - Ok((name, private_key)) -} - -impl SignerAndKeyPair { - fn parse(s: &str) -> Result { - if let Ok(address) = ParsedAddress::parse(s) { - return Ok(Self { - address, - private_key: None, - }); - }; - - let before_after = s.split('=').collect::>(); - - if before_after.len() != 2 { - bail!("Invalid signer and key pair. Must be of the form = or , but found '{}'", s); - } - - let address = ParsedAddress::parse(before_after[0])?; - let private_key = RawPrivateKey::parse(before_after[1])?; - - Ok(Self { - address, - private_key: Some(private_key), - }) - } -} - -pub struct FakeDbReader {} - -impl aptos_storage_interface::DbReader for FakeDbReader { - fn indexer_enabled(&self) -> bool { - false - } -} - -/** - * Helpers - */ - -/// Default private key mappings for special Aptos accounts. -fn aptos_framework_private_key_mapping() -> Vec<(String, Ed25519PrivateKey)> { - vec![("Root".to_string(), GENESIS_KEYPAIR.0.clone())] -} - -fn panic_missing_private_key_named(cmd_name: &str, name: &str) -> ! { - panic!( - "Missing private key. Either add a `--private-key ` argument \ - to the {} command, or associate an address to the \ - name '{}' in the init command.", - cmd_name, name, - ) -} - -fn panic_missing_private_key(cmd_name: &str) -> ! { - panic!( - "Missing private key. Try adding a `--private-key ` \ - argument to the {} command.", - cmd_name - ) -} - -static PRECOMPILED_APTOS_FRAMEWORK: Lazy = Lazy::new(|| { - let deps = vec![PackagePaths { - name: None, - paths: aptos_cached_packages::head_release_bundle() - .files() - .unwrap(), - named_address_map: aptos_framework::named_addresses().clone(), - }]; - let program_res = move_compiler::construct_pre_compiled_lib( - deps, - None, - move_compiler::Flags::empty().set_sources_shadow_deps(false), - aptos_framework::extended_checks::get_all_attribute_names(), - ) - .unwrap(); - match program_res { - Ok(af) => af, - Err((files, errors)) => { - eprintln!("!!!Aptos Framework failed to compile!!!"); - move_compiler::diagnostics::report_diagnostics(&files, errors) - }, - } -}); - -/** - * Test Adapter Implementation - */ - -impl<'a> AptosTestAdapter<'a> { - /// Look up the named private key in the mapping. - fn resolve_named_private_key(&self, s: &IdentStr) -> Ed25519PrivateKey { - if let Some(private_key) = self.private_key_mapping.get(s.as_str()) { - return private_key.clone(); - } - panic!("Failed to resolve private key '{}'", s) - } - - /// Resolve a raw private key into a numeric one. 
- fn resolve_private_key(&self, private_key: &RawPrivateKey) -> Ed25519PrivateKey { - match private_key { - RawPrivateKey::Anonymous(private_key) => private_key.clone(), - RawPrivateKey::Named(name) => self.resolve_named_private_key(name), - } - } - - /// Resolve addresses and private keys for secondary signers. - fn resolve_secondary_signers( - &mut self, - secondary_signers: &[SignerAndKeyPair], - ) -> (Vec, Vec) { - let mut addresses = vec![]; - let mut private_keys = vec![]; - - for SignerAndKeyPair { - address, - private_key, - } in secondary_signers - { - addresses.push(self.compiled_state().resolve_address(address)); - - let resolved_private_key = match (private_key, address) { - (Some(private_key), _) => self.resolve_private_key(private_key), - (None, ParsedAddress::Named(named_addr)) => { - match self.private_key_mapping.get(named_addr) { - Some(private_key) => private_key.clone(), - None => panic!( - "Failed to resolve private key for secondary signer {}.", - named_addr - ), - } - }, - (None, ParsedAddress::Numerical(addr)) => { - panic!("No private key provided for secondary signer {}.", addr) - }, - }; - - private_keys.push(resolved_private_key); - } - - (addresses, private_keys) - } - - /// Obtain a Rust representation of the account resource from storage, which is used to derive - /// a few default transaction parameters. - fn fetch_account_resource(&self, signer_addr: &AccountAddress) -> Result { - let account_access_path = - AccessPath::resource_access_path(*signer_addr, AccountResource::struct_tag()) - .expect("access path in test"); - let account_blob = self - .storage - .get_state_value_bytes(&StateKey::access_path(account_access_path)) - .unwrap() - .ok_or_else(|| { - format_err!( - "Failed to fetch account resource under address {}. Has the account been created?", - signer_addr - ) - })?; - Ok(bcs::from_bytes(&account_blob).unwrap()) - } - - /// Obtain the AptosCoin amount under address `signer_addr` - fn fetch_account_balance(&self, signer_addr: &AccountAddress) -> Result { - let aptos_coin_tag = CoinStoreResource::struct_tag(); - - let coin_access_path = - AccessPath::resource_access_path(*signer_addr, aptos_coin_tag.clone()) - .expect("access path in test"); - - let balance_blob = self - .storage - .get_state_value_bytes(&StateKey::access_path(coin_access_path)) - .unwrap() - .ok_or_else(|| { - format_err!( - "Failed to fetch balance resource under address {}.", - signer_addr - ) - })?; - - let annotated = MoveValueAnnotator::new(&self.storage.as_move_resolver()) - .view_resource(&aptos_coin_tag, &balance_blob)?; - - // Filter the Coin resource and return the resouce value - for (key, val) in annotated.value { - if key != Identifier::new("coin").unwrap() { - continue; - } - - if let AnnotatedMoveValue::Struct(s) = val { - for (key, val) in s.value { - if key != Identifier::new("value").unwrap() { - continue; - } - - if let AnnotatedMoveValue::U64(v) = val { - return Ok(v); - } - } - } - } - - bail!("Failed to fetch balance under address {}.", signer_addr) - } - - /// Derive the default transaction parameters from the account and balance resources fetched - /// from storage. In the future, we are planning to allow the user to override these using - /// command arguments. 
- fn fetch_transaction_parameters( - &self, - signer_addr: &AccountAddress, - sequence_number: Option, - expiration_time: Option, - gas_unit_price: Option, - max_gas_amount: Option, - ) -> Result { - let account_resource = self.fetch_account_resource(signer_addr)?; - - let sequence_number = sequence_number.unwrap_or_else(|| account_resource.sequence_number()); - let max_number_of_gas_units = - TransactionGasParameters::initial().maximum_number_of_gas_units; - let gas_unit_price = gas_unit_price.unwrap_or(1000); - let max_gas_amount = match max_gas_amount { - Some(max_gas_amount) => max_gas_amount, - None => { - if gas_unit_price == 0 { - u64::from(max_number_of_gas_units) - } else { - let account_balance = self.fetch_account_balance(signer_addr).unwrap(); - std::cmp::min( - u64::from(max_number_of_gas_units), - account_balance / gas_unit_price, - ) - } - }, - }; - let expiration_timestamp_secs = expiration_time.unwrap_or(40000); - - Ok(TransactionParameters { - sequence_number, - gas_unit_price, - max_gas_amount, - expiration_timestamp_secs, - }) - } - - /// Perform a single transaction. - /// - /// Should error if the transaction ends up being discarded, or having a status other than - /// EXECUTED. - fn run_transaction(&mut self, txn: Transaction) -> Result { - let mut outputs = AptosVM::execute_block(vec![txn], &self.storage.clone(), None)?; - - assert_eq!(outputs.len(), 1); - - let output = outputs.pop().unwrap(); - match output.status() { - TransactionStatus::Keep(kept_vm_status) => { - self.storage.add_write_set(output.write_set()); - match kept_vm_status { - ExecutionStatus::Success => Ok(output), - _ => { - bail!( - "Failed to execute transaction. ExecutionStatus: {:?}", - kept_vm_status - ) - }, - } - }, - TransactionStatus::Discard(status_code) => { - bail!("Transaction discarded. VM status code: {:?}", status_code) - }, - TransactionStatus::Retry => panic!(), - } - } - - fn create_and_fund_account(&mut self, account_addr: AccountAddress, amount: u64) { - let parameters = self - .fetch_transaction_parameters(&aptos_test_root_address(), None, None, None, None) - .unwrap(); - - let txn = RawTransaction::new( - aptos_test_root_address(), - parameters.sequence_number, - aptos_cached_packages::aptos_stdlib::aptos_account_create_account(account_addr), - parameters.max_gas_amount, - parameters.gas_unit_price, - parameters.expiration_timestamp_secs, - ChainId::test(), - ) - .sign(&GENESIS_KEYPAIR.0, GENESIS_KEYPAIR.1.clone()) - .unwrap() - .into_inner(); - - self.run_transaction(Transaction::UserTransaction(txn)) - .expect("Failed to create an account. This should not happen."); - - let txn = RawTransaction::new( - aptos_test_root_address(), - parameters.sequence_number + 1, - aptos_cached_packages::aptos_stdlib::aptos_coin_mint(account_addr, amount), - parameters.max_gas_amount, - parameters.gas_unit_price, - parameters.expiration_timestamp_secs, - ChainId::test(), - ) - .sign(&GENESIS_KEYPAIR.0, GENESIS_KEYPAIR.1.clone()) - .unwrap() - .into_inner(); - - self.run_transaction(Transaction::UserTransaction(txn)) - .expect("Failed to mint aptos coin. 
This should not happen."); - } -} - -impl<'a> MoveTestAdapter<'a> for AptosTestAdapter<'a> { - type ExtraInitArgs = AptosInitArgs; - type ExtraPublishArgs = AptosPublishArgs; - type ExtraRunArgs = AptosRunArgs; - type ExtraValueArgs = (); - type Subcommand = AptosSubCommand; - - fn compiled_state(&mut self) -> &mut CompiledState<'a> { - &mut self.compiled_state - } - - fn default_syntax(&self) -> SyntaxChoice { - self.default_syntax - } - - fn known_attributes(&self) -> &BTreeSet { - aptos_framework::extended_checks::get_all_attribute_names() - } - - fn init( - default_syntax: SyntaxChoice, - _comparison_mode: bool, - _run_config: TestRunConfig, - pre_compiled_deps: Option<&'a FullyCompiledProgram>, - task_opt: Option>, - ) -> (Self, Option) { - // Named address mapping - let additional_named_address_mapping = match task_opt.as_ref().map(|t| &t.command) { - Some((InitCommand { named_addresses }, _)) => { - verify_and_create_named_address_mapping(named_addresses.clone()).unwrap() - }, - None => BTreeMap::new(), - }; - - let mut named_address_mapping = aptos_framework::named_addresses().clone(); - - for (name, addr) in additional_named_address_mapping.clone() { - if named_address_mapping.contains_key(&name) { - panic!("Invalid init. The named address '{}' already exists.", name) - } - named_address_mapping.insert(name, addr); - } - - // Genesis modules - let mut storage = FakeDataStore::new(HashMap::new()); - storage.add_write_set(GENESIS_CHANGE_SET_HEAD.write_set()); - - // Builtin private key mapping - let mut private_key_mapping = BTreeMap::new(); - for (name, private_key) in aptos_framework_private_key_mapping() { - private_key_mapping.insert(name, private_key); - } - - // Initial coins to mint, defaults to 5000 - let mut coins_to_mint = 5000; - - if let Some(TaskInput { - command: (_, init_args), - .. - }) = task_opt - { - // Private key mapping - if let Some(additional_private_key_mapping) = init_args.private_keys { - for (name, private_key) in additional_private_key_mapping { - if private_key_mapping.contains_key(name.as_str()) { - panic!( - "Invalid init. The named private key '{}' already exists.", - name - ) - } - private_key_mapping.insert(name.as_str().to_string(), private_key); - } - } - - if let Some(initial_coins) = init_args.initial_coins { - coins_to_mint = initial_coins; - } - } - - let mut adapter = Self { - compiled_state: CompiledState::new(named_address_mapping, pre_compiled_deps, None), - default_syntax, - storage, - private_key_mapping, - }; - - for (_, addr) in additional_named_address_mapping { - adapter.create_and_fund_account(addr.into_inner(), coins_to_mint); - } - - (adapter, None) - } - - fn publish_module( - &mut self, - module: CompiledModule, - mut named_addr_opt: Option, - gas_budget: Option, - extra_args: Self::ExtraPublishArgs, - ) -> Result<(Option, CompiledModule)> { - let module_id = module.self_id(); - - // TODO: hack to allow the signer to be overridden. - // See if we can implement it in a cleaner way. 
- let signer = match extra_args.override_signer { - Some(addr) => { - if let ParsedAddress::Named(named_addr) = &addr { - named_addr_opt = Some(Identifier::new(named_addr.clone()).unwrap()) - } - self.compiled_state().resolve_address(&addr) - }, - None => *module_id.address(), - }; - - let params = self.fetch_transaction_parameters( - &signer, - extra_args.sequence_number, - extra_args.expiration_time, - extra_args.gas_unit_price, - gas_budget, - )?; - - let mut module_blob = vec![]; - module.serialize(&mut module_blob).unwrap(); - - let private_key = match (extra_args.private_key, named_addr_opt) { - (Some(private_key), _) => self.resolve_private_key(&private_key), - (None, Some(named_addr)) => match self - .private_key_mapping - .get(&named_addr.as_str().to_string()) - { - Some(private_key) => private_key.clone(), - None => panic_missing_private_key_named("publish", named_addr.as_str()), - }, - (None, None) => panic_missing_private_key("publish"), - }; - - let txn = RawTransaction::new_module( - signer, - params.sequence_number, - TransactionModule::new(module_blob), - params.max_gas_amount, - params.gas_unit_price, - params.expiration_timestamp_secs, - ChainId::test(), - ) - .sign(&private_key, Ed25519PublicKey::from(&private_key))? - .into_inner(); - - self.run_transaction(Transaction::UserTransaction(txn))?; - - Ok((None, module)) - } - - fn execute_script( - &mut self, - script: CompiledScript, - type_args: Vec, - signers: Vec, - txn_args: Vec, - gas_budget: Option, - extra_args: Self::ExtraRunArgs, - ) -> Result<(Option, SerializedReturnValues)> { - let signer0 = self.compiled_state().resolve_address(&signers[0]); - - if gas_budget.is_some() { - panic!("Cannot set gas budget for script.") - } - if extra_args.gas_unit_price.is_some() { - panic!("Cannot set gas price for script.") - } - if extra_args.expiration_time.is_some() { - panic!("Cannot set expiration time for script.") - } - if extra_args.secondary_signers.is_some() { - panic!("Cannot set secondary signers for script.") - } - - let private_key = match (extra_args.private_key, &signers[0]) { - (Some(private_key), _) => self.resolve_private_key(&private_key), - (None, ParsedAddress::Named(named_addr)) => { - match self.private_key_mapping.get(named_addr) { - Some(private_key) => private_key.clone(), - None => panic_missing_private_key_named("run", named_addr.as_str()), - } - }, - (None, ParsedAddress::Numerical(_)) => panic_missing_private_key("run"), - }; - - let mut script_blob = vec![]; - script.serialize(&mut script_blob)?; - - let params = self.fetch_transaction_parameters( - &signer0, - extra_args.sequence_number, - None, - None, - None, - )?; - - let txn = RawTransaction::new_script( - signer0, - params.sequence_number, - TransactionScript::new( - script_blob, - type_args, - txn_args - .into_iter() - .map(|arg| TransactionArgument::try_from(arg).unwrap()) - .collect(), - ), - params.max_gas_amount, - params.gas_unit_price, - params.expiration_timestamp_secs, - ChainId::test(), - ) - .sign(&private_key, Ed25519PublicKey::from(&private_key)) - .unwrap() - .into_inner(); - - let output = self.run_transaction(Transaction::UserTransaction(txn))?; - - let output = if extra_args.show_events { - render_events(output.events()) - } else { - None - }; - - //TODO: replace this dummy value with actual txn return value - let a = SerializedReturnValues { - mutable_reference_outputs: vec![(0, vec![0], MoveTypeLayout::U8)], - return_values: vec![(vec![0], MoveTypeLayout::U8)], - }; - - Ok((output, a)) - } - - fn call_function( - &mut 
self, - module: &ModuleId, - function: &IdentStr, - type_args: Vec, - signers: Vec, - txn_args: Vec, - gas_budget: Option, - extra_args: Self::ExtraRunArgs, - ) -> Result<(Option, SerializedReturnValues)> { - if extra_args.script { - panic!("Entry functions are not supported.") - } - - if signers.len() != 1 { - panic!("Expected 1 signer, got {}.", signers.len()); - } - let signer = self.compiled_state().resolve_address(&signers[0]); - - let private_key = match (extra_args.private_key, &signers[0]) { - (Some(private_key), _) => self.resolve_private_key(&private_key), - (None, ParsedAddress::Named(named_addr)) => { - match self.private_key_mapping.get(named_addr) { - Some(private_key) => private_key.clone(), - None => panic_missing_private_key_named("run", named_addr.as_str()), - } - }, - (None, ParsedAddress::Numerical(_)) => panic_missing_private_key("run"), - }; - - let params = self.fetch_transaction_parameters( - &signer, - extra_args.sequence_number, - extra_args.expiration_time, - extra_args.gas_unit_price, - gas_budget, - )?; - let txn = RawTransaction::new_entry_function( - signer, - params.sequence_number, - TransactionEntryFunction::new( - module.clone(), - function.to_owned(), - type_args, - convert_txn_args( - &txn_args - .into_iter() - .map(|arg| TransactionArgument::try_from(arg).unwrap()) - .collect::>(), - ), - ), - params.max_gas_amount, - params.gas_unit_price, - params.expiration_timestamp_secs, - ChainId::test(), - ); - - let txn = match &extra_args.secondary_signers { - Some(secondary_signers) => { - let (secondary_signers, secondary_private_keys) = - self.resolve_secondary_signers(secondary_signers); - - txn.sign_multi_agent( - &private_key, - secondary_signers, - secondary_private_keys.iter().collect(), - )? - .into_inner() - }, - None => txn - .sign(&private_key, Ed25519PublicKey::from(&private_key))? 
- .into_inner(), - }; - - let output = self.run_transaction(Transaction::UserTransaction(txn))?; - - let output = if extra_args.show_events { - render_events(output.events()) - } else { - None - }; - - //TODO: replace this dummy value with actual txn return value - let a = SerializedReturnValues { - mutable_reference_outputs: vec![(0, vec![0], MoveTypeLayout::U8)], - return_values: vec![(vec![0], MoveTypeLayout::U8)], - }; - Ok((output, a)) - } - - fn view_data( - &mut self, - address: AccountAddress, - module: &ModuleId, - resource: &IdentStr, - type_args: Vec, - ) -> Result { - view_resource_in_move_storage( - &self.storage.as_move_resolver(), - address, - module, - resource, - type_args, - ) - } - - fn handle_subcommand(&mut self, input: TaskInput) -> Result> { - match input.command { - AptosSubCommand::BlockCommand(block_cmd) => { - let proposer = self.compiled_state().resolve_address(&block_cmd.proposer); - let metadata = BlockMetadata::new( - HashValue::zero(), - 0, - block_cmd.time, - proposer, - vec![], - vec![], - block_cmd.time, - ); - - let output = self.run_transaction(Transaction::BlockMetadata(metadata))?; - - Ok(render_events(output.events())) - }, - AptosSubCommand::ViewTableCommand(view_table_cmd) => { - let resolver = self.storage.as_move_resolver(); - let converter = resolver.as_converter(Arc::new(FakeDbReader {})); - - let vm_key = converter - .try_into_vm_value(&view_table_cmd.key_type, view_table_cmd.key_value) - .unwrap(); - let raw_key = vm_key.undecorate().simple_serialize().unwrap(); - - let state_key = - StateKey::table_item(TableHandle(view_table_cmd.table_handle), raw_key); - - let bytes = self - .storage - .get_state_value_bytes(&state_key) - .unwrap() - .ok_or_else(|| format_err!("Failed to fetch table item.",))?; - - let move_value = - converter.try_into_move_value(&view_table_cmd.value_type, &bytes)?; - - Ok(Some(serde_json::to_string(&move_value).unwrap())) - }, - } - } -} - -/** - * Misc - */ - -struct PrettyEvent<'a>(&'a ContractEvent); - -impl<'a> fmt::Display for PrettyEvent<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f, "{{")?; - match self.0 { - ContractEvent::V1(v1) => { - writeln!(f, " key: {}", v1.key())?; - writeln!(f, " seq_num: {}", v1.sequence_number())?; - }, - ContractEvent::V2(_v2) => (), - } - writeln!(f, " type: {}", self.0.type_tag())?; - writeln!(f, " data: {:?}", hex::encode(self.0.event_data()))?; - write!(f, "}}") - } -} - -struct PrettyEvents<'a>(&'a [ContractEvent]); - -impl<'a> fmt::Display for PrettyEvents<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Events:")?; - for event in self.0.iter() { - writeln!(f)?; - write!(f, "{}", PrettyEvent(event))?; - } - Ok(()) - } -} - -fn render_events(events: &[ContractEvent]) -> Option { - if events.is_empty() { - None - } else { - Some(format!("{}", PrettyEvents(events))) - } -} - -pub fn run_aptos_test(path: &Path) -> Result<(), Box> { - // TODO: remove once bundles removed - aptos_vm::aptos_vm::allow_module_bundle_for_test(); - run_test_impl::( - TestRunConfig::CompilerV1, - path, - Some(&*PRECOMPILED_APTOS_FRAMEWORK), - ) -} diff --git a/aptos-move/aptos-transactional-test-harness/src/lib.rs b/aptos-move/aptos-transactional-test-harness/src/lib.rs deleted file mode 100644 index 5dcd2836050a6..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/src/lib.rs +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright © Aptos Foundation -// Parts of the project are originally copyright © Meta Platforms, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -mod aptos_test_harness; - -pub use aptos_test_harness::run_aptos_test; diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/call_function.exp b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/call_function.exp deleted file mode 100644 index 48a8463a5ab6e..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/call_function.exp +++ /dev/null @@ -1,10 +0,0 @@ -processed 4 tasks - -task 1 'publish'. lines 4-30: -Error: Failed to execute transaction. ExecutionStatus: OutOfGas - -task 2 'run'. lines 33-33: -Error: Transaction discarded. VM status code: MAX_GAS_UNITS_BELOW_MIN_TRANSACTION_GAS_UNITS - -task 3 'view'. lines 35-35: -[No Resource Exists] diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/call_function.move b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/call_function.move deleted file mode 100644 index 06571b5f5fc98..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/call_function.move +++ /dev/null @@ -1,35 +0,0 @@ -//# init --addresses Alice=0xf75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 -//# --private-keys Alice=56a26140eb233750cd14fb168c3eb4bd0782b099cde626ec8aff7f3cceb6364f - -//# publish -module Alice::hello_world { - use aptos_framework::signer; - use aptos_framework::coin; - use aptos_framework::aptos_coin::AptosCoin; - use std::string::{Self, String}; - - struct ModuleData has key, store { - global_counter: u64, - state: String, - } - - fun init_module(sender: &signer) { - move_to( - sender, - ModuleData { global_counter: 0, state: string::utf8(b"init") } - ); - } - - public fun foo(addr: address): u64 { - coin::balance(addr) - } - - public entry fun hi(sender: &signer, msg: String) acquires ModuleData { - borrow_global_mut(signer::address_of(sender)).state = msg; - } -} - - -//# run --signers Alice --args x"68656C6C6F20776F726C64" --show-events -- Alice::hello_world::hi - -//# view --address Alice --resource Alice::hello_world::ModuleData diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/execute_script.exp b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/execute_script.exp deleted file mode 100644 index 6dbfbb8a4eaa0..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/execute_script.exp +++ /dev/null @@ -1,31 +0,0 @@ -processed 3 tasks - -task 1 'run'. lines 5-13: -mutable inputs after call: local#0: 0 -return values: 0 - -task 2 'view'. 
lines 15-15: -key 0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin> { - coin: store 0x1::coin::Coin<0x1::aptos_coin::AptosCoin> { - value: 10100 - } - frozen: false - deposit_events: store 0x1::event::EventHandle<0x1::coin::DepositEvent> { - counter: 2 - guid: drop store 0x1::guid::GUID { - id: copy drop store 0x1::guid::ID { - creation_num: 2 - addr: 9c3b634ac05d0af393e0f93b9b19b61e7cac1c519f566276aa0c6fd15dac12aa - } - } - } - withdraw_events: store 0x1::event::EventHandle<0x1::coin::WithdrawEvent> { - counter: 0 - guid: drop store 0x1::guid::GUID { - id: copy drop store 0x1::guid::ID { - creation_num: 3 - addr: 9c3b634ac05d0af393e0f93b9b19b61e7cac1c519f566276aa0c6fd15dac12aa - } - } - } -} diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/execute_script.move b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/execute_script.move deleted file mode 100644 index 90a599d6fbf21..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/execute_script.move +++ /dev/null @@ -1,15 +0,0 @@ -//# init --addresses Alice=0xf75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 Bob=0x9c3b634ac05d0af393e0f93b9b19b61e7cac1c519f566276aa0c6fd15dac12aa -//# --private-keys Alice=56a26140eb233750cd14fb168c3eb4bd0782b099cde626ec8aff7f3cceb6364f Bob=952aaf3a98a27903dd078d76fc9e411740d2ae9dd9ecb87b96c7cd6b791ffc69 -//# --initial-coins 10000 - -//# run --script --signers Alice --args @Bob 100 -script { - use aptos_framework::coin; - use aptos_framework::aptos_coin::AptosCoin; - - fun main(sender: &signer, receiver: address, amount: u64) { - coin::transfer(sender, receiver, amount); - } -} - -//# view --address Bob --resource 0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin> diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/publish_module.exp b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/publish_module.exp deleted file mode 100644 index d309b8933e42b..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/publish_module.exp +++ /dev/null @@ -1,4 +0,0 @@ -processed 2 tasks - -task 1 'publish'. lines 4-9: -Error: Failed to execute transaction. ExecutionStatus: OutOfGas diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/publish_module.move b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/publish_module.move deleted file mode 100644 index 76b11dcbc391f..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/publish_module.move +++ /dev/null @@ -1,9 +0,0 @@ -//# init --addresses Alice=0xf75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 -//# --private-keys Alice=56a26140eb233750cd14fb168c3eb4bd0782b099cde626ec8aff7f3cceb6364f - -//# publish -module Alice::M { - public entry fun foo() { - return - } -} diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/table.exp b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/table.exp deleted file mode 100644 index ec39bd91962ee..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/table.exp +++ /dev/null @@ -1,124 +0,0 @@ -processed 8 tasks - -task 1 'run'. 
lines 6-8: -Events: -{ - key: 0400000000000000f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 - seq_num: 0 - type: 0x3::token::CreateCollectionEvent - data: "f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb60b6170746f735f70756e6b7300006400000000000000" -} -{ - type: 0x1::transaction_fee::FeeStatement - data: "6c0000000000000007000000000000000200000000000000a0860100000000000000000000000000" -}mutable inputs after call: local#0: 0 -return values: 0 - -task 2 'run'. lines 9-9: -Events: -{ - key: 0500000000000000f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 - seq_num: 0 - type: 0x3::token::CreateTokenDataEvent - data: "f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb60b6170746f735f70756e6b730a6765656b5f746f6b656e00640000000000000000f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6010000000000000000000000000000000a6765656b5f746f6b656e0000000000000000" -} -{ - key: 0600000000000000f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 - seq_num: 0 - type: 0x3::token::MintTokenEvent - data: "f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb60b6170746f735f70756e6b730a6765656b5f746f6b656e0a00000000000000" -} -{ - key: 0700000000000000f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 - seq_num: 0 - type: 0x3::token::DepositEvent - data: "f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb60b6170746f735f70756e6b730a6765656b5f746f6b656e00000000000000000a00000000000000" -} -{ - type: 0x1::transaction_fee::FeeStatement - data: "ab0000000000000012000000000000000300000000000000f0490200000000000000000000000000" -}mutable inputs after call: local#0: 0 -return values: 0 - -task 3 'view'. lines 11-11: -key 0x3::token::Collections { - collection_data: store 0x1::table::Table<0x1::string::String, 0x3::token::CollectionData> { - handle: 5adf7712b6186bb577ebd9e2067ef3d46014f5bb5fa1c457f51a334295affe9a - } - token_data: store 0x1::table::Table<0x3::token::TokenDataId, 0x3::token::TokenData> { - handle: 630c3a2b1a1f2a5a9d112eb98bfe1b35265a16f1482e6224950abd8ad4c76d52 - } - create_collection_events: store 0x1::event::EventHandle<0x3::token::CreateCollectionEvent> { - counter: 1 - guid: drop store 0x1::guid::GUID { - id: copy drop store 0x1::guid::ID { - creation_num: 4 - addr: f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 - } - } - } - create_token_data_events: store 0x1::event::EventHandle<0x3::token::CreateTokenDataEvent> { - counter: 1 - guid: drop store 0x1::guid::GUID { - id: copy drop store 0x1::guid::ID { - creation_num: 5 - addr: f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 - } - } - } - mint_token_events: store 0x1::event::EventHandle<0x3::token::MintTokenEvent> { - counter: 1 - guid: drop store 0x1::guid::GUID { - id: copy drop store 0x1::guid::ID { - creation_num: 6 - addr: f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 - } - } - } -} - -task 4 'view_table'. lines 13-15: -{"description":"","maximum":"100","mutability_config":{"description":false,"maximum":false,"uri":false},"name":"aptos_punks","supply":"1","uri":""} - -task 5 'run'. 
lines 16-18: -Events: -{ - key: 0800000000000000f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 - seq_num: 0 - type: 0x3::token::WithdrawEvent - data: "f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb60b6170746f735f70756e6b730a6765656b5f746f6b656e00000000000000000100000000000000" -} -{ - key: 0b00000000000000f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 - seq_num: 0 - type: 0x3::token_transfers::TokenOfferEvent - data: "9c3b634ac05d0af393e0f93b9b19b61e7cac1c519f566276aa0c6fd15dac12aaf75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb60b6170746f735f70756e6b730a6765656b5f746f6b656e00000000000000000100000000000000" -} -{ - type: 0x1::transaction_fee::FeeStatement - data: "71000000000000000b000000000000000300000000000000a0860100000000000000000000000000" -}mutable inputs after call: local#0: 0 -return values: 0 - -task 6 'run'. lines 19-19: -Events: -{ - key: 04000000000000009c3b634ac05d0af393e0f93b9b19b61e7cac1c519f566276aa0c6fd15dac12aa - seq_num: 0 - type: 0x3::token::DepositEvent - data: "f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb60b6170746f735f70756e6b730a6765656b5f746f6b656e00000000000000000100000000000000" -} -{ - key: 0d00000000000000f75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 - seq_num: 0 - type: 0x3::token_transfers::TokenClaimEvent - data: "9c3b634ac05d0af393e0f93b9b19b61e7cac1c519f566276aa0c6fd15dac12aaf75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb60b6170746f735f70756e6b730a6765656b5f746f6b656e00000000000000000100000000000000" -} -{ - type: 0x1::transaction_fee::FeeStatement - data: "71000000000000000b000000000000000300000000000000a08601000000000050c3000000000000" -}mutable inputs after call: local#0: 0 -return values: 0 - -task 7 'view_table'. 
lines 21-21: -{"default_properties":{"map":{"data":[]}},"description":"","largest_property_version":"0","maximum":"100","mutability_config":{"description":false,"maximum":false,"properties":false,"royalty":false,"uri":false},"name":"geek_token","royalty":{"payee_address":"0xf75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6","royalty_points_denominator":"1","royalty_points_numerator":"0"},"supply":"10","uri":""} diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/table.move b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/table.move deleted file mode 100644 index ad80a7268a9f9..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/table.move +++ /dev/null @@ -1,21 +0,0 @@ -//# init --addresses Alice=0xf75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 Bob=0x9c3b634ac05d0af393e0f93b9b19b61e7cac1c519f566276aa0c6fd15dac12aa -//# --private-keys Alice=56a26140eb233750cd14fb168c3eb4bd0782b099cde626ec8aff7f3cceb6364f Bob=952aaf3a98a27903dd078d76fc9e411740d2ae9dd9ecb87b96c7cd6b791ffc69 -//# --initial-coins 100000000 - - -//# run --signers Alice --args x"6170746f735f70756e6b73" x"" x"" 100 x"000000" --show-events -- 0x3::token::create_collection_script - -// Mint "geek_token" for collection "aptos_punks" -//# run --signers Alice --args x"6170746f735f70756e6b73" x"6765656b5f746f6b656e" x"" 10 100 x"" @Alice 1 0 x"0000000000" x"" x"" x"" --show-events -- 0x3::token::create_token_script - -//# view --address Alice --resource 0x3::token::Collections - -//# view_table --table_handle 0x5adf7712b6186bb577ebd9e2067ef3d46014f5bb5fa1c457f51a334295affe9a --key_type 0x1::string::String --value_type 0x3::token::CollectionData --key_value "aptos_punks" - -// Alice offers tokens to Bob -//# run --signers Alice --args @Bob @Alice x"6170746f735f70756e6b73" x"6765656b5f746f6b656e" 0 1 --show-events -- 0x3::token_transfers::offer_script - -// Bob accepts tokens from Alice -//# run --signers Bob --args @Alice @Alice x"6170746f735f70756e6b73" x"6765656b5f746f6b656e" 0 --show-events -- 0x3::token_transfers::claim_script - -//# view_table --table_handle 0x630c3a2b1a1f2a5a9d112eb98bfe1b35265a16f1482e6224950abd8ad4c76d52 --key_type 0x3::token::TokenDataId --value_type 0x3::token::TokenData --key_value {"creator":"0xf75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6","collection":"aptos_punks","name":"geek_token"} diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/write_set_too_large.exp b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/write_set_too_large.exp deleted file mode 100644 index 82b73cc2a8810..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/write_set_too_large.exp +++ /dev/null @@ -1,4 +0,0 @@ -processed 3 tasks - -task 2 'run'. lines 29-29: -Error: Failed to execute transaction. 
ExecutionStatus: ExecutionFailure { location: Script, function: 0, code_offset: 0 } diff --git a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/write_set_too_large.move b/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/write_set_too_large.move deleted file mode 100644 index 3925cfcce515e..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/tests/aptos_test_harness/write_set_too_large.move +++ /dev/null @@ -1,29 +0,0 @@ -//# init --addresses Alice=0xf75daa73fc071f93593335eb9033da804777eb94491650dd3f095ce6f778acb6 -//# --private-keys Alice=56a26140eb233750cd14fb168c3eb4bd0782b099cde626ec8aff7f3cceb6364f -//# --initial-coins 1000000000000000 - -//# publish -module Alice::Module { - use std::vector; - use std::string::{Self, String}; - - struct ModuleData has key, store { - data: vector, - } - - public entry fun write_large_data(account: &signer) { - let data: vector = vector::empty(); - let str = string::utf8(b"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"); - let cnt: u64 = 1024 * 8; - while (cnt > 0) { - vector::push_back(&mut data, str); - cnt = cnt - 1; - }; - - move_to(account, ModuleData { - data, - }); - } -} - -//# run --signers Alice --show-events --gas-budget 2000000 -- Alice::Module::write_large_data diff --git a/aptos-move/aptos-transactional-test-harness/tests/tests.rs b/aptos-move/aptos-transactional-test-harness/tests/tests.rs deleted file mode 100644 index 1eceef7e44f46..0000000000000 --- a/aptos-move/aptos-transactional-test-harness/tests/tests.rs +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright © Aptos Foundation -// Parts of the project are originally copyright © Meta Platforms, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -use aptos_transactional_test_harness::run_aptos_test; - -datatest_stable::harness!(run_aptos_test, "tests", r".*\.(mvir|move)$"); diff --git a/aptos-move/aptos-validator-interface/src/lib.rs b/aptos-move/aptos-validator-interface/src/lib.rs index aa2f9ecd65825..0eff345d11ed1 100644 --- a/aptos-move/aptos-validator-interface/src/lib.rs +++ b/aptos-move/aptos-validator-interface/src/lib.rs @@ -151,7 +151,7 @@ async fn handler_thread<'a>( .get_state_value_by_version(&key, version - 1) .await .ok() - .and_then(|v| v.map(|s| s.into_bytes())); + .and_then(|v| v.map(|s| s.bytes().to_vec())); cache.lock().unwrap().put((key, version), val.clone()); sender.send(val) }); @@ -181,7 +181,7 @@ impl DebuggerStateView { .send((state_key.clone(), version, tx)) .unwrap(); let bytes_opt = rx.recv()?; - Ok(bytes_opt.map(StateValue::new_legacy)) + Ok(bytes_opt.map(|bytes| StateValue::new_legacy(bytes.into()))) } } diff --git a/aptos-move/aptos-vm-types/Cargo.toml b/aptos-move/aptos-vm-types/Cargo.toml index 98cf209153900..3923d6bde601d 100644 --- a/aptos-move/aptos-vm-types/Cargo.toml +++ b/aptos-move/aptos-vm-types/Cargo.toml @@ -20,10 +20,12 @@ aptos-gas-schedule = { workspace = true } aptos-state-view = { workspace = true } aptos-types = { workspace = true } bcs = { workspace = true } +bytes = { workspace = true } either = { workspace = true } move-binary-format = { workspace = true } move-core-types = { workspace = true } [dev-dependencies] aptos-language-e2e-tests = { workspace = true } +aptos-vm = { workspace = true } claims = { workspace = true } diff --git a/aptos-move/aptos-vm-types/src/change_set.rs b/aptos-move/aptos-vm-types/src/change_set.rs index 349260b8d20f5..2194436517c09 100644 --- a/aptos-move/aptos-vm-types/src/change_set.rs +++ b/aptos-move/aptos-vm-types/src/change_set.rs @@ -2,8 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 use crate::check_change_set::CheckChangeSet; -use aptos_aggregator::delta_change_set::{serialize, DeltaOp}; -use aptos_state_view::StateView; +use aptos_aggregator::{ + delta_change_set::{serialize, DeltaOp}, + resolver::{AggregatorReadMode, AggregatorResolver}, +}; use aptos_types::{ contract_event::ContractEvent, state_store::state_key::{StateKey, StateKeyInner}, @@ -153,14 +155,14 @@ impl VMChangeSet { } pub fn write_set_iter(&self) -> impl Iterator { - self.resource_write_set + self.resource_write_set() .iter() - .chain(self.module_write_set.iter()) - .chain(self.aggregator_write_set.iter()) + .chain(self.module_write_set().iter()) + .chain(self.aggregator_v1_write_set().iter()) } pub fn num_write_ops(&self) -> usize { - self.resource_write_set.len() + self.resource_write_set().len() + self.module_write_set().len() + self.aggregator_v1_write_set().len() } @@ -203,7 +205,10 @@ impl VMChangeSet { /// Materializes this change set: all deltas are converted into writes and /// are combined with existing aggregator writes. - pub fn try_materialize(self, state_view: &impl StateView) -> anyhow::Result { + pub fn try_materialize( + self, + resolver: &impl AggregatorResolver, + ) -> anyhow::Result { let Self { resource_write_set, module_write_set, @@ -214,7 +219,10 @@ impl VMChangeSet { let into_write = |(state_key, delta): (StateKey, DeltaOp)| -> anyhow::Result<(StateKey, WriteOp), VMStatus> { - let write = delta.try_into_write_op(state_view, &state_key)?; + // Materialization is needed when committing a transaction, so + // we need precise mode to compute the true value of an + // aggregator. 
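In rough terms, the conversion below reads the precise base value stored behind the aggregator's key and applies the pending delta to it, turning the delta into an ordinary write. The following is a minimal, self-contained sketch of that step using simplified stand-in types rather than the real DeltaOp/WriteOp/AggregatorResolver API; the numbers mirror the test_ok_output_equality_with_deltas test later in this diff, where a base of 100 plus a delta of 300 materializes into a write of 400.

use std::collections::HashMap;

// Simplified stand-in for an aggregator delta: an addition bounded by a limit.
struct Delta {
    add: u128,
    limit: u128, // aggregator upper bound; exceeding it is an error
}

fn materialize(base: u128, delta: &Delta) -> Result<u128, String> {
    base.checked_add(delta.add)
        .filter(|v| *v <= delta.limit)
        .ok_or_else(|| "aggregator overflow".to_string())
}

fn main() {
    // "Storage" holding the precise base value behind the aggregator's key.
    let storage: HashMap<&str, u128> = HashMap::from([("agg_key", 100)]);
    let delta = Delta { add: 300, limit: 1_000 };

    // Precise read of the base, then the delta becomes a plain write of 400.
    let base = *storage.get("agg_key").expect("base value must exist");
    let write_value = materialize(base, &delta).expect("delta fits the bound");
    assert_eq!(write_value, 400);
}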
+ let write = resolver.try_convert_aggregator_v1_delta_into_write_op(&state_key, &delta, AggregatorReadMode::Precise)?; Ok((state_key, write)) }; @@ -243,8 +251,8 @@ impl VMChangeSet { use WriteOp::*; // First, squash deltas. - for (key, additional_delta_op) in additional_aggregator_delta_set { - if let Some(write_op) = aggregator_write_set.get_mut(&key) { + for (state_key, additional_delta_op) in additional_aggregator_delta_set { + if let Some(write_op) = aggregator_write_set.get_mut(&state_key) { // In this case, delta follows a write op. match write_op { Creation(data) @@ -259,7 +267,7 @@ impl VMChangeSet { let value = additional_delta_op .apply_to(base) .map_err(|e| e.finish(Location::Undefined).into_vm_status())?; - *data = serialize(&value); + *data = serialize(&value).into(); }, Deletion | DeletionWithMetadata { .. } => { // This case (applying a delta to deleted item) should @@ -274,7 +282,7 @@ impl VMChangeSet { } else { // Otherwise, this is a either a new delta or an additional delta // for the same state key. - match aggregator_delta_set.entry(key) { + match aggregator_delta_set.entry(state_key) { Occupied(entry) => { // In this case, we need to merge the new incoming delta // to the existing delta, ensuring the strict ordering. @@ -293,8 +301,8 @@ impl VMChangeSet { } // Next, squash write ops. - for (key, additional_write_op) in additional_aggregator_write_set { - match aggregator_write_set.entry(key) { + for (state_key, additional_write_op) in additional_aggregator_write_set { + match aggregator_write_set.entry(state_key) { Occupied(mut entry) => { squash_writes_pair!(entry, additional_write_op); }, @@ -365,4 +373,10 @@ impl VMChangeSet { checker.check_change_set(self) } + + pub fn has_creation(&self) -> bool { + use WriteOp::*; + self.write_set_iter() + .any(|(_key, op)| matches!(op, Creation(..) | CreationWithMetadata { .. })) + } } diff --git a/aptos-move/aptos-vm-types/src/output.rs b/aptos-move/aptos-vm-types/src/output.rs index 94b4ca07ff3e4..bb9a5e584bd1a 100644 --- a/aptos-move/aptos-vm-types/src/output.rs +++ b/aptos-move/aptos-vm-types/src/output.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::change_set::VMChangeSet; -use aptos_state_view::StateView; +use aptos_aggregator::resolver::AggregatorResolver; use aptos_types::{ fee_statement::FeeStatement, state_store::state_key::StateKey, @@ -70,7 +70,10 @@ impl VMOutput { /// Materializes delta sets. /// Guarantees that if deltas are materialized successfully, the output /// has an empty delta set. - pub fn try_materialize(self, state_view: &impl StateView) -> anyhow::Result { + pub fn try_materialize( + self, + resolver: &impl AggregatorResolver, + ) -> anyhow::Result { // First, check if output of transaction should be discarded or delta // change set is empty. In both cases, we do not need to apply any // deltas and can return immediately. @@ -79,7 +82,7 @@ impl VMOutput { } let (change_set, fee_statement, status) = self.unpack_with_fee_statement(); - let materialized_change_set = change_set.try_materialize(state_view)?; + let materialized_change_set = change_set.try_materialize(resolver)?; Ok(VMOutput::new( materialized_change_set, fee_statement, @@ -90,9 +93,9 @@ impl VMOutput { /// Same as `try_materialize` but also constructs `TransactionOutput`. 
pub fn try_into_transaction_output( self, - state_view: &impl StateView, + resolver: &impl AggregatorResolver, ) -> anyhow::Result { - let materialized_output = self.try_materialize(state_view)?; + let materialized_output = self.try_materialize(resolver)?; debug_assert!( materialized_output .change_set() @@ -111,10 +114,10 @@ impl VMOutput { mut self, materialized_deltas: Vec<(StateKey, WriteOp)>, ) -> TransactionOutput { - // We should have a materialized delta for every delta in the output. assert_eq!( materialized_deltas.len(), - self.change_set().aggregator_v1_delta_set().len() + self.change_set().aggregator_v1_delta_set().len(), + "Different number of materialized deltas and deltas in the output." ); debug_assert!( materialized_deltas diff --git a/aptos-move/aptos-vm-types/src/resolver.rs b/aptos-move/aptos-vm-types/src/resolver.rs index 61f150f48992c..6dbaee6152eff 100644 --- a/aptos-move/aptos-vm-types/src/resolver.rs +++ b/aptos-move/aptos-vm-types/src/resolver.rs @@ -1,32 +1,98 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use aptos_types::state_store::{state_key::StateKey, state_value::StateValueMetadata}; -use move_core_types::language_storage::StructTag; +use aptos_aggregator::{aggregator_extension::AggregatorID, resolver::TAggregatorView}; +use aptos_state_view::StateViewId; +use aptos_types::state_store::{ + state_key::StateKey, + state_storage_usage::StateStorageUsage, + state_value::{StateValue, StateValueMetadataKind}, +}; +use bytes::Bytes; +use move_core_types::value::MoveTypeLayout; -/// Allows to query storage metadata in the VM session. Needed for storage refunds. -pub trait StateValueMetadataResolver { - /// Returns metadata for a given state value: - /// - None if state value does not exist, - /// - Some(None) if state value has no metadata, - /// - Some(Some(..)) otherwise. - // TODO: Nested options are ugly, refactor. - fn get_state_value_metadata( +/// Allows to query resources from the state. +pub trait TResourceView { + type Key; + type Layout; + + /// Returns + /// - Ok(None) if the resource is not in storage, + /// - Ok(Some(...)) if the resource exists in storage, + /// - Err(...) otherwise (e.g. storage error). + fn get_resource_state_value( &self, - state_key: &StateKey, - ) -> anyhow::Result>>; + key: &Self::Key, + maybe_layout: Option<&Self::Layout>, + ) -> anyhow::Result>; + + fn get_resource_bytes( + &self, + key: &Self::Key, + maybe_layout: Option<&Self::Layout>, + ) -> anyhow::Result> { + let maybe_state_value = self.get_resource_state_value(key, maybe_layout)?; + Ok(maybe_state_value.map(|state_value| state_value.bytes().clone())) + } + + fn get_resource_state_value_metadata( + &self, + key: &Self::Key, + ) -> anyhow::Result> { + // For metadata, layouts are not important. + let maybe_state_value = self.get_resource_state_value(key, None)?; + Ok(maybe_state_value.map(StateValue::into_metadata)) + } + + fn resource_exists(&self, key: &Self::Key) -> anyhow::Result { + // For existence, layouts are not important. 
+ self.get_resource_state_value(key, None) + .map(|maybe_state_value| maybe_state_value.is_some()) + } } -pub trait TResourceGroupResolver { +pub trait TResourceGroupView { type Key; type Tag; fn get_resource_from_group( &self, - key: &Self::Key, - resource_tag: &Self::Tag, - return_group_size: bool, - ) -> anyhow::Result<(Option>, Option)>; + _key: &Self::Key, + _resource_tag: &Self::Tag, + ) -> anyhow::Result> { + unimplemented!("TResourceGroupView not yet implemented"); + } + + /// Implements the functionality requested by get_resource_group_state_value_metadata + /// from StateValueMetadataResolver, which on top of StateValueMetadataKind, requires + /// a speculative size of the resource group before the transaction. + fn get_resource_group_state_value_metadata( + &self, + _state_key: &Self::Key, + ) -> anyhow::Result> { + unimplemented!("TResourceGroupView not yet implemented"); + } + + fn resource_group_exists(&self, _key: &Self::Key) -> anyhow::Result { + unimplemented!("TResourceGroupView not yet implemented"); + } + + /// The size of the resource group, based on the sizes of the latest entries at observed + /// tags. During parallel execution, this is an estimated value that will get validated, + /// but as long as it is assumed correct, the transaction can deterministically derive + /// its behavior, e.g. charge the first access or write-related gas accordingly. The + /// implementation ensures that resource_group_size, resource_exists, and .._metadata + /// methods return somewhat consistent values (e.g. size != 0 if exists is true), and + /// otherwise return an error as the validation is guaranteed to fail. + /// + /// The collected size is only guaranteed to correspond to the correct size when executed + /// from a quiescent, correct state. The result can be viewed as a branch prediction in + /// the parallel execution setting, as a wrong value will be (later) caught by validation. + /// Thus, R/W conflicts are avoided, as long as the estimates are correct (e.g. updating + /// struct members of a fixed size). + fn resource_group_size(&self, _key: &Self::Key) -> anyhow::Result { + unimplemented!("TResourceGroupView not yet implemented"); + } /// Needed for backwards compatibility with the additional safety mechanism for resource /// groups, where the violation of the following invariant causes transaction failure: @@ -35,23 +101,110 @@ pub trait TResourceGroupResolver { /// /// For normal resources, this is asserted, but for resource groups the behavior (that /// we maintain) is for the transaction to fail with INVARIANT_VIOLATION_ERROR. - /// This ensures state does not change and blockchain does not halt while the underlying + /// Thus, the state does not change and blockchain does not halt while the underlying /// issue is addressed. In order to maintain the behavior we check for resource existence, /// which in the context of parallel execution does not cause a full R/W conflict. - /// - /// Note: If and when we start using the method in other use-cases, in particular, if it - /// may access a resource group for the first time, we should also incorporate the size - /// charge for such access. 
fn resource_exists_in_group( &self, - key: &Self::Key, - resource_tag: &Self::Tag, + _key: &Self::Key, + _resource_tag: &Self::Tag, ) -> anyhow::Result { - self.get_resource_from_group(key, resource_tag, false) - .map(|(res, _)| res.is_some()) + unimplemented!("TResourceGroupView not yet implemented"); } } -pub trait ResourceGroupResolver: TResourceGroupResolver {} +/// Allows to query modules from the state. +pub trait TModuleView { + type Key; + + /// Returns + /// - Ok(None) if the module is not in storage, + /// - Ok(Some(...)) if the module exists in storage, + /// - Err(...) otherwise (e.g. storage error). + fn get_module_state_value(&self, key: &Self::Key) -> anyhow::Result>; -impl> ResourceGroupResolver for T {} + fn get_module_bytes(&self, key: &Self::Key) -> anyhow::Result> { + let maybe_state_value = self.get_module_state_value(key)?; + Ok(maybe_state_value.map(|state_value| state_value.bytes().clone())) + } + + fn get_module_state_value_metadata( + &self, + key: &Self::Key, + ) -> anyhow::Result> { + let maybe_state_value = self.get_module_state_value(key)?; + Ok(maybe_state_value.map(StateValue::into_metadata)) + } + + fn module_exists(&self, key: &Self::Key) -> anyhow::Result { + self.get_module_state_value(key) + .map(|maybe_state_value| maybe_state_value.is_some()) + } +} + +/// Allows to query state information, e.g. its usage. +pub trait StateStorageView { + fn id(&self) -> StateViewId; + + fn get_usage(&self) -> anyhow::Result; +} + +/// A fine-grained view of the state during execution. +/// +/// - The `StateView` trait should be used by the storage backend, e.g. a DB. +/// It only allows a generic key-value access and always returns bytes or +/// state values. +/// - The `ExecutorView` trait is used at executor level, e.g. BlockSTM. When +/// a block is executed, the types of accesses are always known (for example, +/// whether a resource is accessed or a module). Fine-grained structure of +/// `ExecutorView` allows to: +/// 1. Specialize on access type, +/// 2. Separate execution and storage abstractions. +/// +/// **WARNING:** There is no default implementation of `ExecutorView` for +/// `StateView` in order to ensure that a correct type is always used. If +/// conversion from state to executor view is needed, an adapter can be used. +pub trait TExecutorView: + TResourceView + // + TResourceGroupView + + TModuleView + + TAggregatorView + + StateStorageView +{ +} + +impl TExecutorView for A where + A: TResourceView + // + TResourceGroupView + + TModuleView + + TAggregatorView + + StateStorageView +{ +} + +pub trait ExecutorView: TExecutorView {} + +impl ExecutorView for T where T: TExecutorView {} + +/// Allows to query storage metadata in the VM session. Needed for storage refunds. +/// - Result being Err means storage error or some incostistency (e.g. during speculation, +/// needing to abort/halt the transaction with an error status). +/// - Ok(None) means that the corresponding data does not exist / was deleted. +/// - Ok(Some(_ : MetadataKind)) may be internally None (within Kind) if the metadata was +/// not previously provided (e.g. Legacy WriteOps). 
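The view traits introduced in this file all follow the same shape: one required accessor returns the full optional state value, and the bytes/metadata/existence helpers are provided methods derived from it. Below is a minimal stand-alone illustration of that shape with simplified stand-in types (it is not the crate's actual TResourceView trait, and it assumes the anyhow crate, which the surrounding code already uses).

use std::collections::HashMap;

// Simplified stand-in: a state value is some bytes plus optional metadata.
#[derive(Clone)]
struct ToyStateValue {
    bytes: Vec<u8>,
    metadata: Option<String>, // None models "legacy" values without metadata
}

trait ToyResourceView {
    // The single required method.
    fn get_state_value(&self, key: &str) -> anyhow::Result<Option<ToyStateValue>>;

    // Provided helpers derived from it, mirroring get_resource_bytes,
    // get_resource_state_value_metadata, and resource_exists.
    fn get_bytes(&self, key: &str) -> anyhow::Result<Option<Vec<u8>>> {
        Ok(self.get_state_value(key)?.map(|v| v.bytes))
    }
    fn get_metadata(&self, key: &str) -> anyhow::Result<Option<Option<String>>> {
        // Outer Option: does the value exist? Inner Option: does it carry metadata?
        Ok(self.get_state_value(key)?.map(|v| v.metadata))
    }
    fn exists(&self, key: &str) -> anyhow::Result<bool> {
        Ok(self.get_state_value(key)?.is_some())
    }
}

// A map-backed implementation only needs to supply the one required method.
struct InMemoryView(HashMap<String, ToyStateValue>);

impl ToyResourceView for InMemoryView {
    fn get_state_value(&self, key: &str) -> anyhow::Result<Option<ToyStateValue>> {
        Ok(self.0.get(key).cloned())
    }
}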
+pub trait StateValueMetadataResolver { + fn get_module_state_value_metadata( + &self, + state_key: &StateKey, + ) -> anyhow::Result>; + + fn get_resource_state_value_metadata( + &self, + state_key: &StateKey, + ) -> anyhow::Result>; + + fn get_resource_group_state_value_metadata( + &self, + state_key: &StateKey, + ) -> anyhow::Result>; +} diff --git a/aptos-move/aptos-vm-types/src/tests/test_output.rs b/aptos-move/aptos-vm-types/src/tests/test_output.rs index c30b443473e23..437eb33dc712b 100644 --- a/aptos-move/aptos-vm-types/src/tests/test_output.rs +++ b/aptos-move/aptos-vm-types/src/tests/test_output.rs @@ -10,6 +10,7 @@ use aptos_language_e2e_tests::data_store::FakeDataStore; use aptos_types::{ state_store::state_key::StateKey, transaction::TransactionOutput, write_set::WriteOp, }; +use aptos_vm::storage_adapter::AsExecutorView; use claims::{assert_err, assert_matches, assert_ok}; use move_core_types::vm_status::{AbortLocation, VMStatus}; use std::collections::{BTreeMap, HashMap}; @@ -33,6 +34,7 @@ fn assert_eq_outputs(vm_output: &VMOutput, txn_output: TransactionOutput) { #[test] fn test_ok_output_equality_no_deltas() { let state_view = FakeDataStore::default(); + let executor_view = state_view.as_executor_view(); let vm_output = build_vm_output( vec![mock_create("0", 0)], vec![mock_modify("1", 1)], @@ -45,8 +47,10 @@ fn test_ok_output_equality_no_deltas() { // 2. `try_into_transaction_output` changes the type and returns a result. // 3. `into_transaction_output_with_materialized_deltas` changes the type and // simply merges materialized deltas. - let materialized_vm_output = assert_ok!(vm_output.clone().try_materialize(&state_view)); - let txn_output_1 = assert_ok!(vm_output.clone().try_into_transaction_output(&state_view)); + let materialized_vm_output = assert_ok!(vm_output.clone().try_materialize(&executor_view)); + let txn_output_1 = assert_ok!(vm_output + .clone() + .try_into_transaction_output(&executor_view)); let txn_output_2 = vm_output .clone() .into_transaction_output_with_materialized_deltas(vec![]); @@ -63,6 +67,7 @@ fn test_ok_output_equality_with_deltas() { let delta_key = "3"; let mut state_view = FakeDataStore::default(); state_view.set_legacy(as_state_key!(delta_key), serialize(&100)); + let executor_view = state_view.as_executor_view(); let vm_output = build_vm_output( vec![mock_create("0", 0)], @@ -71,8 +76,10 @@ fn test_ok_output_equality_with_deltas() { vec![mock_add(delta_key, 300)], ); - let materialized_vm_output = assert_ok!(vm_output.clone().try_materialize(&state_view)); - let txn_output_1 = assert_ok!(vm_output.clone().try_into_transaction_output(&state_view)); + let materialized_vm_output = assert_ok!(vm_output.clone().try_materialize(&executor_view)); + let txn_output_1 = assert_ok!(vm_output + .clone() + .try_into_transaction_output(&executor_view)); let txn_output_2 = vm_output .clone() .into_transaction_output_with_materialized_deltas(vec![mock_modify("3", 400)]); @@ -110,11 +117,12 @@ fn test_err_output_equality_with_deltas() { let delta_key = "3"; let mut state_view = FakeDataStore::default(); state_view.set_legacy(as_state_key!(delta_key), serialize(&900)); + let executor_view = state_view.as_executor_view(); let vm_output = build_vm_output(vec![], vec![], vec![], vec![mock_add(delta_key, 300)]); - let vm_status_1 = assert_err!(vm_output.clone().try_materialize(&state_view)); - let vm_status_2 = assert_err!(vm_output.try_into_transaction_output(&state_view)); + let vm_status_1 = assert_err!(vm_output.clone().try_materialize(&executor_view)); + 
let vm_status_2 = assert_err!(vm_output.try_into_transaction_output(&executor_view)); // Error should be consistent. assert_eq!(vm_status_1, vm_status_2); diff --git a/aptos-move/aptos-vm-types/src/tests/utils.rs b/aptos-move/aptos-vm-types/src/tests/utils.rs index cd3c1f8f46de1..8d8ee7f212b11 100644 --- a/aptos-move/aptos-vm-types/src/tests/utils.rs +++ b/aptos-move/aptos-vm-types/src/tests/utils.rs @@ -34,11 +34,11 @@ macro_rules! as_bytes { } pub(crate) fn mock_create(k: impl ToString, v: u128) -> (StateKey, WriteOp) { - (as_state_key!(k), WriteOp::Creation(as_bytes!(v))) + (as_state_key!(k), WriteOp::Creation(as_bytes!(v).into())) } pub(crate) fn mock_modify(k: impl ToString, v: u128) -> (StateKey, WriteOp) { - (as_state_key!(k), WriteOp::Modification(as_bytes!(v))) + (as_state_key!(k), WriteOp::Modification(as_bytes!(v).into())) } pub(crate) fn mock_delete(k: impl ToString) -> (StateKey, WriteOp) { diff --git a/aptos-move/aptos-vm/Cargo.toml b/aptos-move/aptos-vm/Cargo.toml index 014ef7616793e..4a55fa1400664 100644 --- a/aptos-move/aptos-vm/Cargo.toml +++ b/aptos-move/aptos-vm/Cargo.toml @@ -14,7 +14,7 @@ rust-version = { workspace = true } [dependencies] anyhow = { workspace = true } -aptos-aggregator = { workspace = true, features = ["testing"] } +aptos-aggregator = { workspace = true } aptos-block-executor = { workspace = true } aptos-block-partitioner = { workspace = true } aptos-crypto = { workspace = true } @@ -37,6 +37,7 @@ aptos-utils = { workspace = true } aptos-vm-logging = { workspace = true } aptos-vm-types = { workspace = true } bcs = { workspace = true } +bytes = { workspace = true } claims = { workspace = true } crossbeam-channel = { workspace = true } dashmap = { workspace = true } @@ -61,6 +62,7 @@ smallvec = { workspace = true } tracing = { workspace = true } [dev-dependencies] +aptos-aggregator = { workspace = true, features = ["testing"] } aptos-language-e2e-tests = { workspace = true } aptos-types = { workspace = true } proptest = { workspace = true } diff --git a/aptos-move/aptos-vm/src/adapter_common.rs b/aptos-move/aptos-vm/src/adapter_common.rs index 802130f1c0514..96c294aaa10eb 100644 --- a/aptos-move/aptos-vm/src/adapter_common.rs +++ b/aptos-move/aptos-vm/src/adapter_common.rs @@ -2,7 +2,7 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::move_vm_ext::{AptosMoveResolver, MoveResolverExt, SessionExt, SessionId}; +use crate::move_vm_ext::{AptosMoveResolver, SessionExt, SessionId}; use anyhow::Result; use aptos_types::{ block_metadata::BlockMetadata, @@ -23,7 +23,7 @@ pub trait VMAdapter { /// this after redesigning cache ownership model. 
fn new_session<'r>( &self, - remote: &'r impl MoveResolverExt, + remote: &'r impl AptosMoveResolver, session_id: SessionId, ) -> SessionExt<'r, '_>; @@ -50,7 +50,7 @@ pub trait VMAdapter { fn execute_single_transaction( &self, txn: &PreprocessedTransaction, - data_cache: &impl MoveResolverExt, + data_cache: &impl AptosMoveResolver, log_context: &AdapterLogSchema, ) -> Result<(VMStatus, VMOutput, Option), VMStatus>; diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs index e231196b96f2b..62dd502657d37 100644 --- a/aptos-move/aptos-vm/src/aptos_vm.rs +++ b/aptos-move/aptos-vm/src/aptos_vm.rs @@ -9,10 +9,11 @@ use crate::{ aptos_vm_impl::{get_transaction_output, AptosVMImpl, AptosVMInternals}, block_executor::{AptosTransactionOutput, BlockAptosVM}, counters::*, - data_cache::StorageAdapter, + data_cache::{AsMoveResolver, StorageAdapter}, errors::expect_only_successful_execution, - move_vm_ext::{AptosMoveResolver, MoveResolverExt, RespawnedSession, SessionExt, SessionId}, + move_vm_ext::{AptosMoveResolver, RespawnedSession, SessionExt, SessionId}, sharded_block_executor::{executor_client::ExecutorClient, ShardedBlockExecutor}, + storage_adapter::AsExecutorView, system_module_names::*, transaction_metadata::TransactionMetadata, verifier, VMExecutor, VMValidator, @@ -47,6 +48,7 @@ use aptos_vm_logging::{log_schema::AdapterLogSchema, speculative_error, speculat use aptos_vm_types::{ change_set::VMChangeSet, output::VMOutput, + resolver::ExecutorView, storage::{ChangeSetConfigs, StorageGasParameters}, }; use fail::fail_point; @@ -121,9 +123,14 @@ impl AptosVM { Self(AptosVMImpl::new(config_storage)) } + pub fn new_from_executor_view(executor_view: &impl ExecutorView) -> Self { + Self(AptosVMImpl::new(&StorageAdapter::from_borrowed( + executor_view, + ))) + } + pub fn new_from_state_view(state_view: &impl StateView) -> Self { - let config_storage = StorageAdapter::new(state_view); - Self(AptosVMImpl::new(&config_storage)) + Self(AptosVMImpl::new(&state_view.as_move_resolver())) } pub fn new_for_validation(state_view: &impl StateView) -> Self { @@ -239,7 +246,7 @@ impl AptosVM { error_code: VMStatus, gas_meter: &impl AptosGasMeter, txn_data: &TransactionMetadata, - resolver: &impl MoveResolverExt, + resolver: &impl AptosMoveResolver, log_context: &AdapterLogSchema, change_set_configs: &ChangeSetConfigs, ) -> VMOutput { @@ -254,18 +261,17 @@ impl AptosVM { .1 } - pub fn as_move_resolver<'a, S>(&self, state_view: &'a S) -> StorageAdapter<'a, S> { - StorageAdapter::new_with_cached_config( - state_view, + pub fn as_move_resolver<'r, R: ExecutorView>( + &self, + executor_view: &'r R, + ) -> StorageAdapter<'r, R> { + StorageAdapter::from_borrowed_with_cached_config( + executor_view, self.0.get_gas_feature_version(), self.0.get_features(), ) } - pub(crate) fn is_storage_slot_metadata_enabled(&self) -> bool { - self.0.get_features().is_storage_slot_metadata_enabled() - } - fn fee_statement_from_gas_meter( txn_data: &TransactionMetadata, gas_meter: &impl AptosGasMeter, @@ -289,7 +295,7 @@ impl AptosVM { error_code: VMStatus, gas_meter: &impl AptosGasMeter, txn_data: &TransactionMetadata, - resolver: &impl MoveResolverExt, + resolver: &impl AptosMoveResolver, log_context: &AdapterLogSchema, change_set_configs: &ChangeSetConfigs, ) -> (VMStatus, VMOutput) { @@ -420,7 +426,7 @@ impl AptosVM { fn execute_script_or_entry_function( &self, - resolver: &impl MoveResolverExt, + resolver: &impl AptosMoveResolver, mut session: SessionExt, gas_meter: &mut impl AptosGasMeter, txn_data: 
&TransactionMetadata, @@ -511,7 +517,7 @@ impl AptosVM { fn charge_change_set_and_respawn_session<'r, 'l>( &'l self, session: SessionExt, - resolver: &'r impl MoveResolverExt, + resolver: &'r impl AptosMoveResolver, gas_meter: &mut impl AptosGasMeter, change_set_configs: &ChangeSetConfigs, txn_data: &TransactionMetadata, @@ -533,7 +539,7 @@ impl AptosVM { // TODO(Gas): Charge for aggregator writes let session_id = SessionId::epilogue_meta(txn_data); - RespawnedSession::spawn(&self.0, session_id, resolver, change_set, storage_refund) + RespawnedSession::spawn(self, session_id, resolver, change_set, storage_refund) } // Execute a multisig transaction: @@ -545,7 +551,7 @@ impl AptosVM { // 3. Call post transaction cleanup function in multisig account module with the result from (2) fn execute_multisig_transaction( &self, - resolver: &impl MoveResolverExt, + resolver: &impl AptosMoveResolver, mut session: SessionExt, gas_meter: &mut impl AptosGasMeter, txn_data: &TransactionMetadata, @@ -697,7 +703,7 @@ impl AptosVM { fn success_multisig_payload_cleanup<'r, 'l>( &'l self, - resolver: &'r impl MoveResolverExt, + resolver: &'r impl AptosMoveResolver, session: SessionExt, gas_meter: &mut impl AptosGasMeter, txn_data: &TransactionMetadata, @@ -728,7 +734,7 @@ impl AptosVM { fn failure_multisig_payload_cleanup<'r, 'l>( &'l self, - resolver: &'r impl MoveResolverExt, + resolver: &'r impl AptosMoveResolver, execution_error: VMStatus, txn_data: &TransactionMetadata, mut cleanup_args: Vec>, @@ -736,7 +742,7 @@ impl AptosVM { // Start a fresh session for running cleanup that does not contain any changes from // the inner function call earlier (since it failed). let mut respawned_session = RespawnedSession::spawn( - &self.0, + self, SessionId::epilogue_meta(txn_data), resolver, VMChangeSet::empty(), @@ -862,7 +868,7 @@ impl AptosVM { /// NativeCodeContext fn execute_modules( &self, - resolver: &impl MoveResolverExt, + resolver: &impl AptosMoveResolver, mut session: SessionExt, gas_meter: &mut impl AptosGasMeter, txn_data: &TransactionMetadata, @@ -1066,7 +1072,7 @@ impl AptosVM { fn execute_user_transaction_impl( &self, - resolver: &impl MoveResolverExt, + resolver: &impl AptosMoveResolver, txn: &SignatureCheckedTransaction, log_context: &AdapterLogSchema, gas_meter: &mut impl AptosGasMeter, @@ -1181,7 +1187,7 @@ impl AptosVM { fn execute_user_transaction( &self, - resolver: &impl MoveResolverExt, + resolver: &impl AptosMoveResolver, txn: &SignatureCheckedTransaction, log_context: &AdapterLogSchema, ) -> (VMStatus, VMOutput) { @@ -1193,7 +1199,8 @@ impl AptosVM { } pub fn execute_user_transaction_with_custom_gas_meter( - state_view: &impl StateView, + &self, + resolver: &impl AptosMoveResolver, txn: &SignatureCheckedTransaction, log_context: &AdapterLogSchema, make_gas_meter: F, @@ -1202,25 +1209,16 @@ impl AptosVM { G: AptosGasMeter, F: FnOnce(u64, VMGasParameters, StorageGasParameters, Gas) -> Result, { - // TODO(Gas): revisit this. - let vm = AptosVM::new_from_state_view(state_view); - // TODO(Gas): avoid creating txn metadata twice. 
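The make_gas_meter parameter of execute_user_transaction_with_custom_gas_meter follows a factory-closure pattern: the VM feeds the parameters it already owns into a caller-supplied constructor, so callers can plug in their own meter type without the VM naming it. A small sketch of that pattern with made-up stand-in types (not the actual AptosGasMeter API):

// Stand-in for a concrete gas meter chosen by the caller.
struct ToyGasMeter {
    feature_version: u64,
    balance: u64,
}

// Generic runner: it does not know the meter type, only how to ask for one.
fn with_custom_meter<G, F>(feature_version: u64, balance: u64, make_meter: F) -> Result<G, String>
where
    F: FnOnce(u64, u64) -> Result<G, String>,
{
    // The VM-side code would go on to execute the transaction with the meter;
    // this sketch simply hands the constructed meter back.
    make_meter(feature_version, balance)
}

fn main() {
    let meter = with_custom_meter(12, 2_000, |feature_version, balance| {
        Ok(ToyGasMeter { feature_version, balance })
    })
    .unwrap();
    assert_eq!((meter.feature_version, meter.balance), (12, 2_000));
}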
let balance = TransactionMetadata::new(txn).max_gas_amount(); let mut gas_meter = make_gas_meter( - vm.0.get_gas_feature_version(), - vm.0.get_gas_parameters(log_context)?.vm.clone(), - vm.0.get_storage_gas_parameters(log_context)?.clone(), + self.0.get_gas_feature_version(), + self.0.get_gas_parameters(log_context)?.vm.clone(), + self.0.get_storage_gas_parameters(log_context)?.clone(), balance, )?; - - let resolver = StorageAdapter::new_with_cached_config( - state_view, - vm.0.get_gas_feature_version(), - vm.0.get_features(), - ); let (status, output) = - vm.execute_user_transaction_impl(&resolver, txn, log_context, &mut gas_meter); + self.execute_user_transaction_impl(resolver, txn, log_context, &mut gas_meter); Ok((status, output, gas_meter)) } @@ -1273,7 +1271,7 @@ impl AptosVM { fn read_change_set( &self, - state_view: &impl StateView, + executor_view: &dyn ExecutorView, change_set: &VMChangeSet, ) -> Result<(), VMStatus> { assert!( @@ -1283,9 +1281,14 @@ impl AptosVM { // All Move executions satisfy the read-before-write property. Thus we need to read each // access path that the write set is going to update. - for (state_key, _) in change_set.write_set_iter() { - state_view - .get_state_value_bytes(state_key) + for state_key in change_set.module_write_set().keys() { + executor_view + .get_module_state_value(state_key) + .map_err(|_| VMStatus::error(StatusCode::STORAGE_ERROR, None))?; + } + for state_key in change_set.resource_write_set().keys() { + executor_view + .get_resource_state_value(state_key, None) .map_err(|_| VMStatus::error(StatusCode::STORAGE_ERROR, None))?; } Ok(()) @@ -1316,7 +1319,7 @@ impl AptosVM { pub(crate) fn process_waypoint_change_set( &self, - resolver: &impl MoveResolverExt, + resolver: &impl AptosMoveResolver, write_set_payload: WriteSetPayload, log_context: &AdapterLogSchema, ) -> Result<(VMStatus, VMOutput), VMStatus> { @@ -1330,7 +1333,7 @@ impl AptosVM { )?; Self::validate_waypoint_change_set(&change_set, log_context)?; - self.read_change_set(resolver, &change_set)?; + self.read_change_set(resolver.as_executor_view(), &change_set)?; SYSTEM_TRANSACTIONS_EXECUTED.inc(); @@ -1392,21 +1395,19 @@ impl AptosVM { /// Executes a SignedTransaction without performing signature verification. 
pub fn simulate_signed_transaction( txn: &SignedTransaction, - state_view: &impl StateView, + executor_view: &impl ExecutorView, ) -> (VMStatus, TransactionOutput) { - let vm = AptosVM::new_from_state_view(state_view); + let vm = AptosVM::new_from_executor_view(executor_view); let simulation_vm = AptosSimulationVM(vm); - let log_context = AdapterLogSchema::new(state_view.id(), 0); + let log_context = AdapterLogSchema::new(executor_view.id(), 0); - let (vm_status, vm_output) = simulation_vm.simulate_signed_transaction( - &simulation_vm.0.as_move_resolver(state_view), - txn, - &log_context, - ); + let resolver = simulation_vm.0.as_move_resolver(executor_view); + let (vm_status, vm_output) = + simulation_vm.simulate_signed_transaction(&resolver, txn, &log_context); ( vm_status, vm_output - .try_into_transaction_output(state_view) + .try_into_transaction_output(&resolver) .expect("Simulation cannot fail"), ) } @@ -1458,7 +1459,9 @@ impl AptosVM { vm.0.get_storage_gas_parameters(&log_context)?.clone(), gas_budget, ))); - let resolver = vm.as_move_resolver(state_view); + + let executor_view = state_view.as_executor_view(); + let resolver = vm.as_move_resolver(&executor_view); let mut session = vm.new_session(&resolver, SessionId::Void); let func_inst = session.load_function(&module_id, &func_name, &type_args)?; @@ -1468,7 +1471,7 @@ impl AptosVM { arguments, func_name.as_ident_str(), &func_inst, - metadata.as_ref(), + metadata.as_ref().map(Arc::as_ref), vm.0.get_features() .is_enabled(FeatureFlag::STRUCT_CONSTRUCTORS), )?; @@ -1636,7 +1639,8 @@ impl VMValidator for AptosVM { }, }; - let resolver = self.as_move_resolver(state_view); + let executor_view = state_view.as_executor_view(); + let resolver = self.as_move_resolver(&executor_view); let mut session = self.0.new_session(&resolver, SessionId::prologue(&txn)); let validation_result = self.validate_signature_checked_transaction( &mut session, @@ -1720,7 +1724,7 @@ impl VMAdapter for AptosVM { fn execute_single_transaction( &self, txn: &PreprocessedTransaction, - resolver: &impl MoveResolverExt, + resolver: &impl AptosMoveResolver, log_context: &AdapterLogSchema, ) -> Result<(VMStatus, VMOutput, Option), VMStatus> { Ok(match txn { @@ -1849,7 +1853,7 @@ impl AptosSimulationVM { fn simulate_signed_transaction( &self, - resolver: &impl MoveResolverExt, + resolver: &impl AptosMoveResolver, txn: &SignedTransaction, log_context: &AdapterLogSchema, ) -> (VMStatus, VMOutput) { diff --git a/aptos-move/aptos-vm/src/aptos_vm_impl.rs b/aptos-move/aptos-vm/src/aptos_vm_impl.rs index 1b967d966b366..eaa8b908f5283 100644 --- a/aptos-move/aptos-vm/src/aptos_vm_impl.rs +++ b/aptos-move/aptos-vm/src/aptos_vm_impl.rs @@ -637,7 +637,7 @@ impl AptosVMImpl { pub(crate) fn extract_module_metadata( &self, module: &ModuleId, - ) -> Option { + ) -> Option> { if self.features.is_enabled(FeatureFlag::VM_BINARY_FORMAT_V6) { aptos_framework::get_vm_metadata(&self.move_vm, module) } else { diff --git a/aptos-move/aptos-vm/src/block_executor/mod.rs b/aptos-move/aptos-vm/src/block_executor/mod.rs index d7dfbe851c046..6b1c365b84e2d 100644 --- a/aptos-move/aptos-vm/src/block_executor/mod.rs +++ b/aptos-move/aptos-vm/src/block_executor/mod.rs @@ -13,7 +13,7 @@ use crate::{ }, AptosVM, }; -use aptos_aggregator::delta_change_set::DeltaOp; +use aptos_aggregator::{aggregator_extension::AggregatorID, delta_change_set::DeltaOp}; use aptos_block_executor::{ errors::Error, executor::BlockExecutor, @@ -35,14 +35,16 @@ use aptos_types::{ }; use aptos_vm_logging::{flush_speculative_logs, 
init_speculative_logs}; use aptos_vm_types::output::VMOutput; -use move_core_types::vm_status::VMStatus; +use move_core_types::{language_storage::StructTag, vm_status::VMStatus}; use once_cell::sync::OnceCell; use rayon::{prelude::*, ThreadPool}; use std::{collections::HashMap, sync::Arc}; impl BlockExecutorTransaction for PreprocessedTransaction { type Event = ContractEvent; + type Identifier = AggregatorID; type Key = StateKey; + type Tag = StructTag; type Value = WriteOp; } diff --git a/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs b/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs index 6fcb3eabf064c..bf5301742c3fe 100644 --- a/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs +++ b/aptos-move/aptos-vm/src/block_executor/vm_wrapper.rs @@ -6,17 +6,15 @@ use crate::{ adapter_common::{PreprocessedTransaction, VMAdapter}, aptos_vm::AptosVM, block_executor::AptosTransactionOutput, - data_cache::StorageAdapter, - move_vm_ext::write_op_converter::WriteOpConverter, + storage_adapter::AsExecutorView, }; use aptos_block_executor::task::{ExecutionStatus, ExecutorTask}; use aptos_logger::{enabled, Level}; use aptos_mvhashmap::types::TxnIndex; use aptos_state_view::StateView; -use aptos_types::{state_store::state_key::StateKey, write_set::WriteOp}; use aptos_vm_logging::{log_schema::AdapterLogSchema, prelude::*}; +use aptos_vm_types::resolver::ExecutorView; use move_core_types::{ - effects::Op as MoveStorageOp, ident_str, language_storage::{ModuleId, CORE_CODE_ADDRESS}, vm_status::VMStatus, @@ -35,11 +33,7 @@ impl<'a, S: 'a + StateView + Sync> ExecutorTask for AptosExecutorTask<'a, S> { fn init(argument: &'a S) -> Self { // AptosVM has to be initialized using configs from storage. - // Using adapter allows us to fetch those. - // TODO: with new adapter we can relax trait bounds on S and avoid - // creating `StorageAdapter` here. - let config_storage = StorageAdapter::new(argument); - let vm = AptosVM::new(&config_storage); + let vm = AptosVM::new_from_state_view(&argument); // Loading `0x1::account` and its transitive dependency into the code cache. // @@ -49,9 +43,10 @@ impl<'a, S: 'a + StateView + Sync> ExecutorTask for AptosExecutorTask<'a, S> { // Loading up `0x1::account` should be sufficient as this is the most common module // used for prologue, epilogue and transfer functionality. + let executor_view = argument.as_executor_view(); let _ = vm.load_module( &ModuleId::new(CORE_CODE_ADDRESS, ident_str!("account").to_owned()), - &vm.as_move_resolver(argument), + &vm.as_move_resolver(&executor_view), ); Self { @@ -65,22 +60,22 @@ impl<'a, S: 'a + StateView + Sync> ExecutorTask for AptosExecutorTask<'a, S> { // execution, or speculatively as a part of a parallel execution. fn execute_transaction( &self, - view: &impl StateView, + executor_view: &impl ExecutorView, txn: &PreprocessedTransaction, txn_idx: TxnIndex, materialize_deltas: bool, ) -> ExecutionStatus { let log_context = AdapterLogSchema::new(self.base_view.id(), txn_idx as usize); - + let resolver = self.vm.as_move_resolver(executor_view); match self .vm - .execute_single_transaction(txn, &self.vm.as_move_resolver(view), &log_context) + .execute_single_transaction(txn, &resolver, &log_context) { Ok((vm_status, mut vm_output, sender)) => { if materialize_deltas { // TODO: Integrate aggregator v2. 
vm_output = vm_output - .try_materialize(view) + .try_materialize(&resolver) .expect("Delta materialization failed"); } @@ -114,31 +109,4 @@ impl<'a, S: 'a + StateView + Sync> ExecutorTask for AptosExecutorTask<'a, S> { Err(err) => ExecutionStatus::Abort(err), } } - - fn convert_to_value( - &self, - view: &impl StateView, - key: &StateKey, - maybe_blob: Option>, - creation: bool, - ) -> anyhow::Result { - let storage_adapter = self.vm.as_move_resolver(view); - let wop_converter = - WriteOpConverter::new(&storage_adapter, self.vm.is_storage_slot_metadata_enabled()); - - let move_op = match maybe_blob { - Some(blob) => { - if creation { - MoveStorageOp::New(blob) - } else { - MoveStorageOp::Modify(blob) - } - }, - None => MoveStorageOp::Delete, - }; - - wop_converter - .convert(key, move_op, false) - .map_err(|_| anyhow::Error::msg("Error on converting to WriteOp")) - } } diff --git a/aptos-move/aptos-vm/src/data_cache.rs b/aptos-move/aptos-vm/src/data_cache.rs index 5ed822fbdad7e..0a128116324c7 100644 --- a/aptos-move/aptos-vm/src/data_cache.rs +++ b/aptos-move/aptos-vm/src/data_cache.rs @@ -5,16 +5,16 @@ use crate::{ aptos_vm_impl::gas_config, - move_vm_ext::{get_max_binary_format_version, AptosMoveResolver}, + move_vm_ext::{get_max_binary_format_version, AptosMoveResolver, AsExecutorView}, + storage_adapter::ExecutorViewBase, }; #[allow(unused_imports)] use anyhow::Error; use aptos_aggregator::{ aggregator_extension::AggregatorID, - resolver::{AggregatorReadMode, AggregatorResolver}, + resolver::{AggregatorReadMode, TAggregatorView}, }; -use aptos_framework::natives::state_storage::StateStorageUsageResolver; -use aptos_state_view::{StateView, TStateView}; +use aptos_state_view::{StateView, StateViewId}; use aptos_table_natives::{TableHandle, TableResolver}; use aptos_types::{ access_path::AccessPath, @@ -22,10 +22,11 @@ use aptos_types::{ state_store::{ state_key::StateKey, state_storage_usage::StateStorageUsage, - state_value::{StateValue, StateValueMetadata}, + state_value::{StateValue, StateValueMetadataKind}, }, }; -use aptos_vm_types::resolver::{StateValueMetadataResolver, TResourceGroupResolver}; +use aptos_vm_types::resolver::{ExecutorView, StateStorageView, StateValueMetadataResolver}; +use bytes::Bytes; use claims::assert_none; use move_binary_format::{errors::*, CompiledModule}; use move_core_types::{ @@ -38,6 +39,7 @@ use move_core_types::{ use std::{ cell::RefCell, collections::{BTreeMap, HashMap}, + ops::Deref, }; pub(crate) fn get_resource_group_from_metadata( @@ -52,9 +54,31 @@ pub(crate) fn get_resource_group_from_metadata( .find_map(|attr| attr.get_resource_group_member()) } -/// Adapter to convert a `StateView` into a `MoveResolverExt`. -pub struct StorageAdapter<'a, S> { - state_store: &'a S, +// Allows to keep a single `StorageAdapter` for both borrowed or owned views. +// For example, views are typically borrowed during block execution, but are +// owned in tests or in indexer. +// We also do not use `std::borrow::CoW` because otherwise `E` (which is the +// executor view) has to implement `Clone`. +enum ExecutorViewKind<'e, E: 'e> { + Borrowed(&'e E), + Owned(E), +} + +impl Deref for ExecutorViewKind<'_, E> { + type Target = E; + + fn deref(&self) -> &Self::Target { + match *self { + ExecutorViewKind::Borrowed(e) => e, + ExecutorViewKind::Owned(ref e) => e, + } + } +} + +/// Adapter to convert a `ExecutorView` into a `AptosMoveResolver`. +pub struct StorageAdapter<'e, E> { + // Underlying storage backend, borrowed or owned. 
+ executor_view: ExecutorViewKind<'e, E>, // When set, and if the resource group was not cached, the serialized resource // group size in bytes is added to the size of the resource from the group // (returned for gas purposes). @@ -64,10 +88,10 @@ pub struct StorageAdapter<'a, S> { // the serialized sizes of the tags. This avoids dependency on group serialization. group_byte_count_as_sum: bool, max_binary_format_version: u32, - resource_group_cache: RefCell>>>, + resource_group_cache: RefCell>>, } -impl<'a, S> StorageAdapter<'a, S> { +impl<'e, E: ExecutorView> StorageAdapter<'e, E> { fn init(mut self, features: &Features, gas_feature_version: u64) -> Self { if gas_feature_version >= 9 { if gas_feature_version >= 12 { @@ -81,13 +105,22 @@ impl<'a, S> StorageAdapter<'a, S> { self } - pub fn new_with_cached_config( - state_store: &'a S, + pub(crate) fn from_borrowed_with_cached_config( + executor_view: &'e E, + gas_feature_version: u64, + features: &Features, + ) -> Self { + let executor_view = ExecutorViewKind::Borrowed(executor_view); + Self::new_with_cached_config(executor_view, gas_feature_version, features) + } + + fn new_with_cached_config( + executor_view: ExecutorViewKind<'e, E>, gas_feature_version: u64, features: &Features, ) -> Self { let s = Self { - state_store, + executor_view, accurate_byte_count: false, group_byte_count_as_sum: false, max_binary_format_version: 0, @@ -95,12 +128,20 @@ impl<'a, S> StorageAdapter<'a, S> { }; s.init(features, gas_feature_version) } -} -impl<'a, S: StateView> StorageAdapter<'a, S> { - pub fn new(state_store: &'a S) -> Self { + pub(crate) fn from_owned(executor_view: E) -> Self { + let executor_view = ExecutorViewKind::Owned(executor_view); + Self::new(executor_view) + } + + pub(crate) fn from_borrowed(executor_view: &'e E) -> Self { + let executor_view = ExecutorViewKind::Borrowed(executor_view); + Self::new(executor_view) + } + + fn new(executor_view: ExecutorViewKind<'e, E>) -> Self { let s = Self { - state_store, + executor_view, accurate_byte_count: false, group_byte_count_as_sum: false, max_binary_format_version: 0, @@ -111,18 +152,12 @@ impl<'a, S: StateView> StorageAdapter<'a, S> { s.init(&features, gas_feature_version) } - pub fn get(&self, access_path: AccessPath) -> PartialVMResult>> { - self.state_store - .get_state_value_bytes(&StateKey::access_path(access_path)) - .map_err(|_| PartialVMError::new(StatusCode::STORAGE_ERROR)) - } - fn get_any_resource( &self, address: &AccountAddress, struct_tag: &StructTag, metadata: &[Metadata], - ) -> Result<(Option>, usize), VMError> { + ) -> Result<(Option, usize), VMError> { let resource_group = get_resource_group_from_metadata(struct_tag, metadata); if let Some(resource_group) = resource_group { let key = StateKey::access_path(AccessPath::resource_group_access_path( @@ -147,38 +182,32 @@ impl<'a, S: StateView> StorageAdapter<'a, S> { let buf_size = resource_size(&buf); Ok((buf, buf_size + maybe_group_size.unwrap_or(0))) } else { - let ap = - AccessPath::resource_access_path(*address, struct_tag.clone()).map_err(|_| { + let access_path = AccessPath::resource_access_path(*address, struct_tag.clone()) + .map_err(|_| { PartialVMError::new(StatusCode::TOO_MANY_TYPE_NODES).finish(Location::Undefined) })?; - let buf = self.get(ap).map_err(|e| e.finish(Location::Undefined))?; + let buf = self + .executor_view + .get_resource_bytes(&StateKey::access_path(access_path), None) + .map_err(|_| { + PartialVMError::new(StatusCode::STORAGE_ERROR).finish(Location::Undefined) + })?; let buf_size = 
resource_size(&buf); Ok((buf, buf_size)) } } -} - -impl<'a, S: StateView> AptosMoveResolver for StorageAdapter<'a, S> { - fn release_resource_group_cache(&self) -> HashMap>> { - self.resource_group_cache.take() - } -} - -impl<'a, S: StateView> TResourceGroupResolver for StorageAdapter<'a, S> { - type Key = StateKey; - type Tag = StructTag; fn get_resource_from_group( &self, key: &StateKey, resource_tag: &StructTag, return_group_size: bool, - ) -> anyhow::Result<(Option>, Option)> { - // Resolve directly from state store (StateView interface). - let group_data = self.state_store.get_state_value_bytes(key)?; + ) -> anyhow::Result<(Option, Option)> { + // Resolve directly from state store (ExecutorView interface). + let group_data = self.executor_view.get_resource_bytes(key, None)?; if let Some(group_data_blob) = group_data { - let group_data: BTreeMap> = bcs::from_bytes(&group_data_blob) + let group_data: BTreeMap = bcs::from_bytes(&group_data_blob) .map_err(|_| anyhow::Error::msg("Resource group deserialization error"))?; let maybe_group_size = if return_group_size { @@ -219,18 +248,24 @@ impl<'a, S: StateView> TResourceGroupResolver for StorageAdapter<'a, S> { } } -impl<'a, S: StateView> ResourceResolver for StorageAdapter<'a, S> { +impl<'e, E: ExecutorView> AptosMoveResolver for StorageAdapter<'e, E> { + fn release_resource_group_cache(&self) -> HashMap> { + self.resource_group_cache.take() + } +} + +impl<'e, E: ExecutorView> ResourceResolver for StorageAdapter<'e, E> { fn get_resource_with_metadata( &self, address: &AccountAddress, struct_tag: &StructTag, metadata: &[Metadata], - ) -> anyhow::Result<(Option>, usize)> { + ) -> anyhow::Result<(Option, usize)> { Ok(self.get_any_resource(address, struct_tag, metadata)?) } } -impl<'a, S: StateView> ModuleResolver for StorageAdapter<'a, S> { +impl<'e, E: ExecutorView> ModuleResolver for StorageAdapter<'e, E> { fn get_module_metadata(&self, module_id: &ModuleId) -> Vec { let module_bytes = match self.get_module(module_id) { Ok(Some(bytes)) => bytes, @@ -246,97 +281,108 @@ impl<'a, S: StateView> ModuleResolver for StorageAdapter<'a, S> { module.metadata } - fn get_module(&self, module_id: &ModuleId) -> Result>, Error> { - // REVIEW: cache this? - let ap = AccessPath::from(module_id); - Ok(self.get(ap).map_err(|e| e.finish(Location::Undefined))?) + fn get_module(&self, module_id: &ModuleId) -> Result, Error> { + let access_path = AccessPath::from(module_id); + Ok(self + .executor_view + .get_module_bytes(&StateKey::access_path(access_path)) + .map_err(|_| { + PartialVMError::new(StatusCode::STORAGE_ERROR).finish(Location::Undefined) + })?) 
} } -impl<'a, S: StateView> TableResolver for StorageAdapter<'a, S> { +impl<'e, E: ExecutorView> TableResolver for StorageAdapter<'e, E> { fn resolve_table_entry( &self, handle: &TableHandle, key: &[u8], - ) -> Result>, Error> { - self.get_state_value_bytes(&StateKey::table_item((*handle).into(), key.to_vec())) + ) -> Result, Error> { + self.executor_view + .get_resource_bytes(&StateKey::table_item((*handle).into(), key.to_vec()), None) } } -impl<'a, S: StateView> AggregatorResolver for StorageAdapter<'a, S> { - fn resolve_aggregator_value( +impl<'e, E: ExecutorView> TAggregatorView for StorageAdapter<'e, E> { + type IdentifierV1 = StateKey; + type IdentifierV2 = AggregatorID; + + fn get_aggregator_v1_state_value( &self, - id: &AggregatorID, - _mode: AggregatorReadMode, - ) -> Result { - let AggregatorID { handle, key } = id; - let state_key = StateKey::table_item(*handle, key.0.to_vec()); - match self.get_state_value_u128(&state_key)? { - Some(value) => Ok(value), - None => { - anyhow::bail!("Could not find the value of the aggregator") - }, - } + id: &Self::IdentifierV1, + mode: AggregatorReadMode, + ) -> anyhow::Result> { + self.executor_view.get_aggregator_v1_state_value(id, mode) } +} - fn generate_aggregator_id(&self) -> AggregatorID { - unimplemented!("Aggregator id generation will be implemented for V2 aggregators.") +impl<'e, E: ExecutorView> ConfigStorage for StorageAdapter<'e, E> { + fn fetch_config(&self, access_path: AccessPath) -> Option { + self.executor_view + .get_resource_bytes(&StateKey::access_path(access_path), None) + .ok()? } } -impl<'a, S: StateView> ConfigStorage for StorageAdapter<'a, S> { - fn fetch_config(&self, access_path: AccessPath) -> Option> { - self.get(access_path).ok()? - } +/// Converts `StateView` into `AptosMoveResolver`. +pub trait AsMoveResolver { + fn as_move_resolver(&self) -> StorageAdapter>; } -impl<'a, S: StateView> StateStorageUsageResolver for StorageAdapter<'a, S> { - fn get_state_storage_usage(&self) -> Result { - self.state_store.get_usage() +impl AsMoveResolver for S { + fn as_move_resolver(&self) -> StorageAdapter> { + StorageAdapter::from_owned(ExecutorViewBase::new(self)) } } -pub trait AsMoveResolver { - fn as_move_resolver(&self) -> StorageAdapter; -} +impl<'e, E: ExecutorView> StateStorageView for StorageAdapter<'e, E> { + fn id(&self) -> StateViewId { + self.executor_view.id() + } -impl AsMoveResolver for S { - fn as_move_resolver(&self) -> StorageAdapter { - StorageAdapter::new(self) + fn get_usage(&self) -> anyhow::Result { + self.executor_view.get_usage() } } -impl<'a, S: StateView> StateValueMetadataResolver for StorageAdapter<'a, S> { - fn get_state_value_metadata( +impl<'e, E: ExecutorView> StateValueMetadataResolver for StorageAdapter<'e, E> { + fn get_module_state_value_metadata( &self, state_key: &StateKey, - ) -> anyhow::Result>> { - let maybe_state_value = self.state_store.get_state_value(state_key)?; - Ok(maybe_state_value.map(StateValue::into_metadata)) + ) -> anyhow::Result> { + self.executor_view + .get_module_state_value_metadata(state_key) } -} -// We need to implement StateView for adapter because: -// 1. When processing write set payload, storage is accessed -// directly. -// 2. When stacking Storage adapters on top of each other, e.g. -// in epilogue. 
-impl<'a, S: StateView> TStateView for StorageAdapter<'a, S> { - type Key = StateKey; + fn get_resource_state_value_metadata( + &self, + state_key: &StateKey, + ) -> anyhow::Result> { + self.executor_view + .get_resource_state_value_metadata(state_key) + } - fn get_state_value(&self, state_key: &Self::Key) -> anyhow::Result> { - self.state_store.get_state_value(state_key) + fn get_resource_group_state_value_metadata( + &self, + _state_key: &StateKey, + ) -> anyhow::Result> { + // TODO: forward to self.executor_view. + unimplemented!("Resource group metadata handling not yet implemented"); } +} - fn get_usage(&self) -> anyhow::Result { - self.state_store.get_usage() +// Allows to extract the view from `StorageAdapter`. +impl<'e, E: ExecutorView> AsExecutorView for StorageAdapter<'e, E> { + fn as_executor_view(&self) -> &dyn ExecutorView { + self.executor_view.deref() } } #[cfg(test)] mod tests { use super::*; - use claims::{assert_gt, assert_lt, assert_ok_eq, assert_some, assert_some_eq}; + use aptos_state_view::TStateView; + use claims::{assert_gt, assert_lt, assert_some, assert_some_eq}; use move_core_types::{identifier::Identifier, language_storage::TypeTag}; use std::cmp::max; @@ -390,7 +436,7 @@ mod tests { Ok(self .group .get(state_key) - .map(|entry| StateValue::new_legacy(entry.blob.clone()))) + .map(|entry| StateValue::new_legacy(entry.blob.clone().into()))) } fn get_usage(&self) -> anyhow::Result { @@ -428,7 +474,7 @@ mod tests { #[test] fn test_version_flags() { let state_view = MockStateView::new(); - let mut s = StorageAdapter::new(&state_view); + let mut s = state_view.as_move_resolver(); assert!(!s.accurate_byte_count); assert!(!s.group_byte_count_as_sum); @@ -455,7 +501,7 @@ mod tests { #[should_panic] fn test_already_cached() { let state_view = MockStateView::new(); - let s = StorageAdapter::new(&state_view); + let s = state_view.as_move_resolver(); let tag_0 = tag_0(); let tag_1 = tag_1(); @@ -469,7 +515,7 @@ mod tests { #[test] fn test_get_resource_by_tag() { let state_view = MockStateView::new(); - let s = StorageAdapter::new(&state_view); + let s = state_view.as_move_resolver(); let key_0 = StateKey::raw(vec![0]); let key_1 = StateKey::raw(vec![1]); @@ -524,7 +570,7 @@ mod tests { #[test] fn test_size_by_blob_len() { let state_view = MockStateView::new(); - let mut s = StorageAdapter::new(&state_view); + let mut s = state_view.as_move_resolver(); s = s.init(&Features::default(), 10); // Tested separately, but re-confirm for the sanity of this test. assert!(s.accurate_byte_count); @@ -567,7 +613,7 @@ mod tests { #[test] fn test_size_as_sum() { let state_view = MockStateView::new(); - let mut s = StorageAdapter::new(&state_view); + let mut s = state_view.as_move_resolver(); s = s.init(&Features::default(), 20); // Tested separately, but re-confirm for the sanity of this test. assert!(s.accurate_byte_count); @@ -619,32 +665,36 @@ mod tests { ); } - #[test] - fn test_exists_resource_in_group() { - let state_view = MockStateView::new(); - let mut s = StorageAdapter::new(&state_view); - s = s.init(&Features::default(), 10); - // Tested separately, but re-confirm for the sanity of this test. - assert!(s.accurate_byte_count); - assert!(!s.group_byte_count_as_sum); - - let key_1 = StateKey::raw(vec![1]); - let tag_0 = tag_0(); - let tag_1 = tag_1(); - let tag_2 = tag_2(); - - assert_ok_eq!(s.resource_exists_in_group(&key_1, &tag_0), true); - // Release the cache to test contents, and to avoid assert when querying key_1 again. 
- let cache = s.release_resource_group_cache(); - assert_eq!(cache.len(), 1); - assert_some!(cache.get(&key_1)); - - assert_ok_eq!(s.resource_exists_in_group(&key_1, &tag_1), true); - // Release the cache to test contents, and to avoid assert when querying key_1 again. - let cache = s.release_resource_group_cache(); - assert_eq!(cache.len(), 1); - assert_some!(cache.get(&key_1)); - - assert_ok_eq!(s.resource_exists_in_group(&key_1, &tag_2), false); - } + // TODO: bring back with proper implementation of resource exists in a group. Storage adapter + // needs this because resource group change-set preparation will check existence. + // #[test] + // fn test_exists_resource_in_group() { + // let state_view = MockStateView::new(); + // let mut s = state_view.as_move_resolver(); + // s = s.init(&Features::default(), 10); + // // Tested separately, but re-confirm for the sanity of this test. + // assert!(s.accurate_byte_count); + // assert!(!s.group_byte_count_as_sum); + + // let key_1 = StateKey::raw(vec![1]); + // let tag_0 = tag_0(); + // let tag_1 = tag_1(); + // let tag_2 = tag_2(); + + // // TODO: test when we implement the group methods. + // assert_ok_eq!(s.resource_exists_in_group(&key_1, &tag_0), true); + + // // Release the cache to test contents, and to avoid assert when querying key_1 again. + // let cache = s.release_resource_group_cache(); + // assert_eq!(cache.len(), 1); + // assert_some!(cache.get(&key_1)); + + // assert_ok_eq!(s.resource_exists_in_group(&key_1, &tag_1), true); + // // Release the cache to test contents, and to avoid assert when querying key_1 again. + // let cache = s.release_resource_group_cache(); + // assert_eq!(cache.len(), 1); + // assert_some!(cache.get(&key_1)); + + // assert_ok_eq!(s.resource_exists_in_group(&key_1, &tag_2), false); + // } } diff --git a/aptos-move/aptos-vm/src/lib.rs b/aptos-move/aptos-vm/src/lib.rs index eae0de371b3aa..39642d6811143 100644 --- a/aptos-move/aptos-vm/src/lib.rs +++ b/aptos-move/aptos-vm/src/lib.rs @@ -114,6 +114,7 @@ mod errors; pub mod move_vm_ext; pub mod natives; pub mod sharded_block_executor; +pub mod storage_adapter; pub mod system_module_names; pub mod testing; pub mod transaction_metadata; diff --git a/aptos-move/aptos-vm/src/move_vm_ext/mod.rs b/aptos-move/aptos-vm/src/move_vm_ext/mod.rs index 40b8216d98f28..09de4e75608fa 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/mod.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/mod.rs @@ -10,7 +10,7 @@ mod vm; pub(crate) mod write_op_converter; pub use crate::move_vm_ext::{ - resolver::{AptosMoveResolver, MoveResolverExt}, + resolver::{AptosMoveResolver, AsExecutorView}, respawned_session::RespawnedSession, session::{SessionExt, SessionId}, vm::{get_max_binary_format_version, verifier_config, MoveVmExt}, diff --git a/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs b/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs index fef582adef9a2..7626fd6ab3d73 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs @@ -2,28 +2,28 @@ // SPDX-License-Identifier: Apache-2.0 use aptos_aggregator::resolver::AggregatorResolver; -use aptos_framework::natives::state_storage::StateStorageUsageResolver; -use aptos_state_view::StateView; use aptos_table_natives::TableResolver; use aptos_types::{on_chain_config::ConfigStorage, state_store::state_key::StateKey}; -use aptos_vm_types::resolver::{ResourceGroupResolver, StateValueMetadataResolver}; +use aptos_vm_types::resolver::{ExecutorView, StateStorageView, StateValueMetadataResolver}; 
+use bytes::Bytes; use move_core_types::{language_storage::StructTag, resolver::MoveResolver}; use std::collections::{BTreeMap, HashMap}; /// A general resolver used by AptosVM. Allows to implement custom hooks on /// top of storage, e.g. get resources from resource groups, etc. +/// MoveResolver implements ResourceResolver and ModuleResolver pub trait AptosMoveResolver: - MoveResolver - + AggregatorResolver + AggregatorResolver + + ConfigStorage + + MoveResolver + TableResolver - + StateStorageUsageResolver + StateValueMetadataResolver - + ConfigStorage - + ResourceGroupResolver + + StateStorageView + + AsExecutorView { - fn release_resource_group_cache(&self) -> HashMap>>; + fn release_resource_group_cache(&self) -> HashMap>; } -// TODO: Remove dependency on StateView. -pub trait MoveResolverExt: AptosMoveResolver + StateView {} -impl MoveResolverExt for T {} +pub trait AsExecutorView { + fn as_executor_view(&self) -> &dyn ExecutorView; +} diff --git a/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs b/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs index 6c06ef6a94895..502ce3c60c444 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs @@ -2,21 +2,31 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - aptos_vm_impl::AptosVMImpl, data_cache::StorageAdapter, - move_vm_ext::{SessionExt, SessionId}, + move_vm_ext::{AptosMoveResolver, SessionExt, SessionId}, + AptosVM, +}; +use aptos_aggregator::{ + aggregator_extension::AggregatorID, + resolver::{AggregatorReadMode, TAggregatorView}, }; -use anyhow::{bail, Result}; use aptos_gas_algebra::Fee; -use aptos_state_view::{StateView, StateViewId, TStateView}; +use aptos_state_view::StateViewId; use aptos_types::{ state_store::{ state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, }, write_set::TransactionWrite, }; -use aptos_vm_types::{change_set::VMChangeSet, storage::ChangeSetConfigs}; -use move_core_types::vm_status::{err_msg, StatusCode, VMStatus}; +use aptos_vm_types::{ + change_set::VMChangeSet, + resolver::{ExecutorView, StateStorageView, TModuleView, TResourceView}, + storage::ChangeSetConfigs, +}; +use move_core_types::{ + value::MoveTypeLayout, + vm_status::{err_msg, StatusCode, VMStatus}, +}; /// We finish the session after the user transaction is done running to get the change set and /// charge gas and storage fee based on it before running storage refunds and the transaction @@ -24,10 +34,10 @@ use move_core_types::vm_status::{err_msg, StatusCode, VMStatus}; /// the base state view, and this struct implements that. 
#[ouroboros::self_referencing] pub struct RespawnedSession<'r, 'l> { - state_view: ChangeSetStateView<'r>, - #[borrows(state_view)] + executor_view: ExecutorViewWithChangeSet<'r>, + #[borrows(executor_view)] #[covariant] - resolver: StorageAdapter<'this, ChangeSetStateView<'r>>, + resolver: StorageAdapter<'this, ExecutorViewWithChangeSet<'r>>, #[borrows(resolver)] #[not_covariant] session: Option>, @@ -36,24 +46,19 @@ pub struct RespawnedSession<'r, 'l> { impl<'r, 'l> RespawnedSession<'r, 'l> { pub fn spawn( - vm: &'l AptosVMImpl, + vm: &'l AptosVM, session_id: SessionId, - base_state_view: &'r dyn StateView, + base: &'r dyn AptosMoveResolver, previous_session_change_set: VMChangeSet, storage_refund: Fee, ) -> Result { - let state_view = ChangeSetStateView::new(base_state_view, previous_session_change_set)?; + let executor_view = + ExecutorViewWithChangeSet::new(base.as_executor_view(), previous_session_change_set); Ok(RespawnedSessionBuilder { - state_view, - resolver_builder: |state_view| { - StorageAdapter::new_with_cached_config( - state_view, - vm.get_gas_feature_version(), - vm.get_features(), - ) - }, - session_builder: |resolver| Some(vm.new_session(resolver, session_id)), + executor_view, + resolver_builder: |executor_view| vm.as_move_resolver(executor_view), + session_builder: |resolver| Some(vm.0.new_session(resolver, session_id)), storage_refund, } .build()) @@ -70,7 +75,20 @@ impl<'r, 'l> RespawnedSession<'r, 'l> { let additional_change_set = self.with_session_mut(|session| { session.take().unwrap().finish(&mut (), change_set_configs) })?; - let mut change_set = self.into_heads().state_view.change_set; + if additional_change_set.has_creation() { + // After respawning, for example, in the epilogue, there shouldn't be new slots + // created, otherwise there's a potential vulnerability like this: + // 1. slot created by the user + // 2. another user transaction deletes the slot and claims the refund + // 3. in the epilogue the same slot gets recreated, and the final write set will have + // a ModifyWithMetadata carrying the original metadata + // 4. user keeps doing the same and repeatedly claim refund out of the slot. + return Err(VMStatus::error( + StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, + err_msg("Unexpected storage allocation after respawning session."), + )); + } + let mut change_set = self.into_heads().executor_view.change_set; change_set .squash_additional_change_set(additional_change_set, change_set_configs) .map_err(|_err| { @@ -87,53 +105,81 @@ impl<'r, 'l> RespawnedSession<'r, 'l> { } } -/// A state view as if a change set is applied on top of the base state view. -struct ChangeSetStateView<'r> { - base: &'r dyn StateView, +/// Adapter to allow resolving the calls to `ExecutorView` via change set. 
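A minimal, standalone sketch of the overlay idea that `ExecutorViewWithChangeSet` (defined just below) implements: reads consult the pending change set first and fall through to the base view only on a miss. The types here are illustrative stand-ins, not the real `StateKey`/`WriteOp` machinery.

use std::collections::HashMap;

// A deleted key is represented as Some(None); an untouched key is absent from `pending`.
struct Overlay<'a> {
    pending: HashMap<String, Option<Vec<u8>>>,
    base: &'a HashMap<String, Vec<u8>>,
}

impl<'a> Overlay<'a> {
    fn get(&self, key: &str) -> Option<Vec<u8>> {
        match self.pending.get(key) {
            Some(write) => write.clone(),        // pending write (or deletion) wins
            None => self.base.get(key).cloned(), // otherwise read the committed base state
        }
    }
}

fn main() {
    let base = HashMap::from([("a".to_string(), vec![1u8]), ("b".to_string(), vec![2u8])]);
    let overlay = Overlay {
        pending: HashMap::from([("a".to_string(), Some(vec![9u8])), ("b".to_string(), None)]),
        base: &base,
    };
    assert_eq!(overlay.get("a"), Some(vec![9])); // overridden by the change set
    assert_eq!(overlay.get("b"), None);          // deleted by the change set
}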
+pub struct ExecutorViewWithChangeSet<'r> { + base: &'r dyn ExecutorView, change_set: VMChangeSet, } -impl<'r> ChangeSetStateView<'r> { - pub fn new(base: &'r dyn StateView, change_set: VMChangeSet) -> Result { - Ok(Self { base, change_set }) +impl<'r> ExecutorViewWithChangeSet<'r> { + pub(crate) fn new(base: &'r dyn ExecutorView, change_set: VMChangeSet) -> Self { + Self { base, change_set } + } +} + +impl<'r> TAggregatorView for ExecutorViewWithChangeSet<'r> { + type IdentifierV1 = StateKey; + type IdentifierV2 = AggregatorID; + + fn get_aggregator_v1_state_value( + &self, + id: &Self::IdentifierV1, + mode: AggregatorReadMode, + ) -> anyhow::Result> { + match self.change_set.aggregator_v1_delta_set().get(id) { + Some(delta_op) => Ok(self + .base + .try_convert_aggregator_v1_delta_into_write_op(id, delta_op, mode)? + .as_state_value()), + None => match self.change_set.aggregator_v1_write_set().get(id) { + Some(write_op) => Ok(write_op.as_state_value()), + None => self.base.get_aggregator_v1_state_value(id, mode), + }, + } } } -impl<'r> TStateView for ChangeSetStateView<'r> { +impl<'r> TResourceView for ExecutorViewWithChangeSet<'r> { type Key = StateKey; + type Layout = MoveTypeLayout; - fn id(&self) -> StateViewId { - self.base.id() + fn get_resource_state_value( + &self, + state_key: &Self::Key, + maybe_layout: Option<&Self::Layout>, + ) -> anyhow::Result> { + match self.change_set.resource_write_set().get(state_key) { + Some(write_op) => Ok(write_op.as_state_value()), + None => self.base.get_resource_state_value(state_key, maybe_layout), + } } +} - fn get_state_value(&self, state_key: &Self::Key) -> Result> { - // TODO: `get_state_value` should differentiate between different write types. - match self.change_set.aggregator_v1_delta_set().get(state_key) { - Some(delta_op) => Ok(delta_op - .try_into_write_op(self.base, state_key)? 
- .as_state_value()), - None => { - let cached_value = self - .change_set - .write_set_iter() - .find(|(k, _)| *k == state_key) - .map(|(_, v)| v); - match cached_value { - Some(write_op) => Ok(write_op.as_state_value()), - None => self.base.get_state_value(state_key), - } - }, +impl<'r> TModuleView for ExecutorViewWithChangeSet<'r> { + type Key = StateKey; + + fn get_module_state_value(&self, state_key: &Self::Key) -> anyhow::Result> { + match self.change_set.module_write_set().get(state_key) { + Some(write_op) => Ok(write_op.as_state_value()), + None => self.base.get_module_state_value(state_key), } } +} - fn get_usage(&self) -> Result { - bail!("Unexpected access to get_usage()") +impl<'r> StateStorageView for ExecutorViewWithChangeSet<'r> { + fn id(&self) -> StateViewId { + self.base.id() + } + + fn get_usage(&self) -> anyhow::Result { + anyhow::bail!("Unexpected access to get_usage()") } } #[cfg(test)] mod test { use super::*; + use crate::storage_adapter::AsExecutorView; use aptos_aggregator::delta_change_set::{delta_add, serialize}; use aptos_language_e2e_tests::data_store::FakeDataStore; use aptos_types::write_set::WriteOp; @@ -154,25 +200,35 @@ mod test { } fn write(v: u128) -> WriteOp { - WriteOp::Modification(serialize(&v)) + WriteOp::Modification(serialize(&v).into()) + } + + fn read_resource(view: &ExecutorViewWithChangeSet, s: impl ToString) -> u128 { + bcs::from_bytes(&view.get_resource_bytes(&key(s), None).unwrap().unwrap()).unwrap() } - fn read(view: &ChangeSetStateView, s: impl ToString) -> u128 { - view.get_state_value_u128(&key(s)).unwrap().unwrap() + fn read_module(view: &ExecutorViewWithChangeSet, s: impl ToString) -> u128 { + bcs::from_bytes(&view.get_module_bytes(&key(s)).unwrap().unwrap()).unwrap() + } + + fn read_aggregator(view: &ExecutorViewWithChangeSet, s: impl ToString) -> u128 { + view.get_aggregator_v1_value(&key(s), AggregatorReadMode::Precise) + .unwrap() + .unwrap() } #[test] fn test_change_set_state_view() { - let mut base_view = FakeDataStore::default(); - base_view.set_legacy(key("module_base"), serialize(&10)); - base_view.set_legacy(key("module_both"), serialize(&20)); + let mut state_view = FakeDataStore::default(); + state_view.set_legacy(key("module_base"), serialize(&10)); + state_view.set_legacy(key("module_both"), serialize(&20)); - base_view.set_legacy(key("resource_base"), serialize(&30)); - base_view.set_legacy(key("resource_both"), serialize(&40)); + state_view.set_legacy(key("resource_base"), serialize(&30)); + state_view.set_legacy(key("resource_both"), serialize(&40)); - base_view.set_legacy(key("aggregator_base"), serialize(&50)); - base_view.set_legacy(key("aggregator_both"), serialize(&60)); - base_view.set_legacy(key("aggregator_delta_set"), serialize(&70)); + state_view.set_legacy(key("aggregator_base"), serialize(&50)); + state_view.set_legacy(key("aggregator_both"), serialize(&60)); + state_view.set_legacy(key("aggregator_delta_set"), serialize(&70)); let resource_write_set = HashMap::from([ (key("resource_both"), write(80)), @@ -201,19 +257,21 @@ mod test { &NoOpChangeSetChecker, ) .unwrap(); - let view = ChangeSetStateView::new(&base_view, change_set).unwrap(); - assert_eq!(read(&view, "module_base"), 10); - assert_eq!(read(&view, "module_both"), 100); - assert_eq!(read(&view, "module_write_set"), 110); + let executor_view = state_view.as_executor_view(); + let view = ExecutorViewWithChangeSet::new(&executor_view, change_set); + + assert_eq!(read_module(&view, "module_base"), 10); + assert_eq!(read_module(&view, "module_both"), 
100); + assert_eq!(read_module(&view, "module_write_set"), 110); - assert_eq!(read(&view, "resource_base"), 30); - assert_eq!(read(&view, "resource_both"), 80); - assert_eq!(read(&view, "resource_write_set"), 90); + assert_eq!(read_resource(&view, "resource_base"), 30); + assert_eq!(read_resource(&view, "resource_both"), 80); + assert_eq!(read_resource(&view, "resource_write_set"), 90); - assert_eq!(read(&view, "aggregator_base"), 50); - assert_eq!(read(&view, "aggregator_both"), 120); - assert_eq!(read(&view, "aggregator_write_set"), 130); - assert_eq!(read(&view, "aggregator_delta_set"), 71); + assert_eq!(read_aggregator(&view, "aggregator_base"), 50); + assert_eq!(read_aggregator(&view, "aggregator_both"), 120); + assert_eq!(read_aggregator(&view, "aggregator_write_set"), 130); + assert_eq!(read_aggregator(&view, "aggregator_delta_set"), 71); } } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session.rs b/aptos-move/aptos-vm/src/move_vm_ext/session.rs index e4a171d5a6dd6..765339c75dc7b 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session.rs @@ -7,7 +7,6 @@ use crate::{ move_vm_ext::{write_op_converter::WriteOpConverter, AptosMoveResolver}, transaction_metadata::TransactionMetadata, }; -use aptos_aggregator::aggregator_extension::AggregatorID; use aptos_crypto::{hash::CryptoHash, HashValue}; use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher}; use aptos_framework::natives::{ @@ -21,6 +20,7 @@ use aptos_types::{ state_store::state_key::StateKey, transaction::SignatureCheckedTransaction, }; use aptos_vm_types::{change_set::VMChangeSet, storage::ChangeSetConfigs}; +use bytes::Bytes; use move_binary_format::errors::{Location, PartialVMError, VMResult}; use move_core_types::{ account_address::AccountAddress, @@ -210,7 +210,7 @@ impl<'r, 'l> SessionExt<'r, 'l> { remote: &dyn AptosMoveResolver, change_set: MoveChangeSet, ap_cache: &mut C, - ) -> VMResult<(MoveChangeSet, HashMap>>)> { + ) -> VMResult<(MoveChangeSet, HashMap>)> { // The use of this implies that we could theoretically call unwrap with no consequences, // but using unwrap means the code panics if someone can come up with an attack. let common_error = || { @@ -280,9 +280,17 @@ impl<'r, 'l> SessionExt<'r, 'l> { let op = if source_data.is_empty() { MoveStorageOp::Delete } else if create { - MoveStorageOp::New(bcs::to_bytes(&source_data).map_err(|_| common_error())?) + MoveStorageOp::New( + bcs::to_bytes(&source_data) + .map_err(|_| common_error())? + .into(), + ) } else { - MoveStorageOp::Modify(bcs::to_bytes(&source_data).map_err(|_| common_error())?) + MoveStorageOp::Modify( + bcs::to_bytes(&source_data) + .map_err(|_| common_error())? 
+ .into(), + ) }; resource_group_change_set.insert(state_key, op); } @@ -294,7 +302,7 @@ impl<'r, 'l> SessionExt<'r, 'l> { pub(crate) fn convert_change_set( woc: &WriteOpConverter, change_set: MoveChangeSet, - resource_group_change_set: HashMap>>, + resource_group_change_set: HashMap>, events: Vec, table_change_set: TableChangeSet, aggregator_change_set: AggregatorChangeSet, @@ -310,7 +318,7 @@ impl<'r, 'l> SessionExt<'r, 'l> { let (modules, resources) = account_changeset.into_inner(); for (struct_tag, blob_op) in resources { let state_key = StateKey::access_path(ap_cache.get_resource_path(addr, struct_tag)); - let op = woc.convert( + let op = woc.convert_resource( &state_key, blob_op, configs.legacy_resource_creation_as_modification(), @@ -322,39 +330,37 @@ impl<'r, 'l> SessionExt<'r, 'l> { for (name, blob_op) in modules { let state_key = StateKey::access_path(ap_cache.get_module_path(ModuleId::new(addr, name))); - let op = woc.convert(&state_key, blob_op, false)?; + let op = woc.convert_module(&state_key, blob_op, false)?; module_write_set.insert(state_key, op); } } for (state_key, blob_op) in resource_group_change_set { - let op = woc.convert(&state_key, blob_op, false)?; + let op = woc.convert_resource(&state_key, blob_op, false)?; resource_write_set.insert(state_key, op); } for (handle, change) in table_change_set.changes { for (key, value_op) in change.entries { let state_key = StateKey::table_item(handle.into(), key); - let op = woc.convert(&state_key, value_op, false)?; + let op = woc.convert_resource(&state_key, value_op, false)?; resource_write_set.insert(state_key, op); } } for (id, change) in aggregator_change_set.changes { - let AggregatorID { handle, key } = id; - let key_bytes = key.0.to_vec(); - let state_key = StateKey::table_item(handle, key_bytes); - + let state_key = id.into_state_key(); match change { AggregatorChange::Write(value) => { - let write_op = woc.convert_aggregator_mod(&state_key, value)?; + let write_op = woc.convert_aggregator_modification(&state_key, value)?; aggregator_write_set.insert(state_key, write_op); }, AggregatorChange::Merge(delta_op) => { aggregator_delta_set.insert(state_key, delta_op); }, AggregatorChange::Delete => { - let write_op = woc.convert(&state_key, MoveStorageOp::Delete, false)?; + let write_op = + woc.convert_aggregator(&state_key, MoveStorageOp::Delete, false)?; aggregator_write_set.insert(state_key, write_op); }, } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/write_op_converter.rs b/aptos-move/aptos-vm/src/move_vm_ext/write_op_converter.rs index f6ac9008b5c3c..9c811e1f9a9cc 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/write_op_converter.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/write_op_converter.rs @@ -5,9 +5,13 @@ use crate::move_vm_ext::AptosMoveResolver; use aptos_aggregator::delta_change_set::serialize; use aptos_types::{ on_chain_config::{CurrentTimeMicroseconds, OnChainConfig}, - state_store::{state_key::StateKey, state_value::StateValueMetadata}, + state_store::{ + state_key::StateKey, + state_value::{StateValueMetadata, StateValueMetadataKind}, + }, write_set::WriteOp, }; +use bytes::Bytes; use move_core_types::{ effects::Op as MoveStorageOp, vm_status::{err_msg, StatusCode, VMStatus}, @@ -18,7 +22,30 @@ pub(crate) struct WriteOpConverter<'r> { new_slot_metadata: Option, } +macro_rules! 
convert_impl { + ($convert_func_name:ident, $get_metadata_callback:ident) => { + pub(crate) fn $convert_func_name( + &self, + state_key: &StateKey, + move_storage_op: MoveStorageOp, + legacy_creation_as_modification: bool, + ) -> Result { + self.convert( + self.remote.$get_metadata_callback(state_key), + move_storage_op, + legacy_creation_as_modification, + ) + } + }; +} + impl<'r> WriteOpConverter<'r> { + convert_impl!(convert_resource, get_resource_state_value_metadata); + + convert_impl!(convert_module, get_module_state_value_metadata); + + convert_impl!(convert_aggregator, get_aggregator_v1_state_value_metadata); + pub(crate) fn new( remote: &'r dyn AptosMoveResolver, is_storage_slot_metadata_enabled: bool, @@ -38,24 +65,21 @@ impl<'r> WriteOpConverter<'r> { } } - pub(crate) fn convert( + fn convert( &self, - state_key: &StateKey, - move_storage_op: MoveStorageOp>, + state_value_metadata_result: anyhow::Result>, + move_storage_op: MoveStorageOp, legacy_creation_as_modification: bool, ) -> Result { use MoveStorageOp::*; use WriteOp::*; - let maybe_existing_metadata = - self.remote - .get_state_value_metadata(state_key) - .map_err(|_| { - VMStatus::error( - StatusCode::STORAGE_ERROR, - err_msg("Storage read failed when converting change set."), - ) - })?; + let maybe_existing_metadata = state_value_metadata_result.map_err(|_| { + VMStatus::error( + StatusCode::STORAGE_ERROR, + err_msg("Storage read failed when converting change set."), + ) + })?; let write_op = match (maybe_existing_metadata, move_storage_op) { (None, Modify(_) | Delete) => { @@ -103,16 +127,16 @@ impl<'r> WriteOpConverter<'r> { Ok(write_op) } - pub(crate) fn convert_aggregator_mod( + pub(crate) fn convert_aggregator_modification( &self, state_key: &StateKey, value: u128, ) -> Result { let maybe_existing_metadata = self .remote - .get_state_value_metadata(state_key) + .get_aggregator_v1_state_value_metadata(state_key) .map_err(|_| VMStatus::error(StatusCode::STORAGE_ERROR, None))?; - let data = serialize(&value); + let data = serialize(&value).into(); let op = match maybe_existing_metadata { None => { diff --git a/aptos-move/aptos-vm/src/natives.rs b/aptos-move/aptos-vm/src/natives.rs index ec11f85657816..ba6e24545edbc 100644 --- a/aptos-move/aptos-vm/src/natives.rs +++ b/aptos-move/aptos-vm/src/natives.rs @@ -5,23 +5,27 @@ #[cfg(feature = "testing")] use anyhow::Error; #[cfg(feature = "testing")] -use aptos_aggregator::resolver::AggregatorReadMode; -#[cfg(feature = "testing")] -use aptos_aggregator::{aggregator_extension::AggregatorID, resolver::AggregatorResolver}; -#[cfg(feature = "testing")] -use aptos_framework::natives::cryptography::algebra::AlgebraContext; +use aptos_aggregator::{ + aggregator_extension::AggregatorID, + resolver::{AggregatorReadMode, TAggregatorView}, +}; #[cfg(feature = "testing")] -use aptos_framework::natives::event::NativeEventContext; +use aptos_framework::natives::{cryptography::algebra::AlgebraContext, event::NativeEventContext}; use aptos_gas_schedule::{MiscGasParameters, NativeGasParameters, LATEST_GAS_FEATURE_VERSION}; use aptos_native_interface::SafeNativeBuilder; #[cfg(feature = "testing")] use aptos_table_natives::{TableHandle, TableResolver}; -#[cfg(feature = "testing")] -use aptos_types::chain_id::ChainId; use aptos_types::{ account_config::CORE_CODE_ADDRESS, on_chain_config::{Features, TimedFeatures}, }; +#[cfg(feature = "testing")] +use aptos_types::{ + chain_id::ChainId, + state_store::{state_key::StateKey, state_value::StateValue}, +}; +#[cfg(feature = "testing")] +use 
bytes::Bytes; use move_vm_runtime::native_functions::NativeFunctionTable; #[cfg(feature = "testing")] use { @@ -38,19 +42,16 @@ use { struct AptosBlankStorage; #[cfg(feature = "testing")] -impl AggregatorResolver for AptosBlankStorage { - fn resolve_aggregator_value( +impl TAggregatorView for AptosBlankStorage { + type IdentifierV1 = StateKey; + type IdentifierV2 = AggregatorID; + + fn get_aggregator_v1_state_value( &self, - _id: &AggregatorID, + _id: &Self::IdentifierV1, _mode: AggregatorReadMode, - ) -> Result { - // All Move tests have aggregator in Data state, and so the resolver should - // not be called. - unreachable!("Aggregator cannot be resolved for blank storage") - } - - fn generate_aggregator_id(&self) -> AggregatorID { - unimplemented!("Aggregator id generation will be implemented for V2 aggregators.") + ) -> anyhow::Result> { + Ok(None) } } @@ -60,7 +61,7 @@ impl TableResolver for AptosBlankStorage { &self, _handle: &TableHandle, _key: &[u8], - ) -> Result>, Error> { + ) -> Result, Error> { Ok(None) } } diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/aggr_overridden_state_view.rs b/aptos-move/aptos-vm/src/sharded_block_executor/aggr_overridden_state_view.rs new file mode 100644 index 0000000000000..246cee168efed --- /dev/null +++ b/aptos-move/aptos-vm/src/sharded_block_executor/aggr_overridden_state_view.rs @@ -0,0 +1,50 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::Result; +use aptos_state_view::{StateView, TStateView}; +use aptos_types::{ + state_store::{ + state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, + }, + write_set::TOTAL_SUPPLY_STATE_KEY, +}; + +pub const TOTAL_SUPPLY_AGGR_BASE_VAL: u128 = u128::MAX >> 1; +#[derive(Clone)] +pub struct AggregatorOverriddenStateView<'a, S> { + base_view: &'a S, + total_supply_aggr_base_val: u128, +} + +impl<'a, S: StateView + Sync + Send> AggregatorOverriddenStateView<'a, S> { + pub fn new(base_view: &'a S, total_supply_aggr_base_val: u128) -> Self { + Self { + base_view, + total_supply_aggr_base_val, + } + } + + fn total_supply_base_view_override(&self) -> Result> { + Ok(Some(StateValue::new_legacy( + bcs::to_bytes(&self.total_supply_aggr_base_val) + .unwrap() + .into(), + ))) + } +} + +impl<'a, S: StateView + Sync + Send> TStateView for AggregatorOverriddenStateView<'a, S> { + type Key = StateKey; + + fn get_state_value(&self, state_key: &StateKey) -> Result> { + if *state_key == *TOTAL_SUPPLY_STATE_KEY { + return self.total_supply_base_view_override(); + } + self.base_view.get_state_value(state_key) + } + + fn get_usage(&self) -> Result { + self.base_view.get_usage() + } +} diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/cross_shard_state_view.rs b/aptos-move/aptos-vm/src/sharded_block_executor/cross_shard_state_view.rs index c2d25a4618d7b..4bfbb5e30b0cd 100644 --- a/aptos-move/aptos-vm/src/sharded_block_executor/cross_shard_state_view.rs +++ b/aptos-move/aptos-vm/src/sharded_block_executor/cross_shard_state_view.rs @@ -1,6 +1,6 @@ // Copyright © Aptos Foundation -// Parts of the project are originally copyright © Meta Platforms, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use crate::sharded_block_executor::remote_state_value::RemoteStateValue; use anyhow::Result; use aptos_logger::trace; use aptos_state_view::{StateView, TStateView}; @@ -11,59 +11,14 @@ use aptos_types::{ }, transaction::analyzed_transaction::AnalyzedTransaction, }; -use std::{ - collections::{HashMap, HashSet}, - sync::{Arc, Condvar, Mutex}, -}; - -#[derive(Clone)] -enum CrossShardValueStatus { - /// The state value is available as a result of cross shard execution - Ready(Option), - /// We are still waiting for remote shard to push the state value - Waiting, -} - -#[derive(Clone)] -struct CrossShardStateValue { - value_condition: Arc<(Mutex, Condvar)>, -} - -impl CrossShardStateValue { - pub fn waiting() -> Self { - Self { - value_condition: Arc::new((Mutex::new(CrossShardValueStatus::Waiting), Condvar::new())), - } - } - - pub fn set_value(&self, value: Option) { - let (lock, cvar) = &*self.value_condition; - let mut status = lock.lock().unwrap(); - // We only allow setting the value once and it must be in the waiting state - assert!(matches!(*status, CrossShardValueStatus::Waiting)); - *status = CrossShardValueStatus::Ready(value); - cvar.notify_all(); - } - - pub fn get_value(&self) -> Option { - let (lock, cvar) = &*self.value_condition; - let mut status = lock.lock().unwrap(); - while let CrossShardValueStatus::Waiting = *status { - status = cvar.wait(status).unwrap(); - } - match &*status { - CrossShardValueStatus::Ready(value) => value.clone(), - CrossShardValueStatus::Waiting => unreachable!(), - } - } -} +use std::collections::{HashMap, HashSet}; /// A state view for reading cross shard state values. It is backed by a state view /// and a hashmap of cross shard state keys. When a cross shard state value is not /// available in the hashmap, it will be fetched from the underlying base view. 
#[derive(Clone)] pub struct CrossShardStateView<'a, S> { - cross_shard_data: HashMap, + cross_shard_data: HashMap, base_view: &'a S, } @@ -75,7 +30,7 @@ impl<'a, S: StateView + Sync + Send> CrossShardStateView<'a, S> { cross_shard_keys.len(), ); for key in cross_shard_keys { - cross_shard_data.insert(key, CrossShardStateValue::waiting()); + cross_shard_data.insert(key, RemoteStateValue::waiting()); } Self { cross_shard_data, @@ -87,12 +42,7 @@ impl<'a, S: StateView + Sync + Send> CrossShardStateView<'a, S> { fn waiting_count(&self) -> usize { self.cross_shard_data .values() - .filter(|v| { - matches!( - v.value_condition.0.lock().unwrap().clone(), - CrossShardValueStatus::Waiting - ) - }) + .filter(|v| !v.is_ready()) .count() } diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/global_executor.rs b/aptos-move/aptos-vm/src/sharded_block_executor/global_executor.rs index 3def08e9b65b6..208d596e59f07 100644 --- a/aptos-move/aptos-vm/src/sharded_block_executor/global_executor.rs +++ b/aptos-move/aptos-vm/src/sharded_block_executor/global_executor.rs @@ -60,4 +60,8 @@ impl GlobalExecutor { maybe_block_gas_limit, ) } + + pub fn get_executor_thread_pool(&self) -> Arc { + self.executor_thread_pool.clone() + } } diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/local_executor_shard.rs b/aptos-move/aptos-vm/src/sharded_block_executor/local_executor_shard.rs index 3d352fb0ab19b..113c208586fd8 100644 --- a/aptos-move/aptos-vm/src/sharded_block_executor/local_executor_shard.rs +++ b/aptos-move/aptos-vm/src/sharded_block_executor/local_executor_shard.rs @@ -8,6 +8,7 @@ use crate::sharded_block_executor::{ executor_client::{ExecutorClient, ShardedExecutionOutput}, global_executor::GlobalExecutor, messages::CrossShardMsg, + sharded_aggregator_service, sharded_executor_service::ShardedExecutorService, ExecutorShardCommand, }; @@ -189,13 +190,21 @@ impl ExecutorClient for LocalExecutorCl // global transactions will be blocked for cross shard transaction results. This hopefully will help with // finishing the global transactions faster but we need to evaluate if this causes thread contention. If it // does, then we can simply move this call to the end of the function. 
- let global_output = self.global_executor.execute_global_txns( + let mut global_output = self.global_executor.execute_global_txns( global_txns, state_view.as_ref(), maybe_block_gas_limit, )?; - let sharded_output = self.get_output_from_shards()?; + let mut sharded_output = self.get_output_from_shards()?; + + sharded_aggregator_service::aggregate_and_update_total_supply( + &mut sharded_output, + &mut global_output, + state_view.as_ref(), + self.global_executor.get_executor_thread_pool(), + ); + Ok(ShardedExecutionOutput::new(sharded_output, global_output)) } } diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/mod.rs b/aptos-move/aptos-vm/src/sharded_block_executor/mod.rs index fcefc254db4bc..d6bd6bbb7ce4c 100644 --- a/aptos-move/aptos-vm/src/sharded_block_executor/mod.rs +++ b/aptos-move/aptos-vm/src/sharded_block_executor/mod.rs @@ -18,6 +18,7 @@ use aptos_types::{ use move_core_types::vm_status::VMStatus; use std::{marker::PhantomData, sync::Arc}; +pub mod aggr_overridden_state_view; pub mod coordinator_client; mod counters; pub mod cross_shard_client; @@ -26,6 +27,8 @@ pub mod executor_client; pub mod global_executor; pub mod local_executor_shard; pub mod messages; +pub mod remote_state_value; +pub mod sharded_aggregator_service; pub mod sharded_executor_service; #[cfg(test)] mod test_utils; diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/remote_state_value.rs b/aptos-move/aptos-vm/src/sharded_block_executor/remote_state_value.rs new file mode 100644 index 0000000000000..e791e7b015bd1 --- /dev/null +++ b/aptos-move/aptos-vm/src/sharded_block_executor/remote_state_value.rs @@ -0,0 +1,54 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_types::state_store::state_value::StateValue; +use std::sync::{Arc, Condvar, Mutex}; + +#[derive(Clone)] +// This struct is used to store the status of a remote state value. It provides semantics for +// blocking on a remote state value to be available locally while it is being asynchronously +// fetched from a remote server. 
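A usage sketch for `RemoteStateValue` (defined just below): one thread parks on `get_value()` while another publishes the result with `set_value()` once the remote shard has responded. It assumes the surrounding crate imports resolve; `None` stands for "the key does not exist on the remote shard".

use std::thread;

fn example() {
    let slot = RemoteStateValue::waiting();
    let publisher = slot.clone();

    // Consumer: blocks inside get_value() until the value is published.
    let reader = thread::spawn(move || slot.get_value());

    // Producer (e.g. the cross-shard commit receiver): publish the remote result.
    publisher.set_value(None);

    assert!(reader.join().unwrap().is_none());
    assert!(publisher.is_ready());
}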
+pub struct RemoteStateValue { + value_condition: Arc<(Mutex, Condvar)>, +} + +impl RemoteStateValue { + pub fn waiting() -> Self { + Self { + value_condition: Arc::new((Mutex::new(RemoteValueStatus::Waiting), Condvar::new())), + } + } + + pub fn set_value(&self, value: Option) { + let (lock, cvar) = &*self.value_condition; + let mut status = lock.lock().unwrap(); + *status = RemoteValueStatus::Ready(value); + cvar.notify_all(); + } + + pub fn get_value(&self) -> Option { + let (lock, cvar) = &*self.value_condition; + let mut status = lock.lock().unwrap(); + while let RemoteValueStatus::Waiting = *status { + status = cvar.wait(status).unwrap(); + } + match &*status { + RemoteValueStatus::Ready(value) => value.clone(), + RemoteValueStatus::Waiting => unreachable!(), + } + } + + pub fn is_ready(&self) -> bool { + let (lock, _cvar) = &*self.value_condition; + let status = lock.lock().unwrap(); + matches!(&*status, RemoteValueStatus::Ready(_)) + } +} + +#[derive(Clone)] +pub enum RemoteValueStatus { + /// The state value is available as a result of cross shard execution + Ready(Option), + /// We are still waiting for remote shard to push the state value + Waiting, +} diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/sharded_aggregator_service.rs b/aptos-move/aptos-vm/src/sharded_block_executor/sharded_aggregator_service.rs new file mode 100644 index 0000000000000..4081e3b14b3c8 --- /dev/null +++ b/aptos-move/aptos-vm/src/sharded_block_executor/sharded_aggregator_service.rs @@ -0,0 +1,254 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::sharded_block_executor::aggr_overridden_state_view::TOTAL_SUPPLY_AGGR_BASE_VAL; +use aptos_state_view::StateView; +use aptos_types::{ + state_store::state_key::StateKey, transaction::TransactionOutput, + write_set::TOTAL_SUPPLY_STATE_KEY, +}; +use rayon::prelude::*; +use serde::de::DeserializeOwned; +use std::{ops, sync::Arc}; + +pub fn get_state_value( + state_key: &StateKey, + state_view: &S, +) -> Option { + let value = state_view + .get_state_value_bytes(state_key) + .ok()? + .map(move |value| bcs::from_bytes(&value)); + value.transpose().map_err(anyhow::Error::msg).unwrap() +} + +/// This class ensures that deltas can use all 128 bits without having to let go of the sign bit for +/// cases where the delta is negative. That is, we don't have to use conversions to i128. +/// However, it does not handle overflow and underflow. That is, it will indicate to the caller of +/// the faulty logic with their usage of deltas. 
+#[derive(Clone, Copy)] +struct DeltaU128 { + delta: u128, + is_positive: bool, +} + +impl DeltaU128 { + pub fn get_delta(minuend: u128, subtrahend: u128) -> Self { + if minuend >= subtrahend { + Self { + delta: minuend - subtrahend, + is_positive: true, + } + } else { + Self { + delta: subtrahend - minuend, + is_positive: false, + } + } + } + + fn add_delta(self, other: u128) -> u128 { + if self.is_positive { + self.delta + other + } else { + other - self.delta + } + } +} + +impl Default for DeltaU128 { + fn default() -> Self { + Self { + delta: 0, + is_positive: true, + } + } +} + +impl ops::Add for DeltaU128 { + type Output = Self; + + fn add(self, rhs: Self) -> Self::Output { + // the deltas are both positive or both negative, we add the deltas and keep the sign + if self.is_positive == rhs.is_positive { + return Self { + delta: self.delta + rhs.delta, + is_positive: self.is_positive, + }; + } + + // the deltas are of opposite signs, we subtract the smaller from the larger and keep the + // sign of the larger + let (pos, neg) = if self.is_positive { + (self.delta, rhs.delta) + } else { + (rhs.delta, self.delta) + }; + + if pos >= neg { + return Self { + delta: pos - neg, + is_positive: true, + }; + } + Self { + delta: neg - pos, + is_positive: false, + } + } +} + +#[test] +fn test_delta_u128() { + assert_eq!(DeltaU128::default().delta, 0); + assert!(DeltaU128::default().is_positive); + + { + // get positive delta + let delta_pos = DeltaU128::get_delta(100, 50); + assert_eq!(delta_pos.delta, 50); + assert!(delta_pos.is_positive); + + // get negative delta + let delta_neg = DeltaU128::get_delta(50, 100); + assert_eq!(delta_neg.delta, 50); + assert!(!delta_neg.is_positive); + } + + { + // test add_delta + let delta1 = DeltaU128 { + delta: 100, + is_positive: true, + }; + assert_eq!(delta1.add_delta(50), 150); + + let delta2 = DeltaU128 { + delta: 50, + is_positive: false, + }; + assert_eq!(delta2.add_delta(50), 0); + } + + { + // test all cases for ops::Add + let delta1 = DeltaU128 { + delta: 100, + is_positive: true, + }; + let delta2 = DeltaU128 { + delta: 50, + is_positive: false, + }; + let delta3 = DeltaU128 { + delta: 100, + is_positive: true, + }; + // checks for [pos > neg]; [pos, pos] + let mut delta_sum = delta1 + delta2 + delta3; + assert_eq!(delta_sum.delta, 150); + assert!(delta_sum.is_positive); + + let delta4 = DeltaU128 { + delta: 500, + is_positive: false, + }; + let delta5 = DeltaU128 { + delta: 200, + is_positive: false, + }; + // checks for [neg > pos]; [neg, neg] + delta_sum = delta_sum + delta4 + delta5; + assert_eq!(delta_sum.delta, 550); + assert!(!delta_sum.is_positive); + } +} + +pub fn aggregate_and_update_total_supply( + sharded_output: &mut Vec>>, + global_output: &mut [TransactionOutput], + state_view: &S, + executor_thread_pool: Arc, +) { + let num_shards = sharded_output.len(); + let num_rounds = sharded_output[0].len(); + + // The first element is 0, which is the delta for shard 0 in round 0. 
+1 element will contain + // the delta for the global shard + let mut aggr_total_supply_delta = vec![DeltaU128::default(); num_shards * num_rounds + 1]; + + // No need to parallelize this as the runtime is O(num_shards * num_rounds) + // TODO: Get this from the individual shards while getting 'sharded_output' + let mut aggr_ts_idx = 1; + for round in 0..num_rounds { + sharded_output.iter().for_each(|shard_output| { + let mut curr_delta = DeltaU128::default(); + // Though we expect all the txn_outputs to have total_supply, there can be + // exceptions like 'block meta' (first txn in the block) and 'chkpt info' (last txn + // in the block) which may not have total supply. Hence we iterate till we find the + // last txn with total supply. + for txn in shard_output[round].iter().rev() { + if let Some(last_txn_total_supply) = txn.write_set().get_total_supply() { + curr_delta = + DeltaU128::get_delta(last_txn_total_supply, TOTAL_SUPPLY_AGGR_BASE_VAL); + break; + } + } + aggr_total_supply_delta[aggr_ts_idx] = + curr_delta + aggr_total_supply_delta[aggr_ts_idx - 1]; + aggr_ts_idx += 1; + }); + } + + // The txn_outputs contain 'txn_total_supply' with + // 'CrossShardStateViewAggrOverride::total_supply_aggr_base_val' as the base value. + // The actual 'total_supply_base_val' is in the state_view. + // The 'delta' for the shard/round is in aggr_total_supply_delta[round * num_shards + shard_id + 1] + // For every txn_output, we have to compute + // txn_total_supply = txn_total_supply - CrossShardStateViewAggrOverride::total_supply_aggr_base_val + total_supply_base_val + delta + // While 'txn_total_supply' is u128, the intermediate computation can be negative. So we use + // DeltaU128 to handle any intermediate underflow of u128. + let total_supply_base_val: u128 = get_state_value(&TOTAL_SUPPLY_STATE_KEY, state_view).unwrap(); + let base_val_delta = DeltaU128::get_delta(total_supply_base_val, TOTAL_SUPPLY_AGGR_BASE_VAL); + + let aggr_total_supply_delta_ref = &aggr_total_supply_delta; + // Runtime is O(num_txns), hence parallelized at the shard level and at the txns level. 
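A worked example of the adjustment described in the comment above, using `DeltaU128` and `TOTAL_SUPPLY_AGGR_BASE_VAL` from earlier in this file. The concrete numbers are made up; the point is that the signed-magnitude delta keeps the computation in u128 even when the logical intermediate value would be negative.

#[test]
fn total_supply_adjustment_example() {
    // True on-chain total supply before the block (read from the state view).
    let total_supply_base_val: u128 = 1_000;
    let base_val_delta = DeltaU128::get_delta(total_supply_base_val, TOTAL_SUPPLY_AGGR_BASE_VAL);

    // Cumulative effect of the shards/rounds that committed earlier: they burned 40 overall,
    // expressed relative to the overridden aggregator base value.
    let prior_shards_delta =
        DeltaU128::get_delta(TOTAL_SUPPLY_AGGR_BASE_VAL - 40, TOTAL_SUPPLY_AGGR_BASE_VAL);

    // A transaction in the current shard reported its running total supply against the
    // overridden base value, after minting 10.
    let txn_total_supply = TOTAL_SUPPLY_AGGR_BASE_VAL + 10;

    let adjusted = (prior_shards_delta + base_val_delta).add_delta(txn_total_supply);
    assert_eq!(adjusted, 1_000 - 40 + 10); // = 970, the corrected on-chain total supply
}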
+ executor_thread_pool.scope(|_| { + sharded_output + .par_iter_mut() + .enumerate() + .for_each(|(shard_id, shard_output)| { + for (round, txn_outputs) in shard_output.iter_mut().enumerate() { + let delta_for_round = + aggr_total_supply_delta_ref[round * num_shards + shard_id] + base_val_delta; + txn_outputs + .par_iter_mut() + .with_min_len(25) + .for_each(|txn_output| { + if let Some(txn_total_supply) = + txn_output.write_set().get_total_supply() + { + txn_output.update_total_supply( + delta_for_round.add_delta(txn_total_supply), + ); + } + }); + } + }); + }); + + let delta_for_global_shard = aggr_total_supply_delta[num_shards * num_rounds] + base_val_delta; + let delta_for_global_shard_ref = &delta_for_global_shard; + executor_thread_pool.scope(|_| { + global_output + .par_iter_mut() + .with_min_len(25) + .for_each(|txn_output| { + if let Some(txn_total_supply) = txn_output.write_set().get_total_supply() { + txn_output.update_total_supply( + delta_for_global_shard_ref.add_delta(txn_total_supply), + ); + } + }); + }); +} diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/sharded_executor_service.rs b/aptos-move/aptos-vm/src/sharded_block_executor/sharded_executor_service.rs index 0356ad5c7dcd9..30d062080db76 100644 --- a/aptos-move/aptos-vm/src/sharded_block_executor/sharded_executor_service.rs +++ b/aptos-move/aptos-vm/src/sharded_block_executor/sharded_executor_service.rs @@ -3,6 +3,7 @@ use crate::{ block_executor::BlockAptosVM, sharded_block_executor::{ + aggr_overridden_state_view::{AggregatorOverriddenStateView, TOTAL_SUPPLY_AGGR_BASE_VAL}, coordinator_client::CoordinatorClient, counters::{SHARDED_BLOCK_EXECUTION_BY_ROUNDS_SECONDS, SHARDED_BLOCK_EXECUTOR_TXN_COUNT}, cross_shard_client::{CrossShardClient, CrossShardCommitReceiver, CrossShardCommitSender}, @@ -44,6 +45,7 @@ impl ShardedExecutorService { rayon::ThreadPoolBuilder::new() // We need two extra threads for the cross-shard commit receiver and the thread // that is blocked on waiting for execute block to finish. 
+ .thread_name(move |i| format!("sharded-executor-shard-{}-{}", shard_id, i)) .num_threads(num_threads + 2) .build() .unwrap(), @@ -106,6 +108,12 @@ impl ShardedExecutorService { let cross_shard_state_view_clone = cross_shard_state_view.clone(); let cross_shard_client_clone = cross_shard_client.clone(); + + let aggr_overridden_state_view = Arc::new(AggregatorOverriddenStateView::new( + cross_shard_state_view.as_ref(), + TOTAL_SUPPLY_AGGR_BASE_VAL, + )); + executor_thread_pool.clone().scope(|s| { s.spawn(move |_| { CrossShardCommitReceiver::start( @@ -121,7 +129,7 @@ impl ShardedExecutorService { .into_iter() .map(|txn| txn.into_txn().into_txn()) .collect(), - cross_shard_state_view.as_ref(), + aggr_overridden_state_view.as_ref(), concurrency_level, maybe_block_gas_limit, cross_shard_commit_sender, diff --git a/aptos-move/aptos-vm/src/sharded_block_executor/test_utils.rs b/aptos-move/aptos-vm/src/sharded_block_executor/test_utils.rs index 1af8aec26eaec..362a14337cfa8 100644 --- a/aptos-move/aptos-vm/src/sharded_block_executor/test_utils.rs +++ b/aptos-move/aptos-vm/src/sharded_block_executor/test_utils.rs @@ -12,7 +12,6 @@ use aptos_language_e2e_tests::{ }; use aptos_types::{ block_executor::partitioner::PartitionedTransactions, - state_store::state_key::StateKeyInner, transaction::{analyzed_transaction::AnalyzedTransaction, Transaction, TransactionOutput}, }; use move_core_types::account_address::AccountAddress; @@ -77,27 +76,13 @@ pub fn compare_txn_outputs( unsharded_txn_output[i].gas_used(), sharded_txn_output[i].gas_used() ); - //assert_eq!(unsharded_txn_output[i].write_set(), sharded_txn_output[i].write_set()); assert_eq!( - unsharded_txn_output[i].events(), - sharded_txn_output[i].events() + unsharded_txn_output[i].write_set(), + sharded_txn_output[i].write_set() ); - // Global supply tracking for coin is not supported in sharded execution yet, so we filter - // out the table item from the write set, which has the global supply. This is a hack until - // we support global supply tracking in sharded execution. - let unsharded_write_set_without_table_item = unsharded_txn_output[i] - .write_set() - .into_iter() - .filter(|(k, _)| matches!(k.inner(), &StateKeyInner::AccessPath(_))) - .collect::>(); - let sharded_write_set_without_table_item = sharded_txn_output[i] - .write_set() - .into_iter() - .filter(|(k, _)| matches!(k.inner(), &StateKeyInner::AccessPath(_))) - .collect::>(); assert_eq!( - unsharded_write_set_without_table_item, - sharded_write_set_without_table_item + unsharded_txn_output[i].events(), + sharded_txn_output[i].events() ); } } @@ -188,7 +173,7 @@ pub fn sharded_block_executor_with_random_transfers(&'s S); + +impl<'s, S: StateView> ExecutorViewBase<'s, S> { + pub(crate) fn new(state_view: &'s S) -> Self { + Self(state_view) + } +} + +pub trait AsExecutorView { + fn as_executor_view(&self) -> ExecutorViewBase; +} + +impl AsExecutorView for S { + fn as_executor_view(&self) -> ExecutorViewBase { + ExecutorViewBase::new(self) + } +} + +impl<'s, S: StateView> TAggregatorView for ExecutorViewBase<'s, S> { + type IdentifierV1 = StateKey; + type IdentifierV2 = AggregatorID; + + fn get_aggregator_v1_state_value( + &self, + state_key: &Self::IdentifierV1, + // Reading from StateView can be in precise mode only. 
+ _mode: AggregatorReadMode, + ) -> anyhow::Result> { + self.0.get_state_value(state_key) + } +} + +impl<'s, S: StateView> TResourceView for ExecutorViewBase<'s, S> { + type Key = StateKey; + type Layout = MoveTypeLayout; + + fn get_resource_state_value( + &self, + state_key: &Self::Key, + _maybe_layout: Option<&Self::Layout>, + ) -> anyhow::Result> { + self.0.get_state_value(state_key) + } +} + +impl<'s, S: StateView> TModuleView for ExecutorViewBase<'s, S> { + type Key = StateKey; + + fn get_module_state_value(&self, state_key: &Self::Key) -> anyhow::Result> { + self.0.get_state_value(state_key) + } +} + +impl<'s, S: StateView> StateStorageView for ExecutorViewBase<'s, S> { + fn id(&self) -> StateViewId { + self.0.id() + } + + fn get_usage(&self) -> anyhow::Result { + self.0.get_usage() + } +} diff --git a/aptos-move/aptos-vm/src/storage_adapter/mod.rs b/aptos-move/aptos-vm/src/storage_adapter/mod.rs new file mode 100644 index 0000000000000..ff9b2f1c7ebe4 --- /dev/null +++ b/aptos-move/aptos-vm/src/storage_adapter/mod.rs @@ -0,0 +1,6 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +mod executor_view_base; + +pub use crate::storage_adapter::executor_view_base::{AsExecutorView, ExecutorViewBase}; diff --git a/aptos-move/block-executor/Cargo.toml b/aptos-move/block-executor/Cargo.toml index b44e7c1501c9d..5b292ab856630 100644 --- a/aptos-move/block-executor/Cargo.toml +++ b/aptos-move/block-executor/Cargo.toml @@ -22,8 +22,10 @@ aptos-mvhashmap = { workspace = true } aptos-state-view = { workspace = true } aptos-types = { workspace = true } aptos-vm-logging = { workspace = true } +aptos-vm-types = { workspace = true } arc-swap = { workspace = true } bcs = { workspace = true } +bytes = { workspace = true } claims = { workspace = true } criterion = { workspace = true, optional = true } crossbeam = { workspace = true } @@ -36,8 +38,10 @@ parking_lot = { workspace = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } rayon = { workspace = true } +serde = { workspace = true } [dev-dependencies] +aptos-aggregator = { workspace = true, features = ["testing"] } criterion = { workspace = true } itertools = { workspace = true } proptest = { workspace = true } diff --git a/aptos-move/block-executor/src/executor.rs b/aptos-move/block-executor/src/executor.rs index 1d2824ac03a1e..f2f35d7da9ab2 100644 --- a/aptos-move/block-executor/src/executor.rs +++ b/aptos-move/block-executor/src/executor.rs @@ -18,12 +18,16 @@ use crate::{ use aptos_aggregator::delta_change_set::serialize; use aptos_logger::{debug, info}; use aptos_mvhashmap::{ - types::{MVDataError, MVDataOutput, TxnIndex, Version}, + types::{Incarnation, MVDataError, MVDataOutput, TxnIndex}, unsync_map::UnsyncMap, MVHashMap, }; use aptos_state_view::TStateView; -use aptos_types::{executable::Executable, fee_statement::FeeStatement, write_set::WriteOp}; +use aptos_types::{ + executable::Executable, + fee_statement::FeeStatement, + write_set::{TransactionWrite, WriteOp}, +}; use aptos_vm_logging::{clear_speculative_txn_logs, init_speculative_logs}; use num_cpus; use rayon::ThreadPool; @@ -111,17 +115,17 @@ where } fn execute( - version: Version, + idx_to_execute: TxnIndex, + incarnation: Incarnation, signature_verified_block: &[T], last_input_output: &TxnLastInputOutput, - versioned_cache: &MVHashMap, + versioned_cache: &MVHashMap, scheduler: &Scheduler, executor: &E, base_view: &S, latest_view: ParallelState, ) -> SchedulerTask { let _timer = 
TASK_EXECUTE_SECONDS.start_timer(); - let (idx_to_execute, incarnation) = version; let txn = &signature_verified_block[idx_to_execute as usize]; // VM execution. @@ -136,7 +140,6 @@ where let mut updates_outside = false; let mut apply_updates = |output: &E::Output| { // First, apply writes. - let write_version = (idx_to_execute, incarnation); for (k, v) in output .resource_write_set() .into_iter() @@ -145,7 +148,9 @@ where if prev_modified_keys.remove(&k).is_none() { updates_outside = true; } - versioned_cache.data().write(k, write_version, v); + versioned_cache + .data() + .write(k, idx_to_execute, incarnation, v); } for (k, v) in output.module_write_set().into_iter() { @@ -160,7 +165,7 @@ where if prev_modified_keys.remove(&k).is_none() { updates_outside = true; } - versioned_cache.add_delta(k, idx_to_execute, d); + versioned_cache.data().add_delta(k, idx_to_execute, d); } }; @@ -207,37 +212,40 @@ where } fn validate( - version_to_validate: Version, + idx_to_validate: TxnIndex, + incarnation: Incarnation, validation_wave: Wave, last_input_output: &TxnLastInputOutput, - versioned_cache: &MVHashMap, + versioned_cache: &MVHashMap, scheduler: &Scheduler, ) -> SchedulerTask { use MVDataError::*; use MVDataOutput::*; let _timer = TASK_VALIDATE_SECONDS.start_timer(); - let (idx_to_validate, incarnation) = version_to_validate; let read_set = last_input_output .read_set(idx_to_validate) .expect("[BlockSTM]: Prior read-set must be recorded"); let valid = read_set.iter().all(|r| { - match versioned_cache.fetch_data(r.path(), idx_to_validate) { - Ok(Versioned(version, _)) => r.validate_version(version), + if r.is_speculative_failure() { + return false; + } + + match versioned_cache.data().fetch_data(r.path(), idx_to_validate) { + Ok(Versioned(version, _)) => r.validate_versioned(version), Ok(Resolved(value)) => r.validate_resolved(value), + Err(Uninitialized) => { + // Can match the current behavior for modules: the path would be considered + // 'Uninitialized' for data() hashmap, as the output is stored in the modules + // MVHashMap. We validate all module reads successfully, as reading any + // module that is also published triggeres ModulePathReadWrite fallback. + r.validate_module() + }, // Dependency implies a validation failure, and if the original read were to // observe an unresolved delta, it would set the aggregator base value in the // multi-versioned data-structure, resolve, and record the resolved value. - Err(Dependency(_)) | Err(Unresolved(_)) => false, - Err(NotFound) => r.validate_storage(), - // We successfully validate when read (again) results in a delta application - // failure. If the failure is speculative, a later validation will fail due to - // a read without this error. However, if the failure is real, passing - // validation here allows to avoid infinitely looping and instead panic when - // materializing deltas as writes in the final output preparation state. Panic - // is also preferable as it allows testing for this scenario. 
- Err(DeltaApplicationFailure) => r.validate_delta_application_failure(), + Err(Dependency(_)) | Err(Unresolved(_)) | Err(DeltaApplicationFailure) => false, } }); @@ -319,7 +327,7 @@ where { if txn_idx + 1 == scheduler.num_txns() { assert!( - !matches!(scheduler_task, SchedulerTask::ExecutionTask(_, _)), + !matches!(scheduler_task, SchedulerTask::ExecutionTask(_, _, _)), "All transactions can be committed, can't have execution task" ); @@ -362,7 +370,7 @@ where fn worker_commit_hook( &self, txn_idx: TxnIndex, - versioned_cache: &MVHashMap, + versioned_cache: &MVHashMap, last_input_output: &TxnLastInputOutput, base_view: &S, ) { @@ -381,22 +389,28 @@ where // single materialized aggregator. If needed, the contention may be further // mitigated by batching consecutive commit_hooks. let committed_delta = versioned_cache + .data() .materialize_delta(&k, txn_idx) .unwrap_or_else(|op| { // TODO: this logic should improve with the new AGGR data structure // TODO: and the ugly base_view parameter will also disappear. let storage_value = base_view - .get_state_value_u128(&k) - .expect("Error reading the base value for committed delta in storage") - .expect("No base value for committed delta in storage"); + .get_state_value(&k) + .expect("Error reading the base value for committed delta in storage"); - versioned_cache.set_aggregator_base_value(&k, storage_value); - op.apply_to(storage_value) + let w: T::Value = TransactionWrite::from_state_value(storage_value); + let value_u128 = w + .as_u128() + .expect("Aggregator base value deserialization error") + .expect("Aggregator base value must exist"); + + versioned_cache.data().provide_base_value(k.clone(), w); + op.apply_to(value_u128) .expect("Materializing delta w. base value set must succeed") }); // Must contain committed value as we set the base value above. - delta_writes.push((k, WriteOp::Modification(serialize(&committed_delta)))); + delta_writes.push((k, WriteOp::Modification(serialize(&committed_delta).into()))); } last_input_output.record_delta_writes(txn_idx, delta_writes); if let Some(txn_commit_listener) = &self.transaction_commit_hook { @@ -419,7 +433,7 @@ where executor_arguments: &E::Argument, block: &[T], last_input_output: &TxnLastInputOutput, - versioned_cache: &MVHashMap, + versioned_cache: &MVHashMap, scheduler: &Scheduler, // TODO: should not need to pass base view. base_view: &S, @@ -467,26 +481,30 @@ where } scheduler_task = match scheduler_task { - SchedulerTask::ValidationTask(version_to_validate, wave) => Self::validate( - version_to_validate, + SchedulerTask::ValidationTask(txn_idx, incarnation, wave) => Self::validate( + txn_idx, + incarnation, wave, last_input_output, versioned_cache, scheduler, ), - SchedulerTask::ExecutionTask(version_to_execute, ExecutionTaskType::Execution) => { - Self::execute( - version_to_execute, - block, - last_input_output, - versioned_cache, - scheduler, - &executor, - base_view, - ParallelState::new(versioned_cache, scheduler, shared_counter), - ) - }, - SchedulerTask::ExecutionTask(_, ExecutionTaskType::Wakeup(condvar)) => { + SchedulerTask::ExecutionTask( + txn_idx, + incarnation, + ExecutionTaskType::Execution, + ) => Self::execute( + txn_idx, + incarnation, + block, + last_input_output, + versioned_cache, + scheduler, + &executor, + base_view, + ParallelState::new(versioned_cache, scheduler, shared_counter), + ), + SchedulerTask::ExecutionTask(_, _, ExecutionTaskType::Wakeup(condvar)) => { let (lock, cvar) = &*condvar; // Mark dependency resolved. 
*lock.lock() = DependencyStatus::Resolved; diff --git a/aptos-move/block-executor/src/lib.rs b/aptos-move/block-executor/src/lib.rs index 321ec25d81278..d71171072e54b 100644 --- a/aptos-move/block-executor/src/lib.rs +++ b/aptos-move/block-executor/src/lib.rs @@ -48,7 +48,7 @@ dependencies. In particular, an incarnation of transaction tx_j stops and waits on a condition variable whenever it reads a value marked as an ESTIMATE that was written by a lower transaction tx_k. When the execution of tx_k finishes, it signals the condition variable and the execution of tx_j continues. This way, -tx_k does not read a value that is likely to cause an abort in the future due to a +tx_j does not read a value that is likely to cause an abort in the future due to a validation failure, which would happen if the next incarnation of tx_k would indeed write to the same location (the ESTIMATE markers that are not overwritten are removed by the next incarnation). diff --git a/aptos-move/block-executor/src/proptest_types/baseline.rs b/aptos-move/block-executor/src/proptest_types/baseline.rs index 094914545e70a..d09226c05ad60 100644 --- a/aptos-move/block-executor/src/proptest_types/baseline.rs +++ b/aptos-move/block-executor/src/proptest_types/baseline.rs @@ -15,7 +15,7 @@ use crate::{ errors::{Error as BlockExecutorError, Result as BlockExecutorResult}, proptest_types::types::{MockOutput, MockTransaction, STORAGE_AGGREGATOR_VALUE}, }; -use aptos_aggregator::{delta_change_set::serialize, transaction::AggregatorValue}; +use aptos_aggregator::delta_change_set::serialize; use aptos_types::{contract_event::ReadWriteEvent, write_set::TransactionWrite}; use claims::{assert_matches, assert_none, assert_some_eq}; use itertools::izip; @@ -93,7 +93,7 @@ impl], maybe_block_gas_limit: Option, ) -> Self { - let mut current_world = HashMap::>::new(); + let mut current_world = HashMap::>::new(); let mut accumulated_gas = 0; let mut status = BaselineStatus::Success; @@ -133,11 +133,10 @@ impl { - AggregatorValue::from_write(w_value) - .expect("Delta to a non-existent aggregator") - .into() - }, + BaselineValue::GenericWrite(w_value) => w_value + .as_u128() + .expect("Delta to a non-existent aggregator") + .expect("Must deserialize the aggregator base value"), // Get base value from latest resolved aggregator value. BaselineValue::Aggregator(value) => *value, // Storage always gets resolved to a default constant. @@ -245,9 +244,10 @@ impl { pub(crate) struct BencherState< K: Hash + Clone + Debug + Eq + PartialOrd + Ord, - V: Clone + Eq + Arbitrary, E: Send + Sync + Debug + Clone + ReadWriteEvent, -> where - Vec: From, -{ - transactions: Vec, ValueType, E>>, - baseline_output: BaselineOutput, ValueType>, +> { + transactions: Vec, ValueType, E>>, + baseline_output: BaselineOutput, ValueType>, } impl Bencher @@ -62,7 +59,7 @@ where pub fn bench(&self, key_strategy: &impl Strategy, bencher: &mut CBencher) { bencher.iter_batched( || { - BencherState::::with_universe( + BencherState::::with_universe::( vec(key_strategy, self.universe_size), self.transaction_size, self.transaction_gen_param, @@ -75,16 +72,16 @@ where } } -impl BencherState +impl BencherState where K: Hash + Clone + Debug + Eq + Send + Sync + PartialOrd + Ord + 'static, - V: Clone + Eq + Send + Sync + Arbitrary + 'static, E: Send + Sync + Debug + Clone + ReadWriteEvent + 'static, - Vec: From, { /// Creates a new benchmark state with the given account universe strategy and number of /// transactions. 
- pub(crate) fn with_universe( + pub(crate) fn with_universe< + V: Into> + Clone + Eq + Send + Sync + Arbitrary + 'static, + >( universe_strategy: impl Strategy>, num_transactions: usize, transaction_params: TransactionGenParams, @@ -117,7 +114,7 @@ where } pub(crate) fn run(self) { - let data_view = EmptyDataView::, ValueType> { + let data_view = EmptyDataView::, ValueType> { phantom: PhantomData, }; @@ -129,10 +126,10 @@ where ); let output = BlockExecutor::< - MockTransaction, ValueType, E>, - MockTask, ValueType, E>, - EmptyDataView, ValueType>, - NoOpTransactionCommitHook, ValueType, E>, usize>, + MockTransaction, ValueType, E>, + MockTask, ValueType, E>, + EmptyDataView, ValueType>, + NoOpTransactionCommitHook, ValueType, E>, usize>, ExecutableTestType, >::new(num_cpus::get(), executor_thread_pool, None, None) .execute_transactions_parallel((), &self.transactions, &data_view); diff --git a/aptos-move/block-executor/src/proptest_types/tests.rs b/aptos-move/block-executor/src/proptest_types/tests.rs index c2561abeb098c..8df008b74c7dc 100644 --- a/aptos-move/block-executor/src/proptest_types/tests.rs +++ b/aptos-move/block-executor/src/proptest_types/tests.rs @@ -54,7 +54,7 @@ fn run_transactions( *transactions.get_mut(i.index(length)).unwrap() = MockTransaction::SkipRest; } - let data_view = EmptyDataView::, ValueType> { + let data_view = EmptyDataView::, ValueType> { phantom: PhantomData, }; @@ -67,10 +67,10 @@ fn run_transactions( for _ in 0..num_repeat { let output = BlockExecutor::< - MockTransaction, ValueType, E>, - MockTask, ValueType, E>, - EmptyDataView, ValueType>, - NoOpTransactionCommitHook, ValueType, E>, usize>, + MockTransaction, ValueType, E>, + MockTask, ValueType, E>, + EmptyDataView, ValueType>, + NoOpTransactionCommitHook, ValueType, E>, usize>, ExecutableTestType, >::new( num_cpus::get(), @@ -189,7 +189,7 @@ fn deltas_writes_mixed_with_block_gas_limit(num_txns: usize, maybe_block_gas_lim .map(|txn_gen| txn_gen.materialize_with_deltas(&universe, 15, false)) .collect(); - let data_view = DeltaDataView::, ValueType<[u8; 32]>> { + let data_view = DeltaDataView::, ValueType> { phantom: PhantomData, }; @@ -202,13 +202,10 @@ fn deltas_writes_mixed_with_block_gas_limit(num_txns: usize, maybe_block_gas_lim for _ in 0..20 { let output = BlockExecutor::< - MockTransaction, ValueType<[u8; 32]>, MockEvent>, - MockTask, ValueType<[u8; 32]>, MockEvent>, - DeltaDataView, ValueType<[u8; 32]>>, - NoOpTransactionCommitHook< - MockOutput, ValueType<[u8; 32]>, MockEvent>, - usize, - >, + MockTransaction, ValueType, MockEvent>, + MockTask, ValueType, MockEvent>, + DeltaDataView, ValueType>, + NoOpTransactionCommitHook, ValueType, MockEvent>, usize>, ExecutableTestType, >::new( num_cpus::get(), @@ -237,7 +234,7 @@ fn deltas_resolver_with_block_gas_limit(num_txns: usize, maybe_block_gas_limit: .expect("creating a new value should succeed") .current(); - let data_view = DeltaDataView::, ValueType<[u8; 32]>> { + let data_view = DeltaDataView::, ValueType> { phantom: PhantomData, }; @@ -256,13 +253,10 @@ fn deltas_resolver_with_block_gas_limit(num_txns: usize, maybe_block_gas_limit: for _ in 0..20 { let output = BlockExecutor::< - MockTransaction, ValueType<[u8; 32]>, MockEvent>, - MockTask, ValueType<[u8; 32]>, MockEvent>, - DeltaDataView, ValueType<[u8; 32]>>, - NoOpTransactionCommitHook< - MockOutput, ValueType<[u8; 32]>, MockEvent>, - usize, - >, + MockTransaction, ValueType, MockEvent>, + MockTask, ValueType, MockEvent>, + DeltaDataView, ValueType>, + NoOpTransactionCommitHook, 
ValueType, MockEvent>, usize>, ExecutableTestType, >::new( num_cpus::get(), @@ -402,7 +396,7 @@ fn publishing_fixed_params_with_block_gas_limit( }, }; - let data_view = DeltaDataView::, ValueType<[u8; 32]>> { + let data_view = DeltaDataView::, ValueType> { phantom: PhantomData, }; @@ -415,13 +409,10 @@ fn publishing_fixed_params_with_block_gas_limit( // Confirm still no intersection let output = BlockExecutor::< - MockTransaction, ValueType<[u8; 32]>, MockEvent>, - MockTask, ValueType<[u8; 32]>, MockEvent>, - DeltaDataView, ValueType<[u8; 32]>>, - NoOpTransactionCommitHook< - MockOutput, ValueType<[u8; 32]>, MockEvent>, - usize, - >, + MockTransaction, ValueType, MockEvent>, + MockTask, ValueType, MockEvent>, + DeltaDataView, ValueType>, + NoOpTransactionCommitHook, ValueType, MockEvent>, usize>, ExecutableTestType, >::new( num_cpus::get(), @@ -461,13 +452,10 @@ fn publishing_fixed_params_with_block_gas_limit( for _ in 0..200 { let output = BlockExecutor::< - MockTransaction, ValueType<[u8; 32]>, MockEvent>, - MockTask, ValueType<[u8; 32]>, MockEvent>, - DeltaDataView, ValueType<[u8; 32]>>, - NoOpTransactionCommitHook< - MockOutput, ValueType<[u8; 32]>, MockEvent>, - usize, - >, + MockTransaction, ValueType, MockEvent>, + MockTask, ValueType, MockEvent>, + DeltaDataView, ValueType>, + NoOpTransactionCommitHook, ValueType, MockEvent>, usize>, ExecutableTestType, >::new( num_cpus::get(), diff --git a/aptos-move/block-executor/src/proptest_types/types.rs b/aptos-move/block-executor/src/proptest_types/types.rs index c80e607bce04b..2fc4784bba119 100644 --- a/aptos-move/block-executor/src/proptest_types/types.rs +++ b/aptos-move/block-executor/src/proptest_types/types.rs @@ -3,10 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::task::{ExecutionStatus, ExecutorTask, Transaction, TransactionOutput}; -use aptos_aggregator::{ - delta_change_set::{delta_add, delta_sub, serialize, DeltaOp}, - transaction::AggregatorValue, -}; +use aptos_aggregator::delta_change_set::{delta_add, delta_sub, serialize, DeltaOp}; use aptos_mvhashmap::types::TxnIndex; use aptos_state_view::{StateViewId, TStateView}; use aptos_types::{ @@ -19,8 +16,10 @@ use aptos_types::{ state_store::{state_storage_usage::StateStorageUsage, state_value::StateValue}, write_set::{TransactionWrite, WriteOp}, }; +use aptos_vm_types::resolver::TExecutorView; +use bytes::Bytes; use claims::assert_ok; -use move_core_types::language_storage::TypeTag; +use move_core_types::{language_storage::TypeTag, value::MoveTypeLayout}; use once_cell::sync::OnceCell; use proptest::{arbitrary::Arbitrary, collection::vec, prelude::*, proptest, sample::Index}; use proptest_derive::Arbitrary; @@ -35,7 +34,6 @@ use std::{ Arc, }, }; - // Should not be possible to overflow or underflow, as each delta is at most 100 in the tests. // TODO: extend to delta failures. pub(crate) const STORAGE_AGGREGATOR_VALUE: u128 = 100001; @@ -54,9 +52,9 @@ where /// Gets the state value for a given state key. 
fn get_state_value(&self, _: &K) -> anyhow::Result> { - Ok(Some(StateValue::new_legacy(serialize( - &STORAGE_AGGREGATOR_VALUE, - )))) + Ok(Some(StateValue::new_legacy( + serialize(&STORAGE_AGGREGATOR_VALUE).into(), + ))) } fn id(&self) -> StateViewId { @@ -127,26 +125,52 @@ impl ModulePath for KeyType } } -#[derive(Debug, Clone, PartialEq, Eq, Arbitrary)] -pub struct ValueType> + Debug + Clone + Eq + Arbitrary>( +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct ValueType { /// Wrapping the types used for testing to add TransactionWrite trait implementation (below). - pub V, - /// Determines whether V is going to contain a value (o.w. deletion). This is useful for - /// testing the behavior of deleting aggregators, in which case we shouldn't panic - /// but let the Move-VM handle the read the same as for any deleted resource. - pub bool, -); + bytes: Option, +} -impl> + Debug + Clone + Eq + Send + Sync + Arbitrary> TransactionWrite - for ValueType -{ - fn extract_raw_bytes(&self) -> Option> { - if self.1 { - let mut v = self.0.clone().into(); - v.resize(16, 1); - Some(v) - } else { - None +impl Arbitrary for ValueType { + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { + vec(any::(), 17) + .prop_map(|mut v| { + let use_value = v[0] < 128; + v.resize(16, 0); + ValueType::new(v, use_value) + }) + .boxed() + } +} + +impl ValueType { + /// If use_value is not set, the resulting Value will correspond to a deletion, i.e. + /// not contain a value (o.w. deletion). + pub(crate) fn new> + Debug + Clone + Eq + Send + Sync + Arbitrary>( + value: V, + use_value: bool, + ) -> Self { + Self { + bytes: use_value.then(|| { + let mut v = value.clone().into(); + v.resize(16, 1); + v.into() + }), + } + } +} + +impl TransactionWrite for ValueType { + fn bytes(&self) -> Option<&Bytes> { + self.bytes.as_ref() + } + + fn from_state_value(maybe_state_value: Option) -> Self { + Self { + bytes: maybe_state_value.map(|state_value| state_value.bytes().clone()), } } @@ -256,7 +280,9 @@ impl< > Transaction for MockTransaction { type Event = E; + type Identifier = (); type Key = K; + type Tag = u32; type Value = V; } @@ -293,7 +319,7 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio delta_fn: &dyn Fn(usize, &V) -> Option, allow_deletes: bool, ) -> Vec<( - /* writes = */ Vec<(KeyType, ValueType)>, + /* writes = */ Vec<(KeyType, ValueType)>, /* deltas = */ Vec<(KeyType, DeltaOp)>, )> { let mut ret = vec![]; @@ -311,14 +337,15 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio None => { // One out of 23 writes will be a deletion let is_deletion = allow_deletes - && AggregatorValue::from_write(&ValueType(value.clone(), true)) + && ValueType::new(value.clone(), true) + .as_u128() + .unwrap() .unwrap() - .into() % 23 == 0; incarnation_writes.push(( KeyType(key, module_write_fn(i)), - ValueType(value.clone(), !is_deletion), + ValueType::new(value.clone(), !is_deletion), )); }, } @@ -362,7 +389,7 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio module_write_fn: &dyn Fn(usize) -> bool, delta_fn: &dyn Fn(usize, &V) -> Option, allow_deletes: bool, - ) -> MockTransaction, ValueType, E> { + ) -> MockTransaction, ValueType, E> { let reads = Self::reads_from_gen(universe, self.reads, &module_read_fn); let gas = Self::gas_from_gen(self.gas); @@ -396,7 +423,7 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio universe: &[K], // Are writes and reads module access (same access path). 
module_access: (bool, bool), - ) -> MockTransaction, ValueType, E> { + ) -> MockTransaction, ValueType, E> { let is_module_read = |_| -> bool { module_access.1 }; let is_module_write = |_| -> bool { module_access.0 }; let is_delta = |_, _: &V| -> Option { None }; @@ -420,14 +447,12 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio universe: &[K], delta_threshold: usize, allow_deletes: bool, - ) -> MockTransaction, ValueType, E> { + ) -> MockTransaction, ValueType, E> { let is_module_read = |_| -> bool { false }; let is_module_write = |_| -> bool { false }; let is_delta = |i, v: &V| -> Option { if i >= delta_threshold { - let val = AggregatorValue::from_write(&ValueType(v.clone(), true)) - .unwrap() - .into(); + let val = ValueType::new(v.clone(), true).as_u128().unwrap().unwrap(); if val % 10 == 0 { None } else if val % 10 < 5 { @@ -461,7 +486,7 @@ impl> + Arbitrary + Clone + Debug + Eq + Sync + Send> Transactio // writes. This way there will be module accesses but no intersection. read_threshold: usize, write_threshold: usize, - ) -> MockTransaction, ValueType, E> { + ) -> MockTransaction, ValueType, E> { assert!(read_threshold < universe.len()); assert!(write_threshold > read_threshold); assert!(write_threshold < universe.len()); @@ -510,7 +535,7 @@ where fn execute_transaction( &self, - view: &impl TStateView, + view: &impl TExecutorView, txn: &Self::Txn, txn_idx: TxnIndex, _materialize_deltas: bool, @@ -529,19 +554,26 @@ where let behavior = &incarnation_behaviors[idx % incarnation_behaviors.len()]; // Reads - let mut reads_result = vec![]; + let mut read_results = vec![]; for k in behavior.reads.iter() { // TODO: later test errors as well? (by fixing state_view behavior). - match view.get_state_value_bytes(k) { - Ok(v) => reads_result.push(v), - Err(_) => reads_result.push(None), + // TODO: test aggregator reads. + match k.module_path() { + Some(_) => match view.get_module_bytes(k) { + Ok(v) => read_results.push(v.map(Into::into)), + Err(_) => read_results.push(None), + }, + None => match view.get_resource_bytes(k, None) { + Ok(v) => read_results.push(v.map(Into::into)), + Err(_) => read_results.push(None), + }, } } ExecutionStatus::Success(MockOutput { writes: behavior.writes.clone(), deltas: behavior.deltas.clone(), events: behavior.events.to_vec(), - read_results: reads_result, + read_results, materialized_delta_writes: OnceCell::new(), total_gas: behavior.gas, }) @@ -550,16 +582,6 @@ where MockTransaction::Abort => ExecutionStatus::Abort(txn_idx as usize), } } - - fn convert_to_value( - &self, - _view: &impl TStateView, - _key: &K, - _maybe_blob: Option>, - _creation: bool, - ) -> anyhow::Result { - unimplemented!("TODO: implement for AggregatorV2 testing"); - } } #[derive(Debug)] diff --git a/aptos-move/block-executor/src/scheduler.rs b/aptos-move/block-executor/src/scheduler.rs index f13dcbd2bcc33..3cfe011ff643b 100644 --- a/aptos-move/block-executor/src/scheduler.rs +++ b/aptos-move/block-executor/src/scheduler.rs @@ -4,7 +4,7 @@ use crate::counters::GET_NEXT_TASK_SECONDS; use aptos_infallible::Mutex; -use aptos_mvhashmap::types::{Incarnation, TxnIndex, Version}; +use aptos_mvhashmap::types::{Incarnation, TxnIndex}; use crossbeam::utils::CachePadded; use parking_lot::{RwLock, RwLockUpgradableReadGuard}; use std::{ @@ -54,8 +54,8 @@ pub enum ExecutionTaskType { /// there are no more tasks and the scheduler is done. 
#[derive(Debug)] pub enum SchedulerTask { - ExecutionTask(Version, ExecutionTaskType), - ValidationTask(Version, Wave), + ExecutionTask(TxnIndex, Incarnation, ExecutionTaskType), + ValidationTask(TxnIndex, Incarnation, Wave), NoTask, Done, } @@ -375,15 +375,15 @@ impl Scheduler { } if prefer_validate { - if let Some((version_to_validate, wave)) = + if let Some((txn_idx, incarnation, wave)) = self.try_validate_next_version(idx_to_validate, wave) { - return SchedulerTask::ValidationTask(version_to_validate, wave); + return SchedulerTask::ValidationTask(txn_idx, incarnation, wave); } - } else if let Some((version_to_execute, execution_task_type)) = + } else if let Some((txn_idx, incarnation, execution_task_type)) = self.try_execute_next_version() { - return SchedulerTask::ExecutionTask(version_to_execute, execution_task_type); + return SchedulerTask::ExecutionTask(txn_idx, incarnation, execution_task_type); } } } @@ -510,7 +510,7 @@ impl Scheduler { } // Update the minimum wave this txn needs to pass. validation_status.required_wave = cur_wave; - return SchedulerTask::ValidationTask((txn_idx, incarnation), cur_wave); + return SchedulerTask::ValidationTask(txn_idx, incarnation, cur_wave); } SchedulerTask::NoTask @@ -549,10 +549,7 @@ impl Scheduler { // nothing to do, as another thread must have succeeded to incarnate and // obtain the task for re-execution. if let Some((new_incarnation, execution_task_type)) = self.try_incarnate(txn_idx) { - return SchedulerTask::ExecutionTask( - (txn_idx, new_incarnation), - execution_task_type, - ); + return SchedulerTask::ExecutionTask(txn_idx, new_incarnation, execution_task_type); } } @@ -726,7 +723,7 @@ impl Scheduler { &self, idx_to_validate: TxnIndex, wave: Wave, - ) -> Option<(Version, Wave)> { + ) -> Option<(TxnIndex, Incarnation, Wave)> { // We do compare-and-swap here instead of fetch-and-increment as for execution index // because we would like to not validate transactions when lower indices are in the // 'never_executed' state (to avoid unnecessarily reducing validation index and creating @@ -750,7 +747,7 @@ impl Scheduler { // return version and wave for validation task, otherwise None. return self .is_executed(idx_to_validate, false) - .map(|incarnation| ((idx_to_validate, incarnation), wave)); + .map(|incarnation| (idx_to_validate, incarnation, wave)); } None @@ -763,7 +760,7 @@ impl Scheduler { /// to create the next incarnation (should happen exactly once), and if successful, /// return the version to the caller for the corresponding ExecutionTask. /// - Otherwise, return None. - fn try_execute_next_version(&self) -> Option<(Version, ExecutionTaskType)> { + fn try_execute_next_version(&self) -> Option<(TxnIndex, Incarnation, ExecutionTaskType)> { let idx_to_execute = self.execution_idx.fetch_add(1, Ordering::SeqCst); if idx_to_execute >= self.num_txns { @@ -774,7 +771,7 @@ impl Scheduler { // return version for execution task, otherwise None. 
self.try_incarnate(idx_to_execute) .map(|(incarnation, execution_task_type)| { - ((idx_to_execute, incarnation), execution_task_type) + (idx_to_execute, incarnation, execution_task_type) }) } diff --git a/aptos-move/block-executor/src/task.rs b/aptos-move/block-executor/src/task.rs index 82c72711ba072..a5b910cccdc04 100644 --- a/aptos-move/block-executor/src/task.rs +++ b/aptos-move/block-executor/src/task.rs @@ -4,13 +4,15 @@ use aptos_aggregator::delta_change_set::DeltaOp; use aptos_mvhashmap::types::TxnIndex; -use aptos_state_view::TStateView; use aptos_types::{ contract_event::ReadWriteEvent, executable::ModulePath, fee_statement::FeeStatement, write_set::{TransactionWrite, WriteOp}, }; +use aptos_vm_types::resolver::TExecutorView; +use move_core_types::value::MoveTypeLayout; +use serde::{de::DeserializeOwned, Serialize}; use std::{collections::HashMap, fmt::Debug, hash::Hash}; /// The execution result of a transaction @@ -30,6 +32,22 @@ pub enum ExecutionStatus { /// transaction will write to a key value storage as their side effect. pub trait Transaction: Sync + Send + Clone + 'static { type Key: PartialOrd + Ord + Send + Sync + Clone + Hash + Eq + ModulePath + Debug; + /// Some keys contain multiple "resources" distinguished by a tag. Reading these keys requires + /// specifying a tag, and output requires merging all resources together (Note: this may change + /// in the future if write-set format changes to be per-resource, could be more performant). + /// Is generic primarily to provide easy plug-in replacement for mock tests and be extensible. + type Tag: PartialOrd + + Ord + + Send + + Sync + + Clone + + Hash + + Eq + + Debug + + DeserializeOwned + + Serialize; + /// AggregatorV2 identifier type. + type Identifier: PartialOrd + Ord + Send + Sync + Clone + Hash + Eq + Debug; type Value: Send + Sync + Clone + TransactionWrite; type Event: Send + Sync + Debug + Clone + ReadWriteEvent; } @@ -62,20 +80,15 @@ pub trait ExecutorTask: Sync { /// Execute a single transaction given the view of the current state. fn execute_transaction( &self, - view: &impl TStateView::Key>, + view: &impl TExecutorView< + ::Key, + MoveTypeLayout, + ::Identifier, + >, txn: &Self::Txn, txn_idx: TxnIndex, materialize_deltas: bool, ) -> ExecutionStatus; - - /// Trait that allows converting blobs to proper values. - fn convert_to_value( - &self, - view: &impl TStateView::Key>, - key: &::Key, - maybe_blob: Option>, - creation: bool, - ) -> anyhow::Result<::Value>; } /// Trait for execution result of a single transaction. diff --git a/aptos-move/block-executor/src/txn_last_input_output.rs b/aptos-move/block-executor/src/txn_last_input_output.rs index 11717bdd137be..d6cc643b22fba 100644 --- a/aptos-move/block-executor/src/txn_last_input_output.rs +++ b/aptos-move/block-executor/src/txn_last_input_output.rs @@ -6,7 +6,7 @@ use crate::{ task::{ExecutionStatus, Transaction, TransactionOutput}, }; use anyhow::anyhow; -use aptos_mvhashmap::types::{Incarnation, TxnIndex, Version}; +use aptos_mvhashmap::types::{TxnIndex, Version}; use aptos_types::{ access_path::AccessPath, executable::ModulePath, fee_statement::FeeStatement, write_set::WriteOp, @@ -42,34 +42,31 @@ impl TxnOutput { } /// Information about the read which is used by validation. -#[derive(Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq)] enum ReadKind { /// Read returned a value from the multi-version data-structure, with index /// and incarnation number of the execution associated with the write of /// that entry. 
- Version(TxnIndex, Incarnation), + Versioned(Version), /// Read resolved a delta. Resolved(u128), - /// Read occurred from storage. - Storage, - /// Read triggered a delta application failure. - DeltaApplicationFailure, + /// Speculative inconsistency failure. + SpeculativeFailure, /// Module read. TODO: Design a better representation once more meaningfully separated. Module, } -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct ReadDescriptor { access_path: K, - kind: ReadKind, } -impl ReadDescriptor { - pub fn from_version(access_path: K, txn_idx: TxnIndex, incarnation: Incarnation) -> Self { +impl ReadDescriptor { + pub fn from_versioned(access_path: K, version: Version) -> Self { Self { access_path, - kind: ReadKind::Version(txn_idx, incarnation), + kind: ReadKind::Versioned(version), } } @@ -80,13 +77,6 @@ impl ReadDescriptor { } } - pub fn from_storage(access_path: K) -> Self { - Self { - access_path, - kind: ReadKind::Storage, - } - } - pub fn from_module(access_path: K) -> Self { Self { access_path, @@ -94,10 +84,10 @@ impl ReadDescriptor { } } - pub fn from_delta_application_failure(access_path: K) -> Self { + pub fn from_speculative_failure(access_path: K) -> Self { Self { access_path, - kind: ReadKind::DeltaApplicationFailure, + kind: ReadKind::SpeculativeFailure, } } @@ -110,9 +100,8 @@ impl ReadDescriptor { } // Does the read descriptor describe a read from MVHashMap w. a specified version. - pub fn validate_version(&self, version: Version) -> bool { - let (txn_idx, incarnation) = version; - self.kind == ReadKind::Version(txn_idx, incarnation) + pub fn validate_versioned(&self, version: Version) -> bool { + self.kind == ReadKind::Versioned(version) } // Does the read descriptor describe a read from MVHashMap w. a resolved delta. @@ -120,15 +109,14 @@ impl ReadDescriptor { self.kind == ReadKind::Resolved(value) } - // Does the read descriptor describe a read from storage. - pub fn validate_storage(&self) -> bool { - // Module reading supported from storage version only at the moment. - self.kind == ReadKind::Storage || self.kind == ReadKind::Module + // Does the read descriptor describe a read from MVHashMap w. a resolved delta. + pub fn validate_module(&self) -> bool { + self.kind == ReadKind::Module } // Does the read descriptor describe to a read with a delta application failure. 
- pub fn validate_delta_application_failure(&self) -> bool { - self.kind == ReadKind::DeltaApplicationFailure + pub fn is_speculative_failure(&self) -> bool { + self.kind == ReadKind::SpeculativeFailure } } @@ -146,7 +134,9 @@ pub struct TxnLastInputOutput { module_read_write_intersection: AtomicBool, } -impl TxnLastInputOutput { +impl + TxnLastInputOutput +{ pub fn new(num_txns: TxnIndex) -> Self { Self { inputs: (0..num_txns) @@ -198,15 +188,21 @@ impl TxnLastInputO let read_modules: Vec = input .iter() .filter_map(|desc| { - matches!(desc.kind, ReadKind::Module) - .then(|| desc.module_path().expect("Module path guaranteed to exist")) + matches!(desc.kind, ReadKind::Module).then(|| { + desc.module_path() + .unwrap_or_else(|| panic!("Module path guaranteed to exist {:?}", desc)) + }) }) .collect(); let written_modules: Vec = match &output { ExecutionStatus::Success(output) | ExecutionStatus::SkipRest(output) => output .module_write_set() .keys() - .map(|k| k.module_path().expect("Module path guaranteed to exist")) + .map(|k| { + k.module_path().unwrap_or_else(|| { + panic!("Unexpected non-module key found in putput: {:?}", k) + }) + }) .collect(), ExecutionStatus::Abort(_) => Vec::new(), }; diff --git a/aptos-move/block-executor/src/unit_tests/mod.rs b/aptos-move/block-executor/src/unit_tests/mod.rs index 60ca7a0310693..9f6111a786372 100644 --- a/aptos-move/block-executor/src/unit_tests/mod.rs +++ b/aptos-move/block-executor/src/unit_tests/mod.rs @@ -58,13 +58,9 @@ where baseline.assert_output(&output); } -fn random_value(delete_value: bool) -> ValueType<[u8; 32]> { - ValueType( - (0..32) - .map(|_| (random::())) - .collect::>() - .try_into() - .expect("Unable to convert Vec to [u8; 32]"), +fn random_value(delete_value: bool) -> ValueType { + ValueType::new( + (0..32).map(|_| (random::())).collect::>(), !delete_value, ) } @@ -73,7 +69,7 @@ fn random_value(delete_value: bool) -> ValueType<[u8; 32]> { fn empty_block() { // This test checks that we do not trigger asserts due to an empty block, e.g. in the // scheduler. Instead, parallel execution should gracefully early return empty output. - run_and_assert::, ValueType<[u8; 32]>, MockEvent>(vec![]); + run_and_assert::, ValueType, MockEvent>(vec![]); } #[test] @@ -81,7 +77,7 @@ fn delta_counters() { let key = KeyType(random::<[u8; 32]>(), false); let mut transactions = vec![MockTransaction::from_behavior(MockIncarnation::< KeyType<[u8; 32]>, - ValueType<[u8; 32]>, + ValueType, MockEvent, > { reads: vec![], @@ -94,7 +90,7 @@ fn delta_counters() { for _ in 0..50 { transactions.push(MockTransaction::from_behavior(MockIncarnation::< KeyType<[u8; 32]>, - ValueType<[u8; 32]>, + ValueType, MockEvent, > { reads: vec![key], @@ -107,7 +103,7 @@ fn delta_counters() { transactions.push(MockTransaction::from_behavior(MockIncarnation::< KeyType<[u8; 32]>, - ValueType<[u8; 32]>, + ValueType, MockEvent, > { reads: vec![], @@ -120,7 +116,7 @@ fn delta_counters() { for _ in 0..50 { transactions.push(MockTransaction::from_behavior(MockIncarnation::< KeyType<[u8; 32]>, - ValueType<[u8; 32]>, + ValueType, MockEvent, > { reads: vec![key], @@ -144,38 +140,38 @@ fn delta_chains() { .collect(); for i in 0..500 { - transactions.push(MockTransaction::< - KeyType<[u8; 32]>, - ValueType<[u8; 32]>, - MockEvent, - >::from_behavior(MockIncarnation { - reads: keys.clone(), - writes: vec![], - events: vec![], - deltas: keys - .iter() - .enumerate() - .filter_map(|(j, k)| match (i + j) % 2 == 0 { - true => Some(( - *k, - // Deterministic pattern for adds/subtracts. 
- DeltaOp::new( - if (i % 2 == 0) == (j < 5) { - DeltaUpdate::Plus(10) - } else { - DeltaUpdate::Minus(1) - }, - // below params irrelevant for this test. - u128::MAX, - 0, - 0, - ), - )), - false => None, - }) - .collect(), - gas: 1, - })); + transactions.push( + MockTransaction::, ValueType, MockEvent>::from_behavior( + MockIncarnation { + reads: keys.clone(), + writes: vec![], + events: vec![], + deltas: keys + .iter() + .enumerate() + .filter_map(|(j, k)| match (i + j) % 2 == 0 { + true => Some(( + *k, + // Deterministic pattern for adds/subtracts. + DeltaOp::new( + if (i % 2 == 0) == (j < 5) { + DeltaUpdate::Plus(10) + } else { + DeltaUpdate::Minus(1) + }, + // below params irrelevant for this test. + u128::MAX, + 0, + 0, + ), + )), + false => None, + }) + .collect(), + gas: 1, + }, + ), + ); } run_and_assert(transactions) @@ -194,7 +190,7 @@ fn cycle_transactions() { for _ in 0..WRITES_PER_KEY { transactions.push(MockTransaction::from_behavior(MockIncarnation::< KeyType<[u8; 32]>, - ValueType<[u8; 32]>, + ValueType, MockEvent, > { reads: vec![KeyType(key, false)], @@ -221,7 +217,7 @@ fn one_reads_all_barrier() { for key in &keys { transactions.push(MockTransaction::from_behavior(MockIncarnation::< KeyType<[u8; 32]>, - ValueType<[u8; 32]>, + ValueType, MockEvent, > { reads: vec![*key], @@ -234,7 +230,7 @@ fn one_reads_all_barrier() { // One transaction reading the write results of every prior transactions in the block. transactions.push(MockTransaction::from_behavior(MockIncarnation::< KeyType<[u8; 32]>, - ValueType<[u8; 32]>, + ValueType, MockEvent, > { reads: keys.clone(), @@ -266,7 +262,7 @@ fn one_writes_all_barrier() { // One transaction writing to the write results of every prior transactions in the block. transactions.push(MockTransaction::from_behavior(MockIncarnation::< KeyType<[u8; 32]>, - ValueType<[u8; 32]>, + ValueType, MockEvent, > { reads: keys.clone(), @@ -293,7 +289,7 @@ fn early_aborts() { for key in &keys { transactions.push(MockTransaction::from_behavior(MockIncarnation::< KeyType<[u8; 32]>, - ValueType<[u8; 32]>, + ValueType, MockEvent, > { reads: vec![*key], @@ -320,7 +316,7 @@ fn early_skips() { for key in &keys { transactions.push(MockTransaction::from_behavior(MockIncarnation::< KeyType<[u8; 32]>, - ValueType<[u8; 32]>, + ValueType, MockEvent, > { reads: vec![*key], @@ -344,7 +340,7 @@ fn scheduler_tasks() { // No validation tasks. assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((j, 0), ExecutionTaskType::Execution) if i == j + SchedulerTask::ExecutionTask(j, 0, ExecutionTaskType::Execution) if i == j )); } @@ -360,7 +356,7 @@ fn scheduler_tasks() { for i in 0..5 { assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((j, 0), 0) if i == j + SchedulerTask::ValidationTask(j, 0, 0) if i == j )); } @@ -376,16 +372,16 @@ fn scheduler_tasks() { assert!(matches!( s.finish_abort(4, 0), - SchedulerTask::ExecutionTask((4, 1), ExecutionTaskType::Execution) + SchedulerTask::ExecutionTask(4, 1, ExecutionTaskType::Execution) )); assert!(matches!( s.finish_abort(1, 0), - SchedulerTask::ExecutionTask((1, 1), ExecutionTaskType::Execution) + SchedulerTask::ExecutionTask(1, 1, ExecutionTaskType::Execution) )); // Validation index = 2, wave = 1. 
assert!(matches!( s.finish_abort(3, 0), - SchedulerTask::ExecutionTask((3, 1), ExecutionTaskType::Execution) + SchedulerTask::ExecutionTask(3, 1, ExecutionTaskType::Execution) )); assert!(matches!( @@ -394,23 +390,23 @@ fn scheduler_tasks() { )); assert!(matches!( s.finish_execution(1, 1, false), - SchedulerTask::ValidationTask((1, 1), 1) + SchedulerTask::ValidationTask(1, 1, 1) )); // Another validation task for (2, 0). assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((2, 0), 1) + SchedulerTask::ValidationTask(2, 0, 1) )); // Now skip over txn 3 (status is Executing), and validate 4. assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((4, 1), 1) + SchedulerTask::ValidationTask(4, 1, 1) )); assert!(matches!( s.finish_execution(3, 1, false), - SchedulerTask::ValidationTask((3, 1), 1), + SchedulerTask::ValidationTask(3, 1, 1), )); s.finish_validation(0, 0); @@ -435,7 +431,7 @@ fn scheduler_first_wave() { // Nothing to validate. assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((j, 0), ExecutionTaskType::Execution) if j == i + SchedulerTask::ExecutionTask(j, 0, ExecutionTaskType::Execution) if j == i )); } @@ -449,11 +445,11 @@ fn scheduler_first_wave() { // Now we can validate version (0, 0). assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((0, 0), 0) + SchedulerTask::ValidationTask(0, 0, 0) )); assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((5, 0), ExecutionTaskType::Execution) + SchedulerTask::ExecutionTask(5, 0, ExecutionTaskType::Execution) )); // Since (1, 0) is not EXECUTED, no validation tasks, and execution index // is already at the limit, so no tasks immediately available. @@ -473,11 +469,11 @@ fn scheduler_first_wave() { )); assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((1, 0), 0) + SchedulerTask::ValidationTask(1, 0, 0) )); assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((2, 0), 0) + SchedulerTask::ValidationTask(2, 0, 0) )); assert!(matches!(s.next_task(false), SchedulerTask::NoTask)); } @@ -490,7 +486,7 @@ fn scheduler_dependency() { // Nothing to validate. assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((j, 0), ExecutionTaskType::Execution) if j == i + SchedulerTask::ExecutionTask(j, 0, ExecutionTaskType::Execution) if j == i )); } @@ -503,7 +499,7 @@ fn scheduler_dependency() { // Now we can validate version (0, 0). assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((0, 0), 0) + SchedulerTask::ValidationTask(0, 0, 0) )); // Current status of 0 is executed - hence, no dependency added. assert!(matches!( @@ -524,7 +520,7 @@ fn scheduler_dependency() { // resumed task doesn't bump incarnation assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((4, 0), ExecutionTaskType::Wakeup(_)) + SchedulerTask::ExecutionTask(4, 0, ExecutionTaskType::Wakeup(_)) )); } @@ -537,7 +533,7 @@ fn incarnation_one_scheduler(num_txns: TxnIndex) -> Scheduler { // Get the first executions out of the way. 
assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((j, 0), ExecutionTaskType::Execution) if j == i + SchedulerTask::ExecutionTask(j, 0, ExecutionTaskType::Execution) if j == i )); assert!(matches!( s.finish_execution(i, 0, false), @@ -545,12 +541,12 @@ fn incarnation_one_scheduler(num_txns: TxnIndex) -> Scheduler { )); assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((j, 0), 0) if i == j + SchedulerTask::ValidationTask(j, 0, 0) if i == j )); assert!(s.try_abort(i, 0)); assert!(matches!( s.finish_abort(i, 0), - SchedulerTask::ExecutionTask((j, 1), ExecutionTaskType::Execution) if i == j + SchedulerTask::ExecutionTask(j, 1, ExecutionTaskType::Execution) if i == j )); } s @@ -575,7 +571,7 @@ fn scheduler_incarnation() { // here validation wave increases to 1, and index is reduced to 3. assert!(matches!( s.finish_execution(2, 1, true), - SchedulerTask::ValidationTask((2, 1), 1) + SchedulerTask::ValidationTask(2, 1, 1) )); // Here since validation index is lower, wave doesn't increase and no task returned. assert!(matches!( @@ -585,7 +581,7 @@ fn scheduler_incarnation() { assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((4, 1), 1), + SchedulerTask::ValidationTask(4, 1, 1), )); assert!(s.try_abort(2, 1)); @@ -594,12 +590,12 @@ fn scheduler_incarnation() { assert!(matches!( s.finish_abort(2, 1), - SchedulerTask::ExecutionTask((2, 2), ExecutionTaskType::Execution) + SchedulerTask::ExecutionTask(2, 2, ExecutionTaskType::Execution) )); // wave = 2, validation index = 2. assert!(matches!( s.finish_execution(0, 1, false), - SchedulerTask::ValidationTask((0, 1), 2) + SchedulerTask::ValidationTask(0, 1, 2) )); // execution index = 1 @@ -607,29 +603,29 @@ fn scheduler_incarnation() { assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((1, 1), ExecutionTaskType::Wakeup(_)) + SchedulerTask::ExecutionTask(1, 1, ExecutionTaskType::Wakeup(_)) )); assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((3, 1), ExecutionTaskType::Wakeup(_)) + SchedulerTask::ExecutionTask(3, 1, ExecutionTaskType::Wakeup(_)) )); assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((4, 2), ExecutionTaskType::Execution) + SchedulerTask::ExecutionTask(4, 2, ExecutionTaskType::Execution) )); // execution index = 5 assert!(matches!( s.finish_execution(1, 1, false), - SchedulerTask::ValidationTask((1, 1), 2) + SchedulerTask::ValidationTask(1, 1, 2) )); assert!(matches!( s.finish_execution(2, 2, false), - SchedulerTask::ValidationTask((2, 2), 2) + SchedulerTask::ValidationTask(2, 2, 2) )); assert!(matches!( s.finish_execution(3, 1, false), - SchedulerTask::ValidationTask((3, 1), 2) + SchedulerTask::ValidationTask(3, 1, 2) )); // validation index is 4, so finish execution doesn't return validation task, next task does. @@ -639,7 +635,7 @@ fn scheduler_incarnation() { )); assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((4, 2), 2) + SchedulerTask::ValidationTask(4, 2, 2) )); } @@ -651,7 +647,7 @@ fn scheduler_basic() { // Nothing to validate. 
assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((j, 0), ExecutionTaskType::Execution) if j == i + SchedulerTask::ExecutionTask(j, 0, ExecutionTaskType::Execution) if j == i )); } @@ -666,11 +662,11 @@ fn scheduler_basic() { )); assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((0, 0), 0) + SchedulerTask::ValidationTask(0, 0, 0) )); assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((1, 0), 0) + SchedulerTask::ValidationTask(1, 0, 0) )); assert!(matches!( s.finish_execution(2, 0, true), @@ -678,7 +674,7 @@ fn scheduler_basic() { )); assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((2, 0), 0) + SchedulerTask::ValidationTask(2, 0, 0) )); for i in 0..3 { @@ -701,7 +697,7 @@ fn scheduler_drain_idx() { // Nothing to validate. assert!(matches!( s.next_task(false), - SchedulerTask::ExecutionTask((j, 0), ExecutionTaskType::Execution) if j == i + SchedulerTask::ExecutionTask(j, 0, ExecutionTaskType::Execution) if j == i )); } @@ -716,11 +712,11 @@ fn scheduler_drain_idx() { )); assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((0, 0), 0) + SchedulerTask::ValidationTask(0, 0, 0) )); assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((1, 0), 0) + SchedulerTask::ValidationTask(1, 0, 0) )); assert!(matches!( s.finish_execution(2, 0, true), @@ -728,7 +724,7 @@ fn scheduler_drain_idx() { )); assert!(matches!( s.next_task(false), - SchedulerTask::ValidationTask((2, 0), 0) + SchedulerTask::ValidationTask(2, 0, 0) )); for i in 0..3 { @@ -750,21 +746,21 @@ fn finish_execution_wave() { let s = incarnation_one_scheduler(2); assert!(matches!( s.finish_execution(1, 1, true), - SchedulerTask::ValidationTask((1, 1), 0), + SchedulerTask::ValidationTask(1, 1, 0), )); // Here wave will increase, because validation index is reduced from 3 to 2. let s = incarnation_one_scheduler(3); assert!(matches!( s.finish_execution(1, 1, true), - SchedulerTask::ValidationTask((1, 1), 1), + SchedulerTask::ValidationTask(1, 1, 1), )); // Here wave won't be increased, because we pass revalidate_suffix = false. let s = incarnation_one_scheduler(3); assert!(matches!( s.finish_execution(1, 1, false), - SchedulerTask::ValidationTask((1, 1), 0), + SchedulerTask::ValidationTask(1, 1, 0), )); } @@ -776,7 +772,7 @@ fn rolling_commit_wave() { // validation index is higher will return validation task to the caller. assert!(matches!( s.finish_execution(0, 1, false), - SchedulerTask::ValidationTask((0, 1), 0) + SchedulerTask::ValidationTask(0, 1, 0) )); // finish validating txn 0 with proper wave s.finish_validation(0, 1); @@ -788,7 +784,7 @@ fn rolling_commit_wave() { // sets validation_index to 2. assert!(matches!( s.finish_execution(1, 1, true), - SchedulerTask::ValidationTask((1, 1), 1), + SchedulerTask::ValidationTask(1, 1, 1), )); // finish validating txn 1 with lower wave @@ -849,12 +845,12 @@ fn no_conflict_task_count() { loop { while tasks.len() < num_concurrent_tasks { match s.next_task(false) { - SchedulerTask::ExecutionTask((txn_idx, incarnation), _) => { + SchedulerTask::ExecutionTask(txn_idx, incarnation, _) => { assert_eq!(incarnation, 0); // true means an execution task. tasks.insert(rng.gen::(), (true, txn_idx)); }, - SchedulerTask::ValidationTask((txn_idx, incarnation), cur_wave) => { + SchedulerTask::ValidationTask(txn_idx, incarnation, cur_wave) => { assert_eq!(incarnation, 0); assert_eq!(cur_wave, 0); // false means a validation task. 
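// A minimal sketch of caller-side matching on the flattened SchedulerTask variants
// exercised by the test hunks above (the function name is hypothetical; `Scheduler`,
// `SchedulerTask` and `next_task` are the items from this file, assumed in scope):
fn poll_once_sketch(scheduler: &Scheduler) {
    match scheduler.next_task(false) {
        // Previously: SchedulerTask::ExecutionTask((txn_idx, incarnation), task_type)
        SchedulerTask::ExecutionTask(txn_idx, incarnation, _task_type) => {
            let _ = (txn_idx, incarnation); // execute this (index, incarnation) pair
        },
        // Previously: SchedulerTask::ValidationTask((txn_idx, incarnation), wave)
        SchedulerTask::ValidationTask(txn_idx, incarnation, wave) => {
            let _ = (txn_idx, incarnation, wave); // validate against the recorded read-set
        },
        SchedulerTask::NoTask | SchedulerTask::Done => {},
    }
}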
@@ -879,7 +875,7 @@ fn no_conflict_task_count() { num_exec_tasks += 1; // Process a task that may have been returned. - if let SchedulerTask::ValidationTask((idx, incarnation), wave) = task_res { + if let SchedulerTask::ValidationTask(idx, incarnation, wave) = task_res { assert_eq!(idx, txn_idx); assert_eq!(incarnation, 0); assert_eq!(wave, 0); diff --git a/aptos-move/block-executor/src/view.rs b/aptos-move/block-executor/src/view.rs index 9ab6f3abdfb85..19795b51d33dd 100644 --- a/aptos-move/block-executor/src/view.rs +++ b/aptos-move/block-executor/src/view.rs @@ -7,8 +7,10 @@ use crate::{ task::Transaction, txn_last_input_output::ReadDescriptor, }; -use anyhow::Result; -use aptos_aggregator::delta_change_set::serialize; +use aptos_aggregator::{ + delta_change_set::serialize, + resolver::{AggregatorReadMode, TAggregatorView}, +}; use aptos_logger::error; use aptos_mvhashmap::{ types::{MVDataError, MVDataOutput, MVModulesError, MVModulesOutput, TxnIndex}, @@ -19,10 +21,14 @@ use aptos_state_view::{StateViewId, TStateView}; use aptos_types::{ executable::{Executable, ModulePath}, state_store::{state_storage_usage::StateStorageUsage, state_value::StateValue}, - vm_status::{StatusCode, VMStatus}, write_set::TransactionWrite, }; use aptos_vm_logging::{log_schema::AdapterLogSchema, prelude::*}; +use aptos_vm_types::resolver::{StateStorageView, TModuleView, TResourceView}; +use move_core_types::{ + value::MoveTypeLayout, + vm_status::{StatusCode, VMStatus}, +}; use std::{ cell::RefCell, fmt::Debug, @@ -37,16 +43,17 @@ pub(crate) enum ReadResult { Value(Arc), // Similar to above, but the value was aggregated and is an integer. U128(u128), - // Read could not resolve the delta (no base value). - Unresolved, - // Parallel execution halts. - ExecutionHalted, // Read did not return anything. - None, + Uninitialized, + // Must half the execution of the calling transaction. This might be because + // there was an inconsistency in observed speculative state, or dependency + // waiting indicated that the parallel execution had been halted. The String + // parameter provides more context (error description / message). 
+ HaltSpeculativeExecution(String), } pub(crate) struct ParallelState<'a, T: Transaction, X: Executable> { - versioned_map: &'a MVHashMap, + versioned_map: &'a MVHashMap, scheduler: &'a Scheduler, _counter: &'a AtomicU32, captured_reads: RefCell>>, @@ -54,7 +61,7 @@ pub(crate) struct ParallelState<'a, T: Transaction, X: Executable> { impl<'a, T: Transaction, X: Executable> ParallelState<'a, T, X> { pub(crate) fn new( - shared_map: &'a MVHashMap, + shared_map: &'a MVHashMap, shared_scheduler: &'a Scheduler, shared_counter: &'a AtomicU32, ) -> Self { @@ -77,11 +84,7 @@ impl<'a, T: Transaction, X: Executable> ParallelState<'a, T, X> { .borrow_mut() .push(ReadDescriptor::from_module(key.clone())); - self.versioned_map.fetch_module(key, txn_idx) - } - - fn set_aggregator_base_value(&self, key: &T::Key, value: u128) { - self.versioned_map.set_aggregator_base_value(key, value); + self.versioned_map.modules().fetch_module(key, txn_idx) } /// Captures a read from the VM execution, but not unresolved deltas, as in this case it is the @@ -91,12 +94,11 @@ impl<'a, T: Transaction, X: Executable> ParallelState<'a, T, X> { use MVDataOutput::*; loop { - match self.versioned_map.fetch_data(key, txn_idx) { + match self.versioned_map.data().fetch_data(key, txn_idx) { Ok(Versioned(version, v)) => { - let (idx, incarnation) = version; self.captured_reads .borrow_mut() - .push(ReadDescriptor::from_version(key.clone(), idx, incarnation)); + .push(ReadDescriptor::from_versioned(key.clone(), version)); return ReadResult::Value(v); }, Ok(Resolved(value)) => { @@ -105,13 +107,13 @@ impl<'a, T: Transaction, X: Executable> ParallelState<'a, T, X> { .push(ReadDescriptor::from_resolved(key.clone(), value)); return ReadResult::U128(value); }, - Err(NotFound) => { - self.captured_reads - .borrow_mut() - .push(ReadDescriptor::from_storage(key.clone())); - return ReadResult::None; + Err(Uninitialized) | Err(Unresolved(_)) => { + // The underlying assumption here for not recording anything about the read is + // that the caller is expected to initialize the contents and serve the reads + // solely via the 'fetch_read' interface. Thus, the later, successful read, + // will make the needed recordings. + return ReadResult::Uninitialized; }, - Err(Unresolved(_)) => return ReadResult::Unresolved, Err(Dependency(dep_idx)) => { // `self.txn_idx` estimated to depend on a write from `dep_idx`. match self.scheduler.wait_for_dependency(txn_idx, dep_idx) { @@ -137,11 +139,15 @@ impl<'a, T: Transaction, X: Executable> ParallelState<'a, T, X> { dep_resolved = cvar.wait(dep_resolved).unwrap(); } if let DependencyStatus::ExecutionHalted = *dep_resolved { - return ReadResult::ExecutionHalted; + return ReadResult::HaltSpeculativeExecution( + "Speculative error to halt BlockSTM early.".to_string(), + ); } }, DependencyResult::ExecutionHalted => { - return ReadResult::ExecutionHalted; + return ReadResult::HaltSpeculativeExecution( + "Speculative error to halt BlockSTM early.".to_string(), + ); }, DependencyResult::Resolved => continue, } @@ -150,10 +156,14 @@ impl<'a, T: Transaction, X: Executable> ParallelState<'a, T, X> { // Delta application failure currently should never happen. Here, we assume it // happened because of speculation and return 0 to the Move-VM. Validation will // ensure the transaction re-executes if 0 wasn't the right number. 
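[Editorial aside] The dependency branch above blocks on a Mutex/Condvar pair until the dependency status changes and then checks for ExecutionHalted. A self-contained sketch of that wait loop, using a simplified local status enum rather than the real scheduler types:

use std::sync::{Arc, Condvar, Mutex};

#[derive(Clone, Copy, PartialEq)]
enum DependencyStatus {
    Unresolved,
    Resolved,
    ExecutionHalted,
}

fn wait_for_dependency(pair: &Arc<(Mutex<DependencyStatus>, Condvar)>) -> DependencyStatus {
    let (lock, cvar) = &**pair;
    let mut status = lock.lock().unwrap();
    // Loop to tolerate spurious wakeups: only exit once the status actually changed.
    while *status == DependencyStatus::Unresolved {
        status = cvar.wait(status).unwrap();
    }
    *status
}

fn main() {
    let pair = Arc::new((Mutex::new(DependencyStatus::Unresolved), Condvar::new()));
    let waiter = {
        let pair = Arc::clone(&pair);
        std::thread::spawn(move || wait_for_dependency(&pair))
    };
    // Resolve the dependency from another thread and wake the waiter.
    *pair.0.lock().unwrap() = DependencyStatus::Resolved;
    pair.1.notify_all();
    assert!(waiter.join().unwrap() == DependencyStatus::Resolved);
}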
+ self.captured_reads .borrow_mut() - .push(ReadDescriptor::from_delta_application_failure(key.clone())); - return ReadResult::U128(0); + .push(ReadDescriptor::from_speculative_failure(key.clone())); + + return ReadResult::HaltSpeculativeExecution( + "Delta application failure (must be speculative)".to_string(), + ); }, }; } @@ -221,63 +231,96 @@ impl<'a, T: Transaction, S: TStateView, X: Executable> LatestView< } } -impl<'a, T: Transaction, S: TStateView, X: Executable> TStateView +impl<'a, T: Transaction, S: TStateView, X: Executable> TResourceView for LatestView<'a, T, S, X> { type Key = T::Key; + type Layout = MoveTypeLayout; + + fn get_resource_state_value( + &self, + state_key: &Self::Key, + _maybe_layout: Option<&Self::Layout>, + ) -> anyhow::Result> { + debug_assert!( + state_key.module_path().is_none(), + "Reading a module {:?} using ResourceView", + state_key, + ); - fn get_state_value(&self, state_key: &T::Key) -> anyhow::Result> { match &self.latest_view { - ViewState::Sync(state) => match state_key.module_path() { - Some(_) => { - use MVModulesError::*; - use MVModulesOutput::*; - - match state.fetch_module(state_key, self.txn_idx) { - Ok(Executable(_)) => unreachable!("Versioned executable not implemented"), - Ok(Module((v, _))) => Ok(v.as_state_value()), - Err(Dependency(_)) => { - // Return anything (e.g. module does not exist) to avoid waiting, - // because parallel execution will fall back to sequential anyway. - Ok(None) - }, - Err(NotFound) => self.base_view.get_state_value(state_key), - } - }, - None => { - let mut mv_value = state.fetch_data(state_key, self.txn_idx); + ViewState::Sync(state) => { + let mut mv_value = state.fetch_data(state_key, self.txn_idx); - if matches!(mv_value, ReadResult::Unresolved) { - let from_storage = self - .base_view - .get_state_value_u128(state_key)? - .ok_or(VMStatus::error(StatusCode::STORAGE_ERROR, None))?; + if matches!(mv_value, ReadResult::Uninitialized) { + let from_storage = self.base_view.get_state_value(state_key)?; - // Store base value in the versioned data-structure directly, so subsequent - // reads can be resolved to U128 directly without storage calls. - state.set_aggregator_base_value(state_key, from_storage); + // This base value can also be used to resolve AggregatorV1 directly from + // the versioned data-structure (without more storage calls). + state.versioned_map.data().provide_base_value( + state_key.clone(), + TransactionWrite::from_state_value(from_storage), + ); - mv_value = state.fetch_data(state_key, self.txn_idx); - } + mv_value = state.fetch_data(state_key, self.txn_idx); + } - match mv_value { - ReadResult::Value(v) => Ok(v.as_state_value()), - ReadResult::U128(v) => Ok(Some(StateValue::new_legacy(serialize(&v)))), - // ExecutionHalted indicates that the parallel execution is halted. - // The read should return immediately and log the error. - // For now we use STORAGE_ERROR as the VM will not log the speculative eror, - // so no actual error will be logged once the execution is halted and - // the speculative logging is flushed. 
- ReadResult::ExecutionHalted => Err(anyhow::Error::new(VMStatus::error( - StatusCode::STORAGE_ERROR, - Some("Speculative error to halt BlockSTM early.".to_string()), - ))), - ReadResult::None => self.get_base_value(state_key), - ReadResult::Unresolved => unreachable!( - "Must be resolved as base value is recorded in the MV data structure" - ), - } + match mv_value { + ReadResult::Value(v) => Ok(v.as_state_value()), + ReadResult::U128(v) => Ok(Some(StateValue::new_legacy(serialize(&v).into()))), + // ExecutionHalted indicates that the parallel execution is halted. + // The read should return immediately and log the error. + // For now we use STORAGE_ERROR as the VM will not log the speculative eror, + // so no actual error will be logged once the execution is halted and + // the speculative logging is flushed. + ReadResult::HaltSpeculativeExecution(msg) => Err(anyhow::Error::new( + VMStatus::error(StatusCode::STORAGE_ERROR, Some(msg)), + )), + ReadResult::Uninitialized => { + unreachable!("base value must already be recorded in the MV data structure") + }, + } + }, + ViewState::Unsync(state) => state.unsync_map.fetch_data(state_key).map_or_else( + || { + // TODO: AggregatorV2 ID for sequential must be replaced in this flow. + self.get_base_value(state_key) }, + |v| Ok(v.as_state_value()), + ), + } + } + + // TODO: implement here fn get_resource_state_value_metadata & resource_exists. +} + +impl<'a, T: Transaction, S: TStateView, X: Executable> TModuleView + for LatestView<'a, T, S, X> +{ + type Key = T::Key; + + fn get_module_state_value(&self, state_key: &Self::Key) -> anyhow::Result> { + debug_assert!( + state_key.module_path().is_some(), + "Reading a resource {:?} using ModuleView", + state_key, + ); + + match &self.latest_view { + ViewState::Sync(state) => { + use MVModulesError::*; + use MVModulesOutput::*; + + match state.fetch_module(state_key, self.txn_idx) { + Ok(Executable(_)) => unreachable!("Versioned executable not implemented"), + Ok(Module((v, _))) => Ok(v.as_state_value()), + Err(Dependency(_)) => { + // Return anything (e.g. module does not exist) to avoid waiting, + // because parallel execution will fall back to sequential anyway. + Ok(None) + }, + Err(NotFound) => self.base_view.get_state_value(state_key), + } }, ViewState::Unsync(state) => state.unsync_map.fetch_data(state_key).map_or_else( || self.get_base_value(state_key), @@ -285,12 +328,32 @@ impl<'a, T: Transaction, S: TStateView, X: Executable> TStateView ), } } +} +impl<'a, T: Transaction, S: TStateView, X: Executable> StateStorageView + for LatestView<'a, T, S, X> +{ fn id(&self) -> StateViewId { self.base_view.id() } - fn get_usage(&self) -> Result { + fn get_usage(&self) -> anyhow::Result { self.base_view.get_usage() } } + +impl<'a, T: Transaction, S: TStateView, X: Executable> TAggregatorView + for LatestView<'a, T, S, X> +{ + type IdentifierV1 = T::Key; + type IdentifierV2 = T::Identifier; + + fn get_aggregator_v1_state_value( + &self, + state_key: &Self::IdentifierV1, + _mode: AggregatorReadMode, + ) -> anyhow::Result> { + // TODO: Integrate aggregators. 
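[Editorial aside] Summarizing the view.rs rework above: the resource read path now turns every ReadResult variant into either a value or a hard error. A simplified, self-contained mirror of that translation — local stand-in types, plain byte blobs instead of StateValue, little-endian bytes standing in for the bcs serialization, and anyhow for the error type:

enum ReadResult {
    Value(Vec<u8>),
    U128(u128),
    Uninitialized,
    HaltSpeculativeExecution(String),
}

fn resolve(read: ReadResult) -> anyhow::Result<Option<Vec<u8>>> {
    match read {
        ReadResult::Value(v) => Ok(Some(v)),
        // AggregatorV1 reads resolve to an integer that gets re-serialized.
        ReadResult::U128(v) => Ok(Some(v.to_le_bytes().to_vec())),
        // The caller is expected to seed the base value and retry the read, so
        // Uninitialized should never survive to this point.
        ReadResult::Uninitialized => unreachable!("base value must be provided first"),
        // Speculative inconsistencies or an early halt surface as an error that
        // carries the descriptive message.
        ReadResult::HaltSpeculativeExecution(msg) => Err(anyhow::anyhow!(msg)),
    }
}

fn main() -> anyhow::Result<()> {
    assert_eq!(resolve(ReadResult::U128(7))?.unwrap()[0], 7);
    assert!(resolve(ReadResult::HaltSpeculativeExecution("halted".to_string())).is_err());
    Ok(())
}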
+ self.get_resource_state_value(state_key, None) + } +} diff --git a/aptos-move/e2e-move-tests/Cargo.toml b/aptos-move/e2e-move-tests/Cargo.toml index 36eebf6e94bfe..371c3f9945c8b 100644 --- a/aptos-move/e2e-move-tests/Cargo.toml +++ b/aptos-move/e2e-move-tests/Cargo.toml @@ -30,7 +30,6 @@ aptos-state-view = { workspace = true } aptos-types = { workspace = true } aptos-vm = { workspace = true, features = ["testing"] } aptos-vm-genesis = { workspace = true } -aptos-writeset-generator = { workspace = true } bcs = { workspace = true } hex = { workspace = true } itertools = { workspace = true } diff --git a/aptos-move/e2e-move-tests/src/aggregator.rs b/aptos-move/e2e-move-tests/src/aggregator.rs index c58f839e3174e..d1b4fb8c736c8 100644 --- a/aptos-move/e2e-move-tests/src/aggregator.rs +++ b/aptos-move/e2e-move-tests/src/aggregator.rs @@ -12,7 +12,7 @@ pub fn initialize(path: PathBuf) -> (MoveHarness, Account) { let mut harness = MoveHarness::new_with_executor(executor); let account = harness.new_account_at(AccountAddress::ONE); - assert_success!(harness.publish_package(&account, &path)); + assert_success!(harness.publish_package_cache_building(&account, &path)); assert_success!(harness.run_entry_function( &account, str::parse("0x1::aggregator_test::initialize").unwrap(), diff --git a/aptos-move/e2e-move-tests/src/aggregator_v2.rs b/aptos-move/e2e-move-tests/src/aggregator_v2.rs index b5cfbef24a667..1a416c652b190 100644 --- a/aptos-move/e2e-move-tests/src/aggregator_v2.rs +++ b/aptos-move/e2e-move-tests/src/aggregator_v2.rs @@ -15,7 +15,7 @@ pub fn initialize(path: PathBuf) -> (MoveHarness, Account) { let mut harness = MoveHarness::new_with_executor(executor); harness.enable_features(vec![FeatureFlag::AGGREGATOR_SNAPSHOTS], vec![]); let account = harness.new_account_at(AccountAddress::ONE); - assert_success!(harness.publish_package(&account, &path)); + assert_success!(harness.publish_package_cache_building(&account, &path)); (harness, account) } diff --git a/aptos-move/e2e-move-tests/src/harness.rs b/aptos-move/e2e-move-tests/src/harness.rs index 4298c60b9f616..bdb10ac6abec5 100644 --- a/aptos-move/e2e-move-tests/src/harness.rs +++ b/aptos-move/e2e-move-tests/src/harness.rs @@ -37,16 +37,24 @@ use move_core_types::{ value::MoveValue, }; use move_package::package_hooks::register_package_hooks; +use once_cell::sync::Lazy; use project_root::get_project_root; use rand::{ rngs::{OsRng, StdRng}, Rng, SeedableRng, }; use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::BTreeMap, path::Path}; +use std::{ + collections::{BTreeMap, HashMap}, + path::{Path, PathBuf}, + sync::{Arc, Mutex}, +}; const DEFAULT_GAS_UNIT_PRICE: u64 = 100; +static CACHED_BUILT_PACKAGES: Lazy>>>> = + Lazy::new(|| Mutex::new(HashMap::new())); + /// A simple test harness for defining Move e2e tests. /// /// Tests defined via this harness typically live in the `/tests` directory, the standard @@ -330,19 +338,16 @@ impl MoveHarness { output.gas_used() } - /// Creates a transaction which publishes the Move Package found at the given path on behalf + /// Creates a transaction which publishes the passed already-built Move Package on behalf /// of the given account. /// /// The passed function allows to manipulate the generated metadata for testing purposes. 
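[Editorial aside] The CACHED_BUILT_PACKAGES static above memoizes package builds across tests behind a Lazy-initialized Mutex<HashMap> keyed by path, sharing the (possibly failed) result via Arc. A minimal standalone sketch of the same memoization pattern, with a plain string standing in for BuiltPackage and a fake build step (assumes the once_cell and anyhow crates):

use once_cell::sync::Lazy;
use std::{
    collections::HashMap,
    path::PathBuf,
    sync::{Arc, Mutex},
};

// Cache the result of an expensive build, keyed by package path.
static CACHE: Lazy<Mutex<HashMap<PathBuf, Arc<anyhow::Result<String>>>>> =
    Lazy::new(|| Mutex::new(HashMap::new()));

fn expensive_build(path: &PathBuf) -> anyhow::Result<String> {
    // Stand-in for BuiltPackage::build; imagine this compiling a Move package.
    Ok(format!("built {}", path.display()))
}

fn build_cached(path: PathBuf) -> Arc<anyhow::Result<String>> {
    let mut cache = CACHE.lock().unwrap();
    Arc::clone(
        cache
            .entry(path.clone())
            .or_insert_with(|| Arc::new(expensive_build(&path))),
    )
}

fn main() {
    let first = build_cached(PathBuf::from("pkg/a"));
    let second = build_cached(PathBuf::from("pkg/a"));
    // The second call reuses the cached Arc rather than rebuilding.
    assert!(Arc::ptr_eq(&first, &second));
}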
- pub fn create_publish_package( + pub fn create_publish_built_package( &mut self, account: &Account, - path: &Path, - options: Option, + package: &BuiltPackage, mut patch_metadata: impl FnMut(&mut PackageMetadata), ) -> SignedTransaction { - let package = BuiltPackage::build(path.to_owned(), options.unwrap_or_default()) - .expect("building package must succeed"); let code = package.extract_code(); let mut metadata = package .extract_metadata() @@ -357,6 +362,55 @@ impl MoveHarness { ) } + /// Creates a transaction which publishes the Move Package found at the given path on behalf + /// of the given account. + /// + /// The passed function allows to manipulate the generated metadata for testing purposes. + pub fn create_publish_package( + &mut self, + account: &Account, + path: &Path, + options: Option, + patch_metadata: impl FnMut(&mut PackageMetadata), + ) -> SignedTransaction { + let package = BuiltPackage::build(path.to_owned(), options.unwrap_or_default()) + .expect("building package must succeed"); + self.create_publish_built_package(account, &package, patch_metadata) + } + + pub fn create_publish_package_cache_building( + &mut self, + account: &Account, + path: &Path, + patch_metadata: impl FnMut(&mut PackageMetadata), + ) -> SignedTransaction { + let package_arc = { + let mut cache = CACHED_BUILT_PACKAGES.lock().unwrap(); + + Arc::clone(cache.entry(path.to_owned()).or_insert_with(|| { + Arc::new(BuiltPackage::build( + path.to_owned(), + BuildOptions::default(), + )) + })) + }; + let package_ref = package_arc + .as_ref() + .as_ref() + .expect("building package must succeed"); + self.create_publish_built_package(account, package_ref, patch_metadata) + } + + /// Runs transaction which publishes the Move Package. + pub fn publish_package_cache_building( + &mut self, + account: &Account, + path: &Path, + ) -> TransactionStatus { + let txn = self.create_publish_package_cache_building(account, path, |_| {}); + self.run(txn) + } + /// Runs transaction which publishes the Move Package. pub fn publish_package(&mut self, account: &Account, path: &Path) -> TransactionStatus { let txn = self.create_publish_package(account, path, None, |_| {}); @@ -451,7 +505,8 @@ impl MoveHarness { } pub fn read_state_value_bytes(&self, state_key: &StateKey) -> Option> { - self.read_state_value(state_key).map(StateValue::into_bytes) + self.read_state_value(state_key) + .map(|val| val.bytes().to_vec()) } /// Reads the raw, serialized data of a resource. @@ -679,16 +734,37 @@ impl MoveHarness { /// Helper to assert transaction is successful #[macro_export] macro_rules! assert_success { - ($s:expr) => {{ - use aptos_types::transaction::*; - assert_eq!($s, TransactionStatus::Keep(ExecutionStatus::Success)) + ($s:expr $(,)?) => {{ + assert_eq!($s, aptos_types::transaction::TransactionStatus::Keep( + aptos_types::transaction::ExecutionStatus::Success)) + }}; + ($s:expr, $($arg:tt)+) => {{ + assert_eq!( + $s, + aptos_types::transaction::TransactionStatus::Keep( + aptos_types::transaction::ExecutionStatus::Success), + $($arg)+ + ) }}; } /// Helper to assert transaction aborts. #[macro_export] macro_rules! assert_abort { - ($s:expr, $c:pat) => {{ + // identity needs to be before pattern (both with and without message), + // as if we pass variable - it matches the pattern arm, but value is not used, but overriden. + // Opposite order and test_asserts_variable_used / test_asserts_variable_used_with_message tests + // would fail + ($s:expr, $c:ident $(,)?) 
=> {{ + assert!(matches!( + $s, + aptos_types::transaction::TransactionStatus::Keep( + aptos_types::transaction::ExecutionStatus::MoveAbort { code, .. } + ) + if code == $c, + )); + }}; + ($s:expr, $c:pat $(,)?) => {{ assert!(matches!( $s, aptos_types::transaction::TransactionStatus::Keep( @@ -696,23 +772,54 @@ macro_rules! assert_abort { ), )); }}; + ($s:expr, $c:ident, $($arg:tt)+) => {{ + assert!( + matches!( + $s, + aptos_types::transaction::TransactionStatus::Keep( + aptos_types::transaction::ExecutionStatus::MoveAbort { code, .. } + ) + if code == $c, + ), + $($arg)+ + ); + }}; + ($s:expr, $c:pat, $($arg:tt)+) => {{ + assert!( + matches!( + $s, + aptos_types::transaction::TransactionStatus::Keep( + aptos_types::transaction::ExecutionStatus::MoveAbort { code: $c, .. } + ), + ), + $($arg)+ + ); + }}; } /// Helper to assert vm status code. #[macro_export] macro_rules! assert_vm_status { - ($s:expr, $c:expr) => {{ + ($s:expr, $c:expr $(,)?) => {{ use aptos_types::transaction::*; assert_eq!( $s, TransactionStatus::Keep(ExecutionStatus::MiscellaneousError(Some($c))) ); }}; + ($s:expr, $c:expr, $($arg:tt)+) => {{ + use aptos_types::transaction::*; + assert_eq!( + $s, + TransactionStatus::Keep(ExecutionStatus::MiscellaneousError(Some($c))), + $($arg)+, + ); + }}; } #[macro_export] macro_rules! assert_move_abort { - ($s:expr, $c:ident) => {{ + ($s:expr, $c:ident $(,)?) => {{ use aptos_types::transaction::*; assert!(match $s { TransactionStatus::Keep(ExecutionStatus::MoveAbort { @@ -723,4 +830,81 @@ macro_rules! assert_move_abort { _ => false, }); }}; + ($s:expr, $c:ident, $($arg:tt)+) => {{ + use aptos_types::transaction::*; + assert!( + match $s { + TransactionStatus::Keep(ExecutionStatus::MoveAbort { + location: _, + code: _, + info, + }) => info == $c, + _ => false, + }, + $($arg)+ + ); + }}; +} + +#[cfg(test)] +mod tests { + use aptos_types::transaction::{ExecutionStatus, TransactionStatus}; + use move_core_types::vm_status::AbortLocation; + + #[test] + fn test_asserts() { + let success = TransactionStatus::Keep(ExecutionStatus::Success); + + let abort_13 = TransactionStatus::Keep(ExecutionStatus::MoveAbort { + code: 13, + location: AbortLocation::Script, + info: None, + }); + + assert_success!(success); + assert_success!(success,); + assert_success!(success, "success"); + assert_success!(success, "message {}", 0); + assert_success!(success, "message {}", 0,); + + let x = 13; + assert_abort!(abort_13, 13); + assert_abort!(abort_13, 13,); + assert_abort!(abort_13, x); + assert_abort!(abort_13, _); + assert_abort!(abort_13, 13 | 14); + assert_abort!(abort_13, 13, "abort"); + assert_abort!(abort_13, 13, "abort {}", 0); + assert_abort!(abort_13, x, "abort"); + assert_abort!(abort_13, x, "abort {}", 0); + assert_abort!(abort_13, _, "abort"); + assert_abort!(abort_13, 13 | 14, "abort"); + assert_abort!(abort_13, 13 | 14, "abort",); + } + + #[test] + #[should_panic] + fn test_asserts_variable_used() { + let abort_13 = TransactionStatus::Keep(ExecutionStatus::MoveAbort { + code: 13, + location: AbortLocation::Script, + info: None, + }); + + let x = 14; + assert_abort!(abort_13, x); + } + + #[test] + #[should_panic] + fn test_asserts_variable_used_with_message() { + let abort_13 = TransactionStatus::Keep(ExecutionStatus::MoveAbort { + code: 13, + location: AbortLocation::Script, + info: None, + }); + + let x = 14; + assert_abort!(abort_13, x, "abort"); + } } diff --git a/aptos-move/e2e-move-tests/src/tests/chain_id.rs b/aptos-move/e2e-move-tests/src/tests/chain_id.rs index 
3c3cb526ec15d..de7a042ac4b3c 100644 --- a/aptos-move/e2e-move-tests/src/tests/chain_id.rs +++ b/aptos-move/e2e-move-tests/src/tests/chain_id.rs @@ -56,7 +56,7 @@ fn setup(harness: &mut MoveHarness) -> Account { let account = harness.new_account_at(AccountAddress::ONE); - assert_success!(harness.publish_package(&account, &path)); + assert_success!(harness.publish_package_cache_building(&account, &path)); account } diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.rs b/aptos-move/e2e-move-tests/src/tests/code_publishing.rs index 12b6585cbdeac..2e3eab08c42a2 100644 --- a/aptos-move/e2e-move-tests/src/tests/code_publishing.rs +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.rs @@ -38,7 +38,7 @@ struct StateWithCoins { fn code_publishing_basic(enabled: Vec, disabled: Vec) { let mut h = MoveHarness::new_with_features(enabled, disabled); let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); - assert_success!(h.publish_package( + assert_success!(h.publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_initial"), )); @@ -77,13 +77,13 @@ fn code_publishing_upgrade_success_compat() { let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); // Install the initial version with compat requirements - assert_success!(h.publish_package( + assert_success!(h.publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_initial"), )); // We should be able to upgrade it with the compatible version - assert_success!(h.publish_package( + assert_success!(h.publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_upgrade_compat"), )); @@ -95,13 +95,13 @@ fn code_publishing_upgrade_fail_compat() { let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); // Install the initial version with compat requirements - assert_success!(h.publish_package( + assert_success!(h.publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_initial"), )); // We should not be able to upgrade it with the incompatible version - let status = h.publish_package( + let status = h.publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_upgrade_incompat"), ); @@ -114,13 +114,13 @@ fn code_publishing_upgrade_fail_immutable() { let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); // Install the initial version with immutable requirements - assert_success!(h.publish_package( + assert_success!(h.publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_initial_immutable"), )); // We should not be able to upgrade it with the compatible version - let status = h.publish_package( + let status = h.publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_upgrade_compat"), ); @@ -133,13 +133,13 @@ fn code_publishing_upgrade_fail_overlapping_module() { let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); // Install the initial version - assert_success!(h.publish_package( + assert_success!(h.publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_initial"), )); // Install a different package with the same module. 
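[Editorial aside] The assert_success!/assert_abort!/assert_vm_status! rework earlier in this diff adds optional trailing commas via $(,)? and forwards custom failure messages through $($arg:tt)+. A tiny standalone macro showing the same two argument-plumbing patterns (arm order matters, as the comment in the diff notes):

// Stripped-down macro: optional trailing comma, optional custom message.
macro_rules! assert_positive {
    ($v:expr $(,)?) => {{
        assert!($v > 0, "value {} is not positive", $v);
    }};
    ($v:expr, $($arg:tt)+) => {{
        assert!($v > 0, $($arg)+);
    }};
}

fn main() {
    assert_positive!(3);
    assert_positive!(3,);
    assert_positive!(3, "expected a positive count, got {}", 3);
}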
- let status = h.publish_package( + let status = h.publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_other_name"), ); @@ -160,26 +160,23 @@ fn code_publishing_upgrade_loader_cache_consistency() { // Create a sequence of package upgrades let txns = vec![ - h.create_publish_package( + h.create_publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_initial"), - None, |_| {}, ), // Compatible with above package - h.create_publish_package( + h.create_publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_upgrade_compat"), - None, |_| {}, ), // Not compatible with above package, but with first one. // Correct behavior: should create backward_incompatible error // Bug behavior: succeeds because is compared with the first module - h.create_publish_package( + h.create_publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_compat_first_not_second"), - None, |_| {}, ), ]; @@ -196,7 +193,7 @@ fn code_publishing_framework_upgrade() { // We should be able to upgrade move-stdlib, as our local package has only // compatible changes. (We added a new function to string.move.) - assert_success!(h.publish_package( + assert_success!(h.publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_stdlib"), )); @@ -209,7 +206,7 @@ fn code_publishing_framework_upgrade_fail() { // We should not be able to upgrade move-stdlib because we removed a function // from the string module. - let result = h.publish_package( + let result = h.publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_stdlib_incompat"), ); @@ -267,16 +264,14 @@ fn code_publishing_with_two_attempts_and_verify_loader_is_invalidated() { // // Depending on how the loader cache is flushed, the second attempt might even fail if the // entire init_module from the first attempt still lingers around and will fail if invoked. - let failed_module_publish = h.create_publish_package( + let failed_module_publish = h.create_publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_init_module_failed"), - None, |_| {}, ); - let module_publish_second_attempt = h.create_publish_package( + let module_publish_second_attempt = h.create_publish_package_cache_building( &acc, &common::test_dir_path("code_publishing.data/pack_init_module_second_attempt"), - None, |_| {}, ); let results = h.run_block(vec![failed_module_publish, module_publish_second_attempt]); diff --git a/aptos-move/e2e-move-tests/src/tests/constructor_args.rs b/aptos-move/e2e-move-tests/src/tests/constructor_args.rs index f4fa91804d09d..2ed937038e709 100644 --- a/aptos-move/e2e-move-tests/src/tests/constructor_args.rs +++ b/aptos-move/e2e-move-tests/src/tests/constructor_args.rs @@ -41,7 +41,10 @@ fn success_generic( // Load the code let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); - assert_success!(h.publish_package(&acc, &common::test_dir_path("constructor_args.data/pack"))); + assert_success!(h.publish_package_cache_building( + &acc, + &common::test_dir_path("constructor_args.data/pack") + )); // Check in initial state, resource does not exist. 
assert!(!h.exists_resource(acc.address(), module_data())); @@ -72,7 +75,10 @@ fn success_generic_view( ) { // Load the code let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); - assert_success!(h.publish_package(&acc, &common::test_dir_path("constructor_args.data/pack"))); + assert_success!(h.publish_package_cache_building( + &acc, + &common::test_dir_path("constructor_args.data/pack") + )); // Check in initial state, resource does not exist. assert!(!h.exists_resource(acc.address(), module_data())); @@ -97,7 +103,10 @@ fn fail_generic(ty_args: Vec, tests: Vec<(&str, Vec>, Closure)> // Load the code let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); - assert_success!(h.publish_package(&acc, &common::test_dir_path("constructor_args.data/pack"))); + assert_success!(h.publish_package_cache_building( + &acc, + &common::test_dir_path("constructor_args.data/pack") + )); // Check in initial state, resource does not exist. assert!(!h.exists_resource(acc.address(), module_data())); diff --git a/aptos-move/e2e-move-tests/src/tests/fee_payer.rs b/aptos-move/e2e-move-tests/src/tests/fee_payer.rs index 6dbca4d79b3c3..8608e0ed45648 100644 --- a/aptos-move/e2e-move-tests/src/tests/fee_payer.rs +++ b/aptos-move/e2e-move-tests/src/tests/fee_payer.rs @@ -224,7 +224,9 @@ fn test_normal_tx_with_signer_with_fee_payer() { // Load the code let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); - assert_success!(h.publish_package(&acc, &common::test_dir_path("string_args.data/pack"))); + assert_success!( + h.publish_package_cache_building(&acc, &common::test_dir_path("string_args.data/pack")) + ); let fun: MemberId = str::parse("0xcafe::test::hi").unwrap(); let entry = EntryFunction::new(fun.module_id, fun.member_id, vec![], vec![bcs::to_bytes( @@ -264,7 +266,9 @@ fn test_normal_tx_without_signer_with_fee_payer() { // Load the code let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); - assert_success!(h.publish_package(&acc, &common::test_dir_path("string_args.data/pack"))); + assert_success!( + h.publish_package_cache_building(&acc, &common::test_dir_path("string_args.data/pack")) + ); let fun: MemberId = str::parse("0xcafe::test::nothing").unwrap(); let entry = EntryFunction::new(fun.module_id, fun.member_id, vec![], vec![]); @@ -298,7 +302,9 @@ fn test_normal_tx_with_fee_payer_insufficient_funds() { // Load the code let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); - assert_success!(h.publish_package(&acc, &common::test_dir_path("string_args.data/pack"))); + assert_success!( + h.publish_package_cache_building(&acc, &common::test_dir_path("string_args.data/pack")) + ); let fun: MemberId = str::parse("0xcafe::test::nothing").unwrap(); let entry = EntryFunction::new(fun.module_id, fun.member_id, vec![], vec![]); diff --git a/aptos-move/e2e-move-tests/src/tests/infinite_loop.rs b/aptos-move/e2e-move-tests/src/tests/infinite_loop.rs index e79f373e06703..859eb10767e4f 100644 --- a/aptos-move/e2e-move-tests/src/tests/infinite_loop.rs +++ b/aptos-move/e2e-move-tests/src/tests/infinite_loop.rs @@ -17,7 +17,7 @@ fn empty_while_loop() { // Load the code let acc = h.new_account_at(AccountAddress::from_hex_literal("0xbeef").unwrap()); - assert_success!(h.publish_package( + assert_success!(h.publish_package_cache_building( &acc, &common::test_dir_path("infinite_loop.data/empty_loop"), )); diff --git a/aptos-move/e2e-move-tests/src/tests/init_module.rs 
b/aptos-move/e2e-move-tests/src/tests/init_module.rs index 3cd8edd3b7923..95ce1f1b06271 100644 --- a/aptos-move/e2e-move-tests/src/tests/init_module.rs +++ b/aptos-move/e2e-move-tests/src/tests/init_module.rs @@ -19,7 +19,9 @@ fn init_module() { // Load the code let acc = h.aptos_framework_account(); - assert_success!(h.publish_package(&acc, &common::test_dir_path("init_module.data/pack"))); + assert_success!( + h.publish_package_cache_building(&acc, &common::test_dir_path("init_module.data/pack")) + ); // Verify that init_module was called. let module_data = parse_struct_tag("0x1::test::ModuleData").unwrap(); @@ -32,7 +34,9 @@ fn init_module() { // Republish to show that init_module is not called again. If init_module would be called again, // we would get an abort here because the first time, it used move_to for initialization. - assert_success!(h.publish_package(&acc, &common::test_dir_path("init_module.data/pack"))); + assert_success!( + h.publish_package_cache_building(&acc, &common::test_dir_path("init_module.data/pack")) + ); assert_eq!( h.read_resource::(acc.address(), module_data) .unwrap() @@ -47,13 +51,15 @@ fn init_module_when_republishing_package() { // Deploy a package that initially does not have the module that has the init_module function. let acc = h.aptos_framework_account(); - assert_success!(h.publish_package( + assert_success!(h.publish_package_cache_building( &acc, &common::test_dir_path("init_module.data/pack_initial") )); // Now republish the package with the new module that has init_module. - assert_success!(h.publish_package(&acc, &common::test_dir_path("init_module.data/pack"))); + assert_success!( + h.publish_package_cache_building(&acc, &common::test_dir_path("init_module.data/pack")) + ); // Verify that init_module was called. let module_data = parse_struct_tag("0x1::test::ModuleData").unwrap(); diff --git a/aptos-move/e2e-move-tests/src/tests/per_category_gas_limits.rs b/aptos-move/e2e-move-tests/src/tests/per_category_gas_limits.rs index 99f7bae8cf6ab..af4fae4dbc37d 100644 --- a/aptos-move/e2e-move-tests/src/tests/per_category_gas_limits.rs +++ b/aptos-move/e2e-move-tests/src/tests/per_category_gas_limits.rs @@ -19,7 +19,7 @@ fn execution_limit_reached() { // Publish the infinite loop module. let acc = h.new_account_at(AccountAddress::from_hex_literal("0xbeef").unwrap()); - assert_success!(h.publish_package( + assert_success!(h.publish_package_cache_building( &acc, &common::test_dir_path("infinite_loop.data/empty_loop"), )); @@ -58,7 +58,7 @@ fn io_limit_reached() { // Publish the test module. let acc = h.new_account_at(AccountAddress::from_hex_literal("0xbeef").unwrap()); - assert_success!(h.publish_package(&acc, &common::test_dir_path("execution_limit.data/test"),)); + assert_success!(h.publish_package_cache_building(&acc, &common::test_dir_path("execution_limit.data/test"),)); // Lower the max io gas to lower than a single load_resource h.modify_gas_schedule(|gas_params| { @@ -81,7 +81,7 @@ fn storage_limit_reached() { // Publish the test module. let acc = h.new_account_at(AccountAddress::from_hex_literal("0xbeef").unwrap()); - assert_success!(h.publish_package(&acc, &common::test_dir_path("execution_limit.data/test"),)); + assert_success!(h.publish_package_cache_building(&acc, &common::test_dir_path("execution_limit.data/test"),)); // Lower the max storage fee to 10 Octa. 
h.modify_gas_schedule(|gas_params| gas_params.vm.txn.max_storage_fee = Fee::new(10)); diff --git a/aptos-move/e2e-move-tests/src/tests/string_args.rs b/aptos-move/e2e-move-tests/src/tests/string_args.rs index 0910f2cf82aec..74fec58b7dcff 100644 --- a/aptos-move/e2e-move-tests/src/tests/string_args.rs +++ b/aptos-move/e2e-move-tests/src/tests/string_args.rs @@ -29,7 +29,9 @@ fn success_generic(ty_args: Vec, tests: Vec<(&str, Vec<(Vec>, & // Load the code let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); - assert_success!(h.publish_package(&acc, &common::test_dir_path("string_args.data/pack"))); + assert_success!( + h.publish_package_cache_building(&acc, &common::test_dir_path("string_args.data/pack")) + ); let mut module_data = parse_struct_tag("0xCAFE::test::ModuleData").unwrap(); let string_struct = StructTag { @@ -90,7 +92,9 @@ fn fail_generic( // Load the code let acc = h.new_account_at(AccountAddress::from_hex_literal("0xcafe").unwrap()); - assert_success!(h.publish_package(&acc, &common::test_dir_path("string_args.data/pack"))); + assert_success!( + h.publish_package_cache_building(&acc, &common::test_dir_path("string_args.data/pack")) + ); let module_data = parse_struct_tag("0xCAFE::test::ModuleData").unwrap(); diff --git a/aptos-move/e2e-tests/Cargo.toml b/aptos-move/e2e-tests/Cargo.toml index 7c5ddf86fc0fe..0731148c437ce 100644 --- a/aptos-move/e2e-tests/Cargo.toml +++ b/aptos-move/e2e-tests/Cargo.toml @@ -15,6 +15,7 @@ rust-version = { workspace = true } [dependencies] anyhow = { workspace = true } aptos-abstract-gas-usage = { workspace = true } +aptos-aggregator = { workspace = true } aptos-bitvec = { workspace = true } aptos-block-executor = { workspace = true } aptos-cached-packages ={ workspace = true } @@ -35,6 +36,7 @@ aptos-vm-genesis = { workspace = true } aptos-vm-logging = { workspace = true } aptos-vm-types = { workspace = true } bcs = { workspace = true } +bytes = { workspace = true } goldenfile = { workspace = true } hex = { workspace = true } move-binary-format = { workspace = true } diff --git a/aptos-move/e2e-tests/src/account.rs b/aptos-move/e2e-tests/src/account.rs index 14b7ab8a5ec0d..686d87d53f85f 100644 --- a/aptos-move/e2e-tests/src/account.rs +++ b/aptos-move/e2e-tests/src/account.rs @@ -480,11 +480,11 @@ impl AccountData { let write_set = vec![ ( StateKey::access_path(self.make_account_access_path()), - WriteOp::Modification(self.to_bytes()), + WriteOp::Modification(self.to_bytes().into()), ), ( StateKey::access_path(self.make_coin_store_access_path()), - WriteOp::Modification(self.coin_store.to_bytes()), + WriteOp::Modification(self.coin_store.to_bytes().into()), ), ]; diff --git a/aptos-move/e2e-tests/src/data_store.rs b/aptos-move/e2e-tests/src/data_store.rs index aac19fbdcbf08..fb56bbaf4faf1 100644 --- a/aptos-move/e2e-tests/src/data_store.rs +++ b/aptos-move/e2e-tests/src/data_store.rs @@ -35,10 +35,10 @@ pub static GENESIS_CHANGE_SET_TESTNET: Lazy = pub static GENESIS_CHANGE_SET_MAINNET: Lazy = Lazy::new(|| generate_genesis_change_set_for_mainnet(GenesisOptions::Mainnet)); -/// An in-memory implementation of `StateView` and `RemoteCache` for the VM. +/// An in-memory implementation of `StateView` and `ExecutorView` for the VM. /// /// Tests use this to set up state, and pass in a reference to the cache whenever a `StateView` or -/// `RemoteCache` is needed. +/// `ExecutorView` is needed. 
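[Editorial aside] Several hunks above and below (account.rs, data_store.rs, executor.rs, table-natives) switch owned Vec<u8> blobs to bytes::Bytes via .into(). A minimal sketch of that conversion and of reading the bytes back as a slice; Bytes takes ownership of the allocation rather than copying it, and cloning it only bumps a reference count:

use bytes::Bytes;

fn main() {
    let blob: Vec<u8> = vec![1, 2, 3];

    // Vec<u8> -> Bytes: ownership moves, the payload is not copied.
    let bytes: Bytes = blob.into();

    // Bytes derefs to &[u8], so existing slice-based code (e.g. bcs::from_bytes
    // in the executor change above) keeps working unchanged.
    let slice: &[u8] = &bytes;
    assert_eq!(slice, &[1u8, 2, 3][..]);

    // Cloning Bytes is a cheap reference-count bump, not a deep copy.
    let another = bytes.clone();
    assert_eq!(another, bytes);
}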
#[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct FakeDataStore { state_data: HashMap, @@ -50,7 +50,7 @@ impl FakeDataStore { FakeDataStore { state_data: data .into_iter() - .map(|(k, v)| (k, StateValue::new_legacy(v))) + .map(|(k, v)| (k, StateValue::new_legacy(v.into()))) .collect(), } } @@ -70,7 +70,7 @@ impl FakeDataStore { /// Returns the previous data if the key was occupied. pub fn set_legacy(&mut self, state_key: StateKey, bytes: Vec) -> Option { self.state_data - .insert(state_key, StateValue::new_legacy(bytes)) + .insert(state_key, StateValue::new_legacy(bytes.into())) } /// Sets a (key, value) pair within this data store. @@ -107,7 +107,7 @@ impl FakeDataStore { let access_path = AccessPath::from(module_id); self.set( StateKey::access_path(access_path), - StateValue::new_legacy(blob), + StateValue::new_legacy(blob.into()), ); } } diff --git a/aptos-move/e2e-tests/src/executor.rs b/aptos-move/e2e-tests/src/executor.rs index 16c57b612cf67..67d6ac52e7f65 100644 --- a/aptos-move/e2e-tests/src/executor.rs +++ b/aptos-move/e2e-tests/src/executor.rs @@ -49,13 +49,14 @@ use aptos_types::{ }; use aptos_vm::{ block_executor::{AptosTransactionOutput, BlockAptosVM}, - data_cache::{AsMoveResolver, StorageAdapter}, + data_cache::AsMoveResolver, move_vm_ext::{MoveVmExt, SessionId}, AptosVM, VMExecutor, VMValidator, }; use aptos_vm_genesis::{generate_genesis_change_set_for_testing_with_count, GenesisOptions}; use aptos_vm_logging::log_schema::AdapterLogSchema; use aptos_vm_types::storage::{ChangeSetConfigs, StorageGasParameters}; +use bytes::Bytes; use move_core_types::{ account_address::AccountAddress, identifier::Identifier, @@ -72,7 +73,6 @@ use std::{ sync::{Arc, Mutex}, time::Instant, }; - static RNG_SEED: [u8; 32] = [9u8; 32]; const ENV_TRACE_DIR: &str = "TRACE"; @@ -357,7 +357,7 @@ impl FakeExecutor { TStateView::get_state_value_bytes(&self.data_store, &StateKey::access_path(ap)) .expect("account must exist in data store") .unwrap_or_else(|| panic!("Can't fetch {} resource for {}", T::STRUCT_NAME, addr)); - bcs::from_bytes(data_blob.as_slice()).ok() + bcs::from_bytes(&data_blob).ok() } /// Reads the resource `Value` for an account under the given address from @@ -537,36 +537,39 @@ impl FakeExecutor { let log_context = AdapterLogSchema::new(self.data_store.id(), 0); - let (_status, output, gas_profiler) = - AptosVM::execute_user_transaction_with_custom_gas_meter( - &self.data_store, - &txn, - &log_context, - |gas_feature_version, gas_params, storage_gas_params, balance| { - let gas_meter = - MemoryTrackedGasMeter::new(StandardGasMeter::new(StandardGasAlgebra::new( - gas_feature_version, - gas_params, - storage_gas_params, - balance, - ))); - let gas_profiler = match txn.payload() { - TransactionPayload::Script(_) => GasProfiler::new_script(gas_meter), - TransactionPayload::EntryFunction(entry_func) => GasProfiler::new_function( - gas_meter, - entry_func.module().clone(), - entry_func.function().to_owned(), - entry_func.ty_args().to_vec(), - ), - TransactionPayload::ModuleBundle(..) => unreachable!("not supported"), - TransactionPayload::Multisig(..) => unimplemented!("not supported yet"), - }; - Ok(gas_profiler) - }, - )?; + // TODO(Gas): revisit this. 
+ let vm = AptosVM::new_from_state_view(&self.data_store); + + let resolver = self.data_store.as_move_resolver(); + let (_status, output, gas_profiler) = vm.execute_user_transaction_with_custom_gas_meter( + &resolver, + &txn, + &log_context, + |gas_feature_version, gas_params, storage_gas_params, balance| { + let gas_meter = + MemoryTrackedGasMeter::new(StandardGasMeter::new(StandardGasAlgebra::new( + gas_feature_version, + gas_params, + storage_gas_params, + balance, + ))); + let gas_profiler = match txn.payload() { + TransactionPayload::Script(_) => GasProfiler::new_script(gas_meter), + TransactionPayload::EntryFunction(entry_func) => GasProfiler::new_function( + gas_meter, + entry_func.module().clone(), + entry_func.function().to_owned(), + entry_func.ty_args().to_vec(), + ), + TransactionPayload::ModuleBundle(..) => unreachable!("not supported"), + TransactionPayload::Multisig(..) => unimplemented!("not supported yet"), + }; + Ok(gas_profiler) + }, + )?; Ok(( - output.try_into_transaction_output(self.get_state_view())?, + output.try_into_transaction_output(&resolver)?, gas_profiler.finish(), )) } @@ -595,14 +598,14 @@ impl FakeExecutor { } /// Get the blob for the associated AccessPath - pub fn read_state_value_bytes(&self, state_key: &StateKey) -> Option> { + pub fn read_state_value_bytes(&self, state_key: &StateKey) -> Option { TStateView::get_state_value_bytes(&self.data_store, state_key).unwrap() } /// Set the blob for the associated AccessPath pub fn write_state_value(&mut self, state_key: StateKey, data_blob: Vec) { self.data_store - .set(state_key, StateValue::new_legacy(data_blob)); + .set(state_key, StateValue::new_legacy(data_blob.into())); } /// Verifies the given transaction by running it through the VM verifier. @@ -723,13 +726,13 @@ impl FakeExecutor { timed_features, ) .unwrap(); - let remote_view = StorageAdapter::new(&self.data_store); + let resolver = self.data_store.as_move_resolver(); // start measuring here to reduce measurement errors (i.e., the time taken to load vm, module, etc.) 
let mut i = 0; let mut times = Vec::new(); while i < iterations { - let mut session = vm.new_session(&remote_view, SessionId::void()); + let mut session = vm.new_session(&resolver, SessionId::void()); // load function name into cache to ensure cache is hot let _ = session.load_function(module, &Self::name(function_name), &type_params.clone()); @@ -797,8 +800,8 @@ impl FakeExecutor { }), ) .unwrap(); - let remote_view = StorageAdapter::new(&self.data_store); - let mut session = vm.new_session(&remote_view, SessionId::void()); + let resolver = self.data_store.as_move_resolver(); + let mut session = vm.new_session(&resolver, SessionId::void()); let fun_name = Self::name(function_name); let should_error = fun_name.clone().into_string().ends_with(POSTFIX); @@ -868,8 +871,8 @@ impl FakeExecutor { timed_features, ) .unwrap(); - let remote_view = StorageAdapter::new(&self.data_store); - let mut session = vm.new_session(&remote_view, SessionId::void()); + let resolver = self.data_store.as_move_resolver(); + let mut session = vm.new_session(&resolver, SessionId::void()); session .execute_function_bypass_visibility( &Self::module(module_name), @@ -919,8 +922,8 @@ impl FakeExecutor { TimedFeatures::enable_all(), ) .unwrap(); - let remote_view = StorageAdapter::new(&self.data_store); - let mut session = vm.new_session(&remote_view, SessionId::void()); + let resolver = self.data_store.as_move_resolver(); + let mut session = vm.new_session(&resolver, SessionId::void()); session .execute_function_bypass_visibility( &Self::module(module_name), diff --git a/aptos-move/e2e-testsuite/Cargo.toml b/aptos-move/e2e-testsuite/Cargo.toml index 3a1a357170a7b..18136cb761e56 100644 --- a/aptos-move/e2e-testsuite/Cargo.toml +++ b/aptos-move/e2e-testsuite/Cargo.toml @@ -30,7 +30,6 @@ aptos-vm = { workspace = true, features = ['failpoints'] } aptos-vm-genesis = { workspace = true } aptos-vm-logging = { workspace = true } aptos-vm-types = { workspace = true } -aptos-writeset-generator = { workspace = true } bcs = { workspace = true } fail = { workspace = true, features = ['failpoints'] } itertools = { workspace = true } diff --git a/aptos-move/e2e-testsuite/src/tests/scripts.rs b/aptos-move/e2e-testsuite/src/tests/scripts.rs index 9c11ed2ea56d3..80a386eb90f27 100644 --- a/aptos-move/e2e-testsuite/src/tests/scripts.rs +++ b/aptos-move/e2e-testsuite/src/tests/scripts.rs @@ -127,10 +127,10 @@ fn script_none_existing_module_dep() { } assert_eq!( status.status(), - //StatusCode::LINKER_ERROR Ok(ExecutionStatus::MiscellaneousError(Some( StatusCode::LINKER_ERROR - ))) + ))), + "Linker Error: Transaction executed at a non-existent external module" ); executor.apply_write_set(output.write_set()); @@ -352,7 +352,8 @@ fn script_type_argument_module_does_not_exist() { let status = output.status(); assert_eq!( status, - &TransactionStatus::Keep(ExecutionStatus::MiscellaneousError(Some(LINKER_ERROR))) + &TransactionStatus::Keep(ExecutionStatus::MiscellaneousError(Some(LINKER_ERROR))), + "Linker Error: Transaction executed at a non-existent external module" ); executor.apply_write_set(output.write_set()); @@ -419,7 +420,8 @@ fn script_nested_type_argument_module_does_not_exist() { let status = output.status(); assert_eq!( status, - &TransactionStatus::Keep(ExecutionStatus::MiscellaneousError(Some(LINKER_ERROR))) + &TransactionStatus::Keep(ExecutionStatus::MiscellaneousError(Some(LINKER_ERROR))), + "Linker Error: Transaction executed at a non-existent external module" ); executor.apply_write_set(output.write_set()); diff --git 
a/aptos-move/framework/Cargo.toml b/aptos-move/framework/Cargo.toml index b99be2bfb78fa..f4abe6f0d7f1c 100644 --- a/aptos-move/framework/Cargo.toml +++ b/aptos-move/framework/Cargo.toml @@ -14,8 +14,8 @@ rust-version = { workspace = true } [dependencies] anyhow = { workspace = true } -aptos-aggregator = { workspace = true, features = ["testing"] } -aptos-crypto = { workspace = true, features = ["fuzzing"] } +aptos-aggregator = { workspace = true } +aptos-crypto = { workspace = true } aptos-gas-algebra = { workspace = true } aptos-gas-schedule = { workspace = true } aptos-move-stdlib = { workspace = true } @@ -24,6 +24,7 @@ aptos-sdk-builder = { workspace = true } aptos-state-view = { workspace = true } aptos-table-natives = { workspace = true } aptos-types = { workspace = true } +aptos-vm-types = { workspace = true } ark-bls12-381 = { workspace = true } ark-ec = { workspace = true } ark-ff = { workspace = true } @@ -49,6 +50,7 @@ include_dir = { workspace = true } itertools = { workspace = true } libsecp256k1 = { workspace = true } log = { workspace = true } +lru = { workspace = true } merlin = { workspace = true } move-binary-format = { workspace = true } move-command-line-common = { workspace = true } @@ -85,6 +87,8 @@ thiserror = { workspace = true } tiny-keccak = { workspace = true } [dev-dependencies] +aptos-aggregator = { workspace = true, features = ["testing"] } +aptos-crypto = { workspace = true, features = ["fuzzing"] } aptos-gas-meter = { workspace = true } aptos-vm = { workspace = true, features = ["testing"] } claims = { workspace = true } @@ -95,7 +99,7 @@ move-unit-test = { workspace = true } [features] default = [] fuzzing = ["aptos-types/fuzzing", "proptest", "proptest-derive"] -testing = ["aptos-move-stdlib/testing"] +testing = ["aptos-move-stdlib/testing", "aptos-crypto/fuzzing"] [lib] doctest = false diff --git a/aptos-move/framework/aptos-framework/doc/transaction_fee.md b/aptos-move/framework/aptos-framework/doc/transaction_fee.md index de37f9c5844f2..b98b4a3b660da 100644 --- a/aptos-move/framework/aptos-framework/doc/transaction_fee.md +++ b/aptos-move/framework/aptos-framework/doc/transaction_fee.md @@ -152,7 +152,23 @@ collected when executing the block. ## Struct `FeeStatement` -Summary of the fees charged and refunds issued for a transaction. +Breakdown of fee charge and refund for a transaction. +The structure is: + +- Net charge or refund (not in the statement) +- total charge: total_charge_gas_units, matches gas_used in the on-chain TransactionInfo. +This is the sum of the sub-items below. Notice that there's potential precision loss when +the conversion between internal and external gas units and between native token and gas +units, so it's possible that the numbers don't add up exactly. -- This number is the final +charge, while the break down is merely informational. +- gas charge for execution (CPU time): execution_gas_units +- gas charge for IO (storage random access): io_gas_units +- storage fee charge (storage space): storage_fee_octas, to be included in +total_charge_gas_unit, this number is converted to gas units according to the user +specified gas_unit_price on the transaction. +- storage deletion refund: storage_fee_refund_octas, this is not included in gas_used or +total_charge_gas_units, the net charge / refund is calculated by +total_charge_gas_units * gas_unit_price - storage_fee_refund_octas. This is meant to emitted as a module event. 
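[Editorial aside] The FeeStatement documentation above defines the net charge/refund as total_charge_gas_units * gas_unit_price - storage_fee_refund_octas. A quick numeric illustration of that formula; all values are invented purely for the example:

fn net_octas(total_charge_gas_units: u64, gas_unit_price: u64, storage_fee_refund_octas: u64) -> i128 {
    i128::from(total_charge_gas_units) * i128::from(gas_unit_price)
        - i128::from(storage_fee_refund_octas)
}

fn main() {
    // 500 gas units charged at a gas unit price of 100 Octas, minus a
    // 2_000 Octa storage refund: the net charge is 48_000 Octas.
    assert_eq!(net_octas(500, 100, 2_000), 48_000);

    // A large enough refund makes the result negative, i.e. a net refund.
    assert_eq!(net_octas(10, 100, 5_000), -4_000);
}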
diff --git a/aptos-move/framework/aptos-framework/sources/transaction_fee.move b/aptos-move/framework/aptos-framework/sources/transaction_fee.move index 54449b735a730..d95cb3b9471ff 100644 --- a/aptos-move/framework/aptos-framework/sources/transaction_fee.move +++ b/aptos-move/framework/aptos-framework/sources/transaction_fee.move @@ -39,7 +39,23 @@ module aptos_framework::transaction_fee { } #[event] - /// Summary of the fees charged and refunds issued for a transaction. + /// Breakdown of fee charge and refund for a transaction. + /// The structure is: + /// + /// - Net charge or refund (not in the statement) + /// - total charge: total_charge_gas_units, matches `gas_used` in the on-chain `TransactionInfo`. + /// This is the sum of the sub-items below. Notice that there's potential precision loss when + /// the conversion between internal and external gas units and between native token and gas + /// units, so it's possible that the numbers don't add up exactly. -- This number is the final + /// charge, while the break down is merely informational. + /// - gas charge for execution (CPU time): `execution_gas_units` + /// - gas charge for IO (storage random access): `io_gas_units` + /// - storage fee charge (storage space): `storage_fee_octas`, to be included in + /// `total_charge_gas_unit`, this number is converted to gas units according to the user + /// specified `gas_unit_price` on the transaction. + /// - storage deletion refund: `storage_fee_refund_octas`, this is not included in `gas_used` or + /// `total_charge_gas_units`, the net charge / refund is calculated by + /// `total_charge_gas_units` * `gas_unit_price` - `storage_fee_refund_octas`. /// /// This is meant to emitted as a module event. struct FeeStatement has drop, store { diff --git a/aptos-move/framework/cached-packages/Cargo.toml b/aptos-move/framework/cached-packages/Cargo.toml index 07c5beadd8ef4..d3839cf018f6d 100644 --- a/aptos-move/framework/cached-packages/Cargo.toml +++ b/aptos-move/framework/cached-packages/Cargo.toml @@ -14,7 +14,7 @@ rust-version = { workspace = true } [dependencies] aptos-framework = { workspace = true } -aptos-types = { workspace = true, features = ["fuzzing"] } +aptos-types = { workspace = true } bcs = { workspace = true } include_dir = { workspace = true } move-core-types = { workspace = true } diff --git a/aptos-move/framework/src/built_package.rs b/aptos-move/framework/src/built_package.rs index ee4635d41a3e2..c0d703aab2e96 100644 --- a/aptos-move/framework/src/built_package.rs +++ b/aptos-move/framework/src/built_package.rs @@ -194,10 +194,17 @@ impl BuiltPackage { bail!("extended checks failed") } } + + let compiled_pkg_path = package + .compiled_package_info + .build_flags + .install_dir + .as_ref() + .unwrap_or(&package_path) + .join(CompiledPackageLayout::Root.path()) + .join(package.compiled_package_info.package_name.as_str()); inject_runtime_metadata( - package_path - .join(CompiledPackageLayout::Root.path()) - .join(package.compiled_package_info.package_name.as_str()), + compiled_pkg_path, &mut package, runtime_metadata, bytecode_version, diff --git a/aptos-move/framework/src/module_metadata.rs b/aptos-move/framework/src/module_metadata.rs index 1dff1e66b3428..fddf32fb5b00d 100644 --- a/aptos-move/framework/src/module_metadata.rs +++ b/aptos-move/framework/src/module_metadata.rs @@ -6,6 +6,7 @@ use aptos_types::{ on_chain_config::{FeatureFlag, Features, TimedFeatures}, transaction::AbortInfo, }; +use lru::LruCache; use move_binary_format::{ access::ModuleAccess, file_format::{ @@ 
-23,7 +24,7 @@ use move_core_types::{ }; use move_vm_runtime::move_vm::MoveVM; use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, env}; +use std::{cell::RefCell, collections::BTreeMap, env, sync::Arc}; use thiserror::Error; /// The minimal file format version from which the V1 metadata is supported @@ -148,31 +149,64 @@ impl KnownAttribute { } } +const METADATA_CACHE_SIZE: usize = 1024; + +thread_local! { + static V1_METADATA_CACHE: RefCell, Option>>> = RefCell::new(LruCache::new(METADATA_CACHE_SIZE)); + + static V0_METADATA_CACHE: RefCell, Option>>> = RefCell::new(LruCache::new(METADATA_CACHE_SIZE)); +} + /// Extract metadata from the VM, upgrading V0 to V1 representation as needed -pub fn get_metadata(md: &[Metadata]) -> Option { +pub fn get_metadata(md: &[Metadata]) -> Option> { if let Some(data) = md.iter().find(|md| md.key == APTOS_METADATA_KEY_V1) { - bcs::from_bytes::(&data.value).ok() + V1_METADATA_CACHE.with(|ref_cell| { + let mut cache = ref_cell.borrow_mut(); + if let Some(meta) = cache.get(&data.value) { + meta.clone() + } else { + let meta = bcs::from_bytes::(&data.value) + .ok() + .map(Arc::new); + cache.put(data.value.clone(), meta.clone()); + meta + } + }) } else { get_metadata_v0(md) } } -pub fn get_metadata_v0(md: &[Metadata]) -> Option { +pub fn get_metadata_v0(md: &[Metadata]) -> Option> { if let Some(data) = md.iter().find(|md| md.key == APTOS_METADATA_KEY) { - let data_v0 = bcs::from_bytes::(&data.value).ok()?; - Some(data_v0.upgrade()) + V0_METADATA_CACHE.with(|ref_cell| { + let mut cache = ref_cell.borrow_mut(); + if let Some(meta) = cache.get(&data.value) { + meta.clone() + } else { + let meta = bcs::from_bytes::(&data.value) + .ok() + .map(RuntimeModuleMetadata::upgrade) + .map(Arc::new); + cache.put(data.value.clone(), meta.clone()); + meta + } + }) } else { None } } /// Extract metadata from the VM, upgrading V0 to V1 representation as needed -pub fn get_vm_metadata(vm: &MoveVM, module_id: &ModuleId) -> Option { +pub fn get_vm_metadata(vm: &MoveVM, module_id: &ModuleId) -> Option> { vm.with_module_metadata(module_id, get_metadata) } /// Extract metadata from the VM, legacy V0 format upgraded to V1 -pub fn get_vm_metadata_v0(vm: &MoveVM, module_id: &ModuleId) -> Option { +pub fn get_vm_metadata_v0( + vm: &MoveVM, + module_id: &ModuleId, +) -> Option> { vm.with_module_metadata(module_id, get_metadata_v0) } diff --git a/aptos-move/framework/src/natives/aggregator_natives/aggregator.rs b/aptos-move/framework/src/natives/aggregator_natives/aggregator.rs index 8092095eeabcb..32a9fb122293e 100644 --- a/aptos-move/framework/src/natives/aggregator_natives/aggregator.rs +++ b/aptos-move/framework/src/natives/aggregator_natives/aggregator.rs @@ -68,7 +68,7 @@ fn native_read( // Get aggregator. 
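[Editorial aside] The module_metadata.rs change above memoizes bcs deserialization in a per-thread LruCache keyed by the raw metadata bytes, sharing parsed values through Arc. A standalone sketch of that pattern with a UTF-8 parse standing in for the bcs step (note: recent versions of the lru crate take a NonZeroUsize capacity instead of the plain usize shown here):

use lru::LruCache;
use std::{cell::RefCell, sync::Arc};

const CACHE_SIZE: usize = 1024;

thread_local! {
    // One cache per thread avoids locking; keys are the raw serialized bytes.
    static PARSED: RefCell<LruCache<Vec<u8>, Option<Arc<String>>>> =
        RefCell::new(LruCache::new(CACHE_SIZE));
}

fn get_parsed(bytes: Vec<u8>) -> Option<Arc<String>> {
    PARSED.with(|cell| {
        let mut cache = cell.borrow_mut();
        if let Some(hit) = cache.get(&bytes) {
            return hit.clone();
        }
        // Stand-in for bcs::from_bytes::<RuntimeModuleMetadataV1>(..).ok().map(Arc::new).
        let parsed = String::from_utf8(bytes.clone()).ok().map(Arc::new);
        cache.put(bytes, parsed.clone());
        parsed
    })
}

fn main() {
    let first = get_parsed(b"metadata".to_vec()).unwrap();
    let second = get_parsed(b"metadata".to_vec()).unwrap();
    // The second lookup returns the cached Arc instead of re-parsing.
    assert!(Arc::ptr_eq(&first, &second));
}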
let aggregator_context = context.extensions().get::(); let mut aggregator_data = aggregator_context.aggregator_data.borrow_mut(); - let aggregator = aggregator_data.get_aggregator(id, limit)?; + let aggregator = aggregator_data.get_aggregator(id.clone(), limit)?; let value = aggregator.read_and_materialize(aggregator_context.resolver, &id)?; diff --git a/aptos-move/framework/src/natives/code.rs b/aptos-move/framework/src/natives/code.rs index aace1aac6baf1..158717a3ecad5 100644 --- a/aptos-move/framework/src/natives/code.rs +++ b/aptos-move/framework/src/natives/code.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::natives::any::Any; +use crate::{natives::any::Any, unzip_metadata_str}; use anyhow::bail; use aptos_gas_schedule::gas_params::natives::aptos_framework::*; use aptos_native_interface::{ @@ -78,6 +78,28 @@ pub struct PackageMetadata { pub extension: MoveOption, } +impl fmt::Display for PackageMetadata { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "Package name:{}", self.name)?; + writeln!(f, "Upgrade policy:{}", self.upgrade_policy)?; + writeln!(f, "Upgrade number:{}", self.upgrade_number)?; + writeln!(f, "Source digest:{}", self.source_digest)?; + let manifest_str = unzip_metadata_str(&self.manifest).unwrap(); + writeln!(f, "Manifest:")?; + writeln!(f, "{}", manifest_str)?; + writeln!(f, "Package Dependency:")?; + for dep in &self.deps { + writeln!(f, "{:?}", dep)?; + } + writeln!(f, "extension:{:?}", self.extension)?; + writeln!(f, "Modules:")?; + for module in &self.modules { + writeln!(f, "{}", module)?; + } + Ok(()) + } +} + #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Ord, PartialOrd)] pub struct PackageDep { pub account: AccountAddress, @@ -94,6 +116,24 @@ pub struct ModuleMetadata { pub extension: MoveOption, } +impl fmt::Display for ModuleMetadata { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "Module name:{}", self.name)?; + if !self.source.is_empty() { + writeln!(f, "Source code:")?; + let source = unzip_metadata_str(&self.source).unwrap(); + writeln!(f, "{}", source)?; + } + if !self.source_map.is_empty() { + writeln!(f, "Source map:")?; + let source_map = unzip_metadata_str(&self.source_map).unwrap(); + writeln!(f, "{}", source_map)?; + } + writeln!(f, "Module extension:{:?}", self.extension)?; + Ok(()) + } +} + #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct UpgradePolicy { pub policy: u8, diff --git a/aptos-move/framework/src/natives/state_storage.rs b/aptos-move/framework/src/natives/state_storage.rs index dd7c267843a10..6f65cf3bc70c3 100644 --- a/aptos-move/framework/src/natives/state_storage.rs +++ b/aptos-move/framework/src/natives/state_storage.rs @@ -5,7 +5,8 @@ use aptos_gas_schedule::gas_params::natives::aptos_framework::*; use aptos_native_interface::{ RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeResult, }; -use aptos_types::{state_store::state_storage_usage::StateStorageUsage, vm_status::StatusCode}; +use aptos_types::vm_status::StatusCode; +use aptos_vm_types::resolver::StateStorageView; use better_any::{Tid, TidAble}; use move_binary_format::errors::PartialVMError; use move_vm_runtime::native_functions::NativeFunction; @@ -16,25 +17,20 @@ use move_vm_types::{ use smallvec::{smallvec, SmallVec}; use std::collections::VecDeque; -/// Ability to reveal the state storage utilization info. 
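The Display implementations added to code.rs above all follow the same shape: one writeln! per scalar field, then a header line followed by a loop for collection fields. A small self-contained sketch of the pattern (DemoPackage is a hypothetical stand-in; the real impls additionally unzip the stored manifest and source before printing):

use std::fmt;

struct DemoPackage {
    name: String,
    upgrade_number: u64,
    modules: Vec<String>,
}

impl fmt::Display for DemoPackage {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "Package name:{}", self.name)?;
        writeln!(f, "Upgrade number:{}", self.upgrade_number)?;
        writeln!(f, "Modules:")?;
        for module in &self.modules {
            writeln!(f, "{}", module)?;
        }
        Ok(())
    }
}

fn main() {
    let pkg = DemoPackage {
        name: "demo".to_string(),
        upgrade_number: 1,
        modules: vec!["m1".to_string(), "m2".to_string()],
    };
    print!("{}", pkg);
}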
-pub trait StateStorageUsageResolver { - fn get_state_storage_usage(&self) -> anyhow::Result; -} - /// Exposes the ability to query state storage utilization info to native functions. #[derive(Tid)] pub struct NativeStateStorageContext<'a> { - resolver: &'a dyn StateStorageUsageResolver, + resolver: &'a dyn StateStorageView, } impl<'a> NativeStateStorageContext<'a> { - pub fn new(resolver: &'a dyn StateStorageUsageResolver) -> Self { + pub fn new(resolver: &'a dyn StateStorageView) -> Self { Self { resolver } } } /*************************************************************************************************** - * native get_state_storage_usage_only_at_eopch_beginning + * native get_state_storage_usage_only_at_epoch_beginning * * gas cost: base_cost * @@ -54,7 +50,7 @@ fn native_get_usage( context.charge(STATE_STORAGE_GET_USAGE_BASE_COST)?; let ctx = context.extensions().get::(); - let usage = ctx.resolver.get_state_storage_usage().map_err(|err| { + let usage = ctx.resolver.get_usage().map_err(|err| { PartialVMError::new(StatusCode::VM_EXTENSION_ERROR) .with_message(format!("Failed to get state storage usage: {}", err)) })?; diff --git a/aptos-move/framework/table-natives/src/lib.rs b/aptos-move/framework/table-natives/src/lib.rs index abb5270cb4100..a01283818ddae 100644 --- a/aptos-move/framework/table-natives/src/lib.rs +++ b/aptos-move/framework/table-natives/src/lib.rs @@ -119,11 +119,11 @@ impl<'a> NativeTableContext<'a> { match op { Op::New(val) => { let bytes = serialize(&value_layout, &val)?; - entries.insert(key, Op::New(bytes)); + entries.insert(key, Op::New(bytes.into())); }, Op::Modify(val) => { let bytes = serialize(&value_layout, &val)?; - entries.insert(key, Op::Modify(bytes)); + entries.insert(key, Op::Modify(bytes.into())); }, Op::Delete => { entries.insert(key, Op::Delete); diff --git a/aptos-move/move-examples/Cargo.toml b/aptos-move/move-examples/Cargo.toml index bef94422971ef..7380a8bccfc5f 100644 --- a/aptos-move/move-examples/Cargo.toml +++ b/aptos-move/move-examples/Cargo.toml @@ -25,4 +25,5 @@ move-unit-test = { workspace = true } move-vm-runtime = { workspace = true } [dev-dependencies] +aptos-framework = { workspace = true, features = ["testing"] } tempfile = { workspace = true } diff --git a/aptos-move/move-examples/mint_nft/2-Using-Resource-Account/sources/create_nft_with_resource_account.move b/aptos-move/move-examples/mint_nft/2-Using-Resource-Account/sources/create_nft_with_resource_account.move index 37c81c83da7b8..e47f251afe7b8 100644 --- a/aptos-move/move-examples/mint_nft/2-Using-Resource-Account/sources/create_nft_with_resource_account.move +++ b/aptos-move/move-examples/mint_nft/2-Using-Resource-Account/sources/create_nft_with_resource_account.move @@ -150,7 +150,7 @@ module mint_nft::create_nft_with_resource_account { public entry fun mint_event_ticket(receiver: &signer) acquires ModuleData { let module_data = borrow_global_mut(@mint_nft); - // Create a signer of the resource account from the signer capabiity stored in this module. + // Create a signer of the resource account from the signer capability stored in this module. // Using a resource account and storing its signer capability within the module allows the module to programmatically // sign transactions on behalf of the module. 
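The change above retires the bespoke StateStorageUsageResolver trait in favor of the shared StateStorageView, but the wiring is unchanged: the native context borrows the resolver as a trait object and translates its error into a VM-facing one. A minimal sketch of that wiring, with hypothetical names (UsageView, DemoUsage, NativeCtx) standing in for the real traits and error types:

struct DemoUsage {
    items: u64,
    bytes: u64,
}

trait UsageView {
    fn get_usage(&self) -> Result<DemoUsage, String>;
}

struct NativeCtx<'a> {
    resolver: &'a dyn UsageView,
}

impl<'a> NativeCtx<'a> {
    fn new(resolver: &'a dyn UsageView) -> Self {
        Self { resolver }
    }

    fn usage_or_err(&self) -> Result<DemoUsage, String> {
        self.resolver
            .get_usage()
            .map_err(|err| format!("Failed to get state storage usage: {}", err))
    }
}

fn main() {
    struct Fixed;
    impl UsageView for Fixed {
        fn get_usage(&self) -> Result<DemoUsage, String> {
            Ok(DemoUsage { items: 10, bytes: 4096 })
        }
    }
    let ctx = NativeCtx::new(&Fixed);
    let usage = ctx.usage_or_err().unwrap();
    assert_eq!((usage.items, usage.bytes), (10, 4096));
}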
let resource_signer = account::create_signer_with_capability(&module_data.signer_cap); diff --git a/aptos-move/mvhashmap/Cargo.toml b/aptos-move/mvhashmap/Cargo.toml index a794df6afd171..f350b6018b320 100644 --- a/aptos-move/mvhashmap/Cargo.toml +++ b/aptos-move/mvhashmap/Cargo.toml @@ -19,11 +19,15 @@ aptos-crypto = { workspace = true } aptos-infallible = { workspace = true } aptos-types = { workspace = true } bcs = { workspace = true } +bytes = { workspace = true } +claims = { workspace = true } crossbeam = { workspace = true } dashmap = { workspace = true } +serde = { workspace = true } [dev-dependencies] -claims = { workspace = true } +aptos-aggregator = { workspace = true, features = ["testing"] } proptest = { workspace = true } proptest-derive = { workspace = true } rayon = { workspace = true } +test-case = { workspace = true } diff --git a/aptos-move/mvhashmap/src/lib.rs b/aptos-move/mvhashmap/src/lib.rs index 5dd3ca4d93cd8..8d55e324640d9 100644 --- a/aptos-move/mvhashmap/src/lib.rs +++ b/aptos-move/mvhashmap/src/lib.rs @@ -3,22 +3,21 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - types::{MVDataError, MVDataOutput, MVModulesError, MVModulesOutput, TxnIndex}, - versioned_data::VersionedData, + versioned_data::VersionedData, versioned_group_data::VersionedGroupData, versioned_modules::VersionedModules, }; -use aptos_aggregator::delta_change_set::DeltaOp; -use aptos_crypto::hash::HashValue; use aptos_types::{ executable::{Executable, ModulePath}, write_set::TransactionWrite, }; +use serde::Serialize; use std::{fmt::Debug, hash::Hash}; pub mod types; pub mod unsync_map; mod utils; pub mod versioned_data; +pub mod versioned_group_data; pub mod versioned_modules; #[cfg(test)] @@ -33,101 +32,52 @@ mod unit_tests; /// /// TODO: separate V into different generic types for data and code modules with specialized /// traits (currently both WriteOp for executor). -pub struct MVHashMap { +pub struct MVHashMap { data: VersionedData, + group_data: VersionedGroupData, modules: VersionedModules, } -impl - MVHashMap +impl< + K: ModulePath + Hash + Clone + Eq + Debug, + T: Hash + Clone + Eq + Debug + Serialize, + V: TransactionWrite, + X: Executable, + > MVHashMap { // ----------------------------------- // Functions shared for data and modules. - pub fn new() -> MVHashMap { + pub fn new() -> MVHashMap { MVHashMap { data: VersionedData::new(), + group_data: VersionedGroupData::new(), modules: VersionedModules::new(), } } - pub fn take(self) -> (VersionedData, VersionedModules) { - (self.data, self.modules) - } - + /// Contains 'simple' versioned data (nothing contained in groups). pub fn data(&self) -> &VersionedData { &self.data } - pub fn modules(&self) -> &VersionedModules { - &self.modules - } - - // ----------------------------------------------- - // Functions specific to the multi-versioned data. - - /// Add a delta at a specified key. - pub fn add_delta(&self, key: K, txn_idx: TxnIndex, delta: DeltaOp) { - debug_assert!( - key.module_path().is_none(), - "Delta must be stored at a path corresponding to data" - ); - - self.data.add_delta(key, txn_idx, delta); - } - - pub fn materialize_delta(&self, key: &K, txn_idx: TxnIndex) -> Result { - debug_assert!( - key.module_path().is_none(), - "Delta must be stored at a path corresponding to data" - ); - - self.data.materialize_delta(key, txn_idx) + /// Contains data representing resource groups, or more generically, internally + /// containing different values mapped to tags of type T. 
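With the MVHashMap refactor above, callers address the sub-map they need explicitly (map.data(), map.group_data(), map.modules()) instead of going through forwarding methods, as the updated tests later in this diff show. A trivial sketch of that container shape with placeholder types:

#[derive(Default)]
struct DemoVersionedData;
#[derive(Default)]
struct DemoVersionedGroupData;
#[derive(Default)]
struct DemoVersionedModules;

#[derive(Default)]
struct DemoMVHashMap {
    data: DemoVersionedData,
    group_data: DemoVersionedGroupData,
    modules: DemoVersionedModules,
}

impl DemoMVHashMap {
    fn data(&self) -> &DemoVersionedData {
        &self.data
    }

    fn group_data(&self) -> &DemoVersionedGroupData {
        &self.group_data
    }

    fn modules(&self) -> &DemoVersionedModules {
        &self.modules
    }
}

fn main() {
    let map = DemoMVHashMap::default();
    let (_data, _groups, _modules) = (map.data(), map.group_data(), map.modules());
}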
+ pub fn group_data(&self) -> &VersionedGroupData { + &self.group_data } - pub fn set_aggregator_base_value(&self, key: &K, value: u128) { - debug_assert!( - key.module_path().is_none(), - "Delta must be stored at a path corresponding to data" - ); - - self.data.set_aggregator_base_value(key, value); - } - - /// Read data at access path 'key', from the perspective of transaction 'txn_idx'. - pub fn fetch_data( - &self, - key: &K, - txn_idx: TxnIndex, - ) -> anyhow::Result, MVDataError> { - self.data.fetch_data(key, txn_idx) - } - - // ---------------------------------------------- - // Functions specific to the multi-versioned modules map. - - /// Adds a new executable to the multi-version data-structure. The executable is either - /// storage-version (and fixed) or uniquely identified by the (cryptographic) hash of the - /// module published during the block. - pub fn store_executable(&self, key: &K, descriptor_hash: HashValue, executable: X) { - self.modules - .store_executable(key, descriptor_hash, executable); - } - - /// Fetches the latest module stored at the given key, either as in an executable form, - /// if already cached, or in a raw module format that the VM can convert to an executable. - /// The errors are returned if no module is found, or if a dependency is encountered. - pub fn fetch_module( - &self, - key: &K, - txn_idx: TxnIndex, - ) -> anyhow::Result, MVModulesError> { - self.modules.fetch_module(key, txn_idx) + pub fn modules(&self) -> &VersionedModules { + &self.modules } } -impl Default - for MVHashMap +impl< + K: ModulePath + Hash + Clone + Debug + Eq, + T: Hash + Clone + Debug + Eq + Serialize, + V: TransactionWrite, + X: Executable, + > Default for MVHashMap { fn default() -> Self { Self::new() diff --git a/aptos-move/mvhashmap/src/types.rs b/aptos-move/mvhashmap/src/types.rs index 628fe31bab22b..5ab4dfd92c5f4 100644 --- a/aptos-move/mvhashmap/src/types.rs +++ b/aptos-move/mvhashmap/src/types.rs @@ -8,7 +8,15 @@ use std::sync::Arc; pub type TxnIndex = u32; pub type Incarnation = u32; -pub type Version = (TxnIndex, Incarnation); + +/// Custom error type representing storage version. Result +/// then represents either index of some type (i.e. TxnIndex, Version), or a +/// version corresponding to the storage (pre-block) state. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct StorageVersion; + +// TODO: Find better representations for this, a similar one for TxnIndex. +pub type Version = Result<(TxnIndex, Incarnation), StorageVersion>; #[derive(Clone, Copy, PartialEq)] pub(crate) enum Flag { @@ -16,11 +24,23 @@ pub(crate) enum Flag { Estimate, } +#[derive(Debug, PartialEq, Eq)] +pub enum MVGroupError { + /// The base group contents are not initialized. + Uninitialized, + /// Entry corresponding to the tag was not found. + TagNotFound, + /// A dependency on other transaction has been found during the read. + Dependency(TxnIndex), + /// Tag serialization is needed for group size computation + TagSerializationError, +} + /// Returned as Err(..) when failed to read from the multi-version data-structure. #[derive(Debug, PartialEq, Eq)] pub enum MVDataError { /// No prior entry is found. - NotFound, + Uninitialized, /// Read resulted in an unresolved delta value. Unresolved(DeltaOp), /// A dependency on other transaction has been found during the read. @@ -61,3 +81,136 @@ pub enum MVModulesOutput { /// if the latest entry corresponded to the module deletion. Module((Arc, HashValue)), } + +// In order to store base vales at the lowest index, i.e. 
at index 0, without conflicting +// with actual transaction index 0, the following struct wraps the index and internally +// increments it by 1. +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)] +pub(crate) struct ShiftedTxnIndex { + idx: TxnIndex, +} + +impl ShiftedTxnIndex { + pub fn new(real_idx: TxnIndex) -> Self { + Self { idx: real_idx + 1 } + } + + pub(crate) fn idx(&self) -> Result { + if self.idx > 0 { + Ok(self.idx - 1) + } else { + Err(StorageVersion) + } + } + + pub(crate) fn zero() -> Self { + Self { idx: 0 } + } +} + +#[cfg(test)] +pub(crate) mod test { + use super::*; + use aptos_aggregator::delta_change_set::serialize; + use aptos_types::{ + access_path::AccessPath, executable::ModulePath, state_store::state_value::StateValue, + write_set::TransactionWrite, + }; + use bytes::Bytes; + use claims::{assert_err, assert_ok_eq}; + use std::{fmt::Debug, hash::Hash, sync::Arc}; + + #[derive(Clone, Eq, Hash, PartialEq, Debug)] + pub(crate) struct KeyType( + /// Wrapping the types used for testing to add ModulePath trait implementation. + pub K, + ); + + impl ModulePath for KeyType { + fn module_path(&self) -> Option { + None + } + } + + #[test] + fn test_shifted_idx() { + let zero = ShiftedTxnIndex::zero(); + let shifted_indices: Vec<_> = (0..20).map(ShiftedTxnIndex::new).collect(); + for (i, shifted_idx) in shifted_indices.iter().enumerate() { + assert_ne!(zero, *shifted_idx); + for j in 0..i { + assert_ne!(ShiftedTxnIndex::new(j as TxnIndex), *shifted_idx); + } + assert_eq!(ShiftedTxnIndex::new(i as TxnIndex), *shifted_idx); + } + assert_eq!(ShiftedTxnIndex::zero(), zero); + assert_err!(zero.idx()); + + for (i, shifted_idx) in shifted_indices.into_iter().enumerate() { + assert_ok_eq!(shifted_idx.idx(), i as TxnIndex); + } + } + + #[derive(Debug, PartialEq, Eq)] + pub(crate) struct TestValue { + bytes: Bytes, + } + + impl TestValue { + pub(crate) fn deletion() -> Self { + Self { + bytes: vec![].into(), + } + } + + pub fn new(mut seed: Vec) -> Self { + seed.resize(4, 0); + Self { + bytes: seed.into_iter().flat_map(|v| v.to_be_bytes()).collect(), + } + } + + pub(crate) fn from_u128(value: u128) -> Self { + Self { + bytes: serialize(&value).into(), + } + } + + pub(crate) fn with_len(len: usize) -> Self { + assert!(len > 0, "0 is deletion"); + Self { + bytes: vec![100_u8; len].into(), + } + } + } + + impl TransactionWrite for TestValue { + fn bytes(&self) -> Option<&Bytes> { + (!self.bytes.is_empty()).then_some(&self.bytes) + } + + fn from_state_value(_maybe_state_value: Option) -> Self { + unimplemented!("Irrelevant for the test") + } + + fn as_state_value(&self) -> Option { + unimplemented!("Irrelevant for the test") + } + } + + // Generate a Vec deterministically based on txn_idx and incarnation. + pub(crate) fn value_for(txn_idx: TxnIndex, incarnation: Incarnation) -> TestValue { + TestValue::new(vec![txn_idx * 5, txn_idx + incarnation, incarnation * 5]) + } + + // Generate the value_for txn_idx and incarnation in arc. + pub(crate) fn arc_value_for(txn_idx: TxnIndex, incarnation: Incarnation) -> Arc { + // Generate a Vec deterministically based on txn_idx and incarnation. + Arc::new(value_for(txn_idx, incarnation)) + } + + // Convert value for txn_idx and incarnation into u128. 
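ShiftedTxnIndex, introduced above, lets the storage (pre-block) value share one ordered map with per-transaction entries: the base entry sits at internal index 0 and every real transaction index i is stored as i + 1, so idx() yields Err(StorageVersion) only for the base entry. A self-contained sketch of that behavior:

type TxnIndex = u32;

#[derive(Debug, PartialEq, Eq)]
struct StorageVersion;

#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
struct ShiftedIdx {
    idx: TxnIndex,
}

impl ShiftedIdx {
    fn new(real_idx: TxnIndex) -> Self {
        Self { idx: real_idx + 1 }
    }

    fn zero() -> Self {
        Self { idx: 0 }
    }

    fn idx(&self) -> Result<TxnIndex, StorageVersion> {
        if self.idx > 0 {
            Ok(self.idx - 1)
        } else {
            Err(StorageVersion)
        }
    }
}

fn main() {
    assert_eq!(ShiftedIdx::zero().idx(), Err(StorageVersion));
    assert_eq!(ShiftedIdx::new(0).idx(), Ok(0));
    // The base entry sorts before any real transaction index.
    assert!(ShiftedIdx::zero() < ShiftedIdx::new(0));
}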
+ pub(crate) fn u128_for(txn_idx: TxnIndex, incarnation: Incarnation) -> u128 { + value_for(txn_idx, incarnation).as_u128().unwrap().unwrap() + } +} diff --git a/aptos-move/mvhashmap/src/unit_tests/mod.rs b/aptos-move/mvhashmap/src/unit_tests/mod.rs index 8b28bf89e6fe3..7c02916caf8ff 100644 --- a/aptos-move/mvhashmap/src/unit_tests/mod.rs +++ b/aptos-move/mvhashmap/src/unit_tests/mod.rs @@ -3,64 +3,20 @@ // SPDX-License-Identifier: Apache-2.0 use super::{ - types::{Incarnation, MVDataError, MVDataOutput, TxnIndex}, + types::{ + test::{arc_value_for, u128_for, value_for, KeyType, TestValue}, + MVDataError, MVDataOutput, + }, unsync_map::UnsyncMap, *, }; -use aptos_aggregator::{ - delta_change_set::{delta_add, delta_sub, DeltaOp, DeltaUpdate}, - transaction::AggregatorValue, -}; -use aptos_types::{ - access_path::AccessPath, - executable::{ExecutableTestType, ModulePath}, - state_store::state_value::StateValue, -}; +use aptos_aggregator::delta_change_set::{delta_add, delta_sub, DeltaOp, DeltaUpdate}; +use aptos_types::executable::ExecutableTestType; use claims::{assert_err_eq, assert_none, assert_ok_eq, assert_some_eq}; -use std::sync::Arc; - mod proptest_types; -#[derive(Debug, PartialEq, Eq)] -struct Value(Vec); - -impl TransactionWrite for Value { - fn extract_raw_bytes(&self) -> Option> { - let mut v: Vec = self - .0 - .clone() - .into_iter() - .flat_map(|element| element.to_be_bytes()) - .collect(); - v.resize(16, 0); - Some(v) - } - - fn as_state_value(&self) -> Option { - unimplemented!() - } -} - -// Generate a Vec deterministically based on txn_idx and incarnation. -fn value_for(txn_idx: TxnIndex, incarnation: Incarnation) -> Value { - Value(vec![txn_idx * 5, txn_idx + incarnation, incarnation * 5]) -} - -// Generate the value_for txn_idx and incarnation in arc. -fn arc_value_for(txn_idx: TxnIndex, incarnation: Incarnation) -> Arc { - // Generate a Vec deterministically based on txn_idx and incarnation. - Arc::new(value_for(txn_idx, incarnation)) -} - -// Convert value for txn_idx and incarnation into u128. -fn u128_for(txn_idx: TxnIndex, incarnation: Incarnation) -> u128 { - AggregatorValue::from_write(&value_for(txn_idx, incarnation)) - .unwrap() - .into() -} - fn match_unresolved( - read_result: anyhow::Result, MVDataError>, + read_result: anyhow::Result, MVDataError>, update: DeltaUpdate, ) { match read_result { @@ -69,21 +25,9 @@ fn match_unresolved( }; } -#[derive(Clone, Eq, Hash, PartialEq, Debug)] -pub(crate) struct KeyType( - /// Wrapping the types used for testing to add ModulePath trait implementation. - pub K, -); - -impl ModulePath for KeyType { - fn module_path(&self) -> Option { - None - } -} - #[test] fn unsync_map_data_basic() { - let map: UnsyncMap>, Value, ExecutableTestType> = UnsyncMap::new(); + let map: UnsyncMap>, TestValue, ExecutableTestType> = UnsyncMap::new(); let ap = KeyType(b"/foo/b".to_vec()); @@ -106,73 +50,79 @@ fn create_write_read_placeholder_struct() { let ap2 = KeyType(b"/foo/c".to_vec()); let ap3 = KeyType(b"/foo/d".to_vec()); - let mvtbl: MVHashMap>, Value, ExecutableTestType> = MVHashMap::new(); + let mvtbl: MVHashMap>, usize, TestValue, ExecutableTestType> = MVHashMap::new(); - // Reads that should go the DB return Err(NotFound) - let r_db = mvtbl.fetch_data(&ap1, 5); - assert_eq!(Err(NotFound), r_db); + // Reads that should go the DB return Err(Uninitialized) + let r_db = mvtbl.data().fetch_data(&ap1, 5); + assert_eq!(Err(Uninitialized), r_db); // Write by txn 10. 
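TestValue above encodes deletion as an empty buffer: bytes() returns None when the buffer is empty, and code later in this diff (the group commit logic) uses exactly that signal to drop deleted tags. A std-only sketch of the convention, with Vec<u8> standing in for bytes::Bytes:

struct DemoWrite {
    bytes: Vec<u8>,
}

impl DemoWrite {
    fn deletion() -> Self {
        Self { bytes: Vec::new() }
    }

    fn creation(payload: &[u8]) -> Self {
        Self { bytes: payload.to_vec() }
    }

    /// None signals a deletion, Some(..) an actual value.
    fn bytes(&self) -> Option<&[u8]> {
        (!self.bytes.is_empty()).then_some(self.bytes.as_slice())
    }

    fn is_deletion(&self) -> bool {
        self.bytes().is_none()
    }
}

fn main() {
    assert!(DemoWrite::deletion().is_deletion());
    assert_eq!(DemoWrite::creation(b"v").bytes(), Some(&b"v"[..]));
}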
- mvtbl.data().write(ap1.clone(), (10, 1), value_for(10, 1)); + mvtbl.data().write(ap1.clone(), 10, 1, value_for(10, 1)); - // Reads that should go the DB return Err(NotFound) - let r_db = mvtbl.fetch_data(&ap1, 9); - assert_eq!(Err(NotFound), r_db); + // Reads that should go the DB return Err(Uninitialized) + let r_db = mvtbl.data().fetch_data(&ap1, 9); + assert_eq!(Err(Uninitialized), r_db); // Reads return entries from smaller txns, not txn 10. - let r_db = mvtbl.fetch_data(&ap1, 10); - assert_eq!(Err(NotFound), r_db); + let r_db = mvtbl.data().fetch_data(&ap1, 10); + assert_eq!(Err(Uninitialized), r_db); // Reads for a higher txn return the entry written by txn 10. - let r_10 = mvtbl.fetch_data(&ap1, 15); - assert_eq!(Ok(Versioned((10, 1), arc_value_for(10, 1))), r_10); + let r_10 = mvtbl.data().fetch_data(&ap1, 15); + assert_eq!(Ok(Versioned(Ok((10, 1)), arc_value_for(10, 1))), r_10); // More deltas. - mvtbl.add_delta(ap1.clone(), 11, delta_add(11, u128::MAX)); - mvtbl.add_delta(ap1.clone(), 12, delta_add(12, u128::MAX)); - mvtbl.add_delta(ap1.clone(), 13, delta_sub(74, u128::MAX)); + mvtbl + .data() + .add_delta(ap1.clone(), 11, delta_add(11, u128::MAX)); + mvtbl + .data() + .add_delta(ap1.clone(), 12, delta_add(12, u128::MAX)); + mvtbl + .data() + .add_delta(ap1.clone(), 13, delta_sub(74, u128::MAX)); // Reads have to go traverse deltas until a write is found. - let r_sum = mvtbl.fetch_data(&ap1, 14); + let r_sum = mvtbl.data().fetch_data(&ap1, 14); assert_eq!(Ok(Resolved(u128_for(10, 1) + 11 + 12 - (61 + 13))), r_sum); // More writes. - mvtbl.data().write(ap1.clone(), (12, 0), value_for(12, 0)); - mvtbl.data().write(ap1.clone(), (8, 3), value_for(8, 3)); + mvtbl.data().write(ap1.clone(), 12, 0, value_for(12, 0)); + mvtbl.data().write(ap1.clone(), 8, 3, value_for(8, 3)); // Verify reads. - let r_12 = mvtbl.fetch_data(&ap1, 15); + let r_12 = mvtbl.data().fetch_data(&ap1, 15); assert_eq!(Ok(Resolved(u128_for(12, 0) - (61 + 13))), r_12); - let r_10 = mvtbl.fetch_data(&ap1, 11); - assert_eq!(Ok(Versioned((10, 1), arc_value_for(10, 1))), r_10); - let r_8 = mvtbl.fetch_data(&ap1, 10); - assert_eq!(Ok(Versioned((8, 3), arc_value_for(8, 3))), r_8); + let r_10 = mvtbl.data().fetch_data(&ap1, 11); + assert_eq!(Ok(Versioned(Ok((10, 1)), arc_value_for(10, 1))), r_10); + let r_8 = mvtbl.data().fetch_data(&ap1, 10); + assert_eq!(Ok(Versioned(Ok((8, 3)), arc_value_for(8, 3))), r_8); // Mark the entry written by 10 as an estimate. mvtbl.data().mark_estimate(&ap1, 10); // Read for txn 11 must observe a dependency. - let r_10 = mvtbl.fetch_data(&ap1, 11); + let r_10 = mvtbl.data().fetch_data(&ap1, 11); assert_eq!(Err(Dependency(10)), r_10); // Read for txn 12 must observe a dependency when resolving deltas at txn 11. - let r_11 = mvtbl.fetch_data(&ap1, 12); + let r_11 = mvtbl.data().fetch_data(&ap1, 12); assert_eq!(Err(Dependency(10)), r_11); // Delete the entry written by 10, write to a different ap. mvtbl.data().delete(&ap1, 10); - mvtbl.data().write(ap2.clone(), (10, 2), value_for(10, 2)); + mvtbl.data().write(ap2.clone(), 10, 2, value_for(10, 2)); // Read by txn 11 no longer observes entry from txn 10. - let r_8 = mvtbl.fetch_data(&ap1, 11); - assert_eq!(Ok(Versioned((8, 3), arc_value_for(8, 3))), r_8); + let r_8 = mvtbl.data().fetch_data(&ap1, 11); + assert_eq!(Ok(Versioned(Ok((8, 3)), arc_value_for(8, 3))), r_8); // Reads, writes for ap2 and ap3. 
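The delta reads exercised above (delta_add / delta_sub resolving against a written base value, or failing with DeltaApplicationFailure) boil down to checked arithmetic over the accumulated updates. A simplified sketch; the real DeltaOp also carries a limit and history bounds, which are omitted here:

#[derive(Clone, Copy, Debug)]
enum DemoDelta {
    Add(u128),
    Sub(u128),
}

#[derive(Debug, PartialEq)]
struct DeltaApplicationFailure;

/// Apply a sequence of deltas to a base value, failing on u128 overflow or underflow.
fn apply(base: u128, deltas: &[DemoDelta]) -> Result<u128, DeltaApplicationFailure> {
    deltas.iter().try_fold(base, |acc, delta| match delta {
        DemoDelta::Add(v) => acc.checked_add(*v).ok_or(DeltaApplicationFailure),
        DemoDelta::Sub(v) => acc.checked_sub(*v).ok_or(DeltaApplicationFailure),
    })
}

fn main() {
    use DemoDelta::*;
    // A write provides the base; later deltas adjust it, as in the reads above.
    assert_eq!(apply(100, &[Add(11), Add(12), Sub(74)]), Ok(49));
    // Subtracting below zero surfaces as an application failure, like the underflow case above.
    assert_eq!(apply(10, &[Sub(30)]), Err(DeltaApplicationFailure));
}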
- mvtbl.data().write(ap2.clone(), (5, 0), value_for(5, 0)); - mvtbl.data().write(ap3.clone(), (20, 4), value_for(20, 4)); - let r_5 = mvtbl.fetch_data(&ap2, 10); - assert_eq!(Ok(Versioned((5, 0), arc_value_for(5, 0))), r_5); - let r_20 = mvtbl.fetch_data(&ap3, 21); - assert_eq!(Ok(Versioned((20, 4), arc_value_for(20, 4))), r_20); + mvtbl.data().write(ap2.clone(), 5, 0, value_for(5, 0)); + mvtbl.data().write(ap3.clone(), 20, 4, value_for(20, 4)); + let r_5 = mvtbl.data().fetch_data(&ap2, 10); + assert_eq!(Ok(Versioned(Ok((5, 0)), arc_value_for(5, 0))), r_5); + let r_20 = mvtbl.data().fetch_data(&ap3, 21); + assert_eq!(Ok(Versioned(Ok((20, 4)), arc_value_for(20, 4))), r_20); // Clear ap1 and ap3. mvtbl.data().delete(&ap1, 12); @@ -181,28 +131,30 @@ fn create_write_read_placeholder_struct() { // Reads from ap1 and ap3 go to db. match_unresolved( - mvtbl.fetch_data(&ap1, 30), + mvtbl.data().fetch_data(&ap1, 30), DeltaUpdate::Minus((61 + 13) - 11), ); - let r_db = mvtbl.fetch_data(&ap3, 30); - assert_eq!(Err(NotFound), r_db); + let r_db = mvtbl.data().fetch_data(&ap3, 30); + assert_eq!(Err(Uninitialized), r_db); // Read entry by txn 10 at ap2. - let r_10 = mvtbl.fetch_data(&ap2, 15); - assert_eq!(Ok(Versioned((10, 2), arc_value_for(10, 2))), r_10); + let r_10 = mvtbl.data().fetch_data(&ap2, 15); + assert_eq!(Ok(Versioned(Ok((10, 2)), arc_value_for(10, 2))), r_10); // Both delta-write and delta-delta application failures are detected. - mvtbl.add_delta(ap1.clone(), 30, delta_add(30, 32)); - mvtbl.add_delta(ap1.clone(), 31, delta_add(31, 32)); - let r_33 = mvtbl.fetch_data(&ap1, 33); + mvtbl.data().add_delta(ap1.clone(), 30, delta_add(30, 32)); + mvtbl.data().add_delta(ap1.clone(), 31, delta_add(31, 32)); + let r_33 = mvtbl.data().fetch_data(&ap1, 33); assert_eq!(Err(DeltaApplicationFailure), r_33); let val = value_for(10, 3); // sub base sub_for for which should underflow. - let sub_base = AggregatorValue::from_write(&val).unwrap().into(); - mvtbl.data().write(ap2.clone(), (10, 3), val); - mvtbl.add_delta(ap2.clone(), 30, delta_sub(30 + sub_base, u128::MAX)); - let r_31 = mvtbl.fetch_data(&ap2, 31); + let sub_base = val.as_u128().unwrap().unwrap(); + mvtbl.data().write(ap2.clone(), 10, 3, val); + mvtbl + .data() + .add_delta(ap2.clone(), 30, delta_sub(30 + sub_base, u128::MAX)); + let r_31 = mvtbl.data().fetch_data(&ap2, 31); assert_eq!(Err(DeltaApplicationFailure), r_31); } @@ -210,7 +162,7 @@ fn create_write_read_placeholder_struct() { fn materialize_delta_shortcut() { use MVDataOutput::*; - let vd: VersionedData>, Value> = VersionedData::new(); + let vd: VersionedData>, TestValue> = VersionedData::new(); let ap = KeyType(b"/foo/b".to_vec()); let limit = 10000; @@ -223,9 +175,9 @@ fn materialize_delta_shortcut() { vd.materialize_delta(&ap, 8), DeltaOp::new(DeltaUpdate::Plus(30), limit, 30, 0) ); - vd.set_aggregator_base_value(&ap, 5); + vd.provide_base_value(ap.clone(), TestValue::from_u128(5)); // Multiple calls are idempotent. - vd.set_aggregator_base_value(&ap, 5); + vd.provide_base_value(ap.clone(), TestValue::from_u128(5)); // With base set, commit delta should now succeed. assert_ok_eq!(vd.materialize_delta(&ap, 8), 35); @@ -244,28 +196,19 @@ fn materialize_delta_shortcut() { #[test] #[should_panic] fn aggregator_base_mismatch() { - let vd: VersionedData>, Value> = VersionedData::new(); - let ap = KeyType(b"/foo/b".to_vec()); - - vd.set_aggregator_base_value(&ap, 10); - // This call must panic, because it provides a mismatching base value. 
- vd.set_aggregator_base_value(&ap, 11); -} - -#[test] -#[should_panic] -fn aggregator_base_without_deltas() { - let vd: VersionedData>, Value> = VersionedData::new(); + let vd: VersionedData>, TestValue> = VersionedData::new(); let ap = KeyType(b"/foo/b".to_vec()); - // Must panic as there are no deltas at all. - vd.set_aggregator_base_value(&ap, 10); + vd.provide_base_value(ap.clone(), TestValue::with_len(1)); + // This call must panic, because it provides a mismatching base value: + // However, only base value length is compared in assert. + vd.provide_base_value(ap, TestValue::with_len(2)); } #[test] #[should_panic] fn commit_without_deltas() { - let vd: VersionedData>, Value> = VersionedData::new(); + let vd: VersionedData>, TestValue> = VersionedData::new(); let ap = KeyType(b"/foo/b".to_vec()); // Must panic as there are no deltas at all. @@ -275,11 +218,11 @@ fn commit_without_deltas() { #[test] #[should_panic] fn commit_without_entry() { - let vd: VersionedData>, Value> = VersionedData::new(); + let vd: VersionedData>, TestValue> = VersionedData::new(); let ap = KeyType(b"/foo/b".to_vec()); vd.add_delta(ap.clone(), 8, delta_add(20, 1000)); - vd.set_aggregator_base_value(&ap, 10); + vd.provide_base_value(ap.clone(), TestValue::from_u128(10)); // Must panic as there is no delta at provided index. let _ = vd.materialize_delta(&ap, 9); diff --git a/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs b/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs index a9b9e959f8274..9807fb9947d78 100644 --- a/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs +++ b/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs @@ -3,24 +3,25 @@ // SPDX-License-Identifier: Apache-2.0 use super::{ - types::{MVDataError, MVDataOutput, TxnIndex}, + types::{test::KeyType, MVDataError, MVDataOutput, MVGroupError, TxnIndex}, MVHashMap, }; -use crate::unit_tests::KeyType; -use aptos_aggregator::{ - delta_change_set::{delta_add, delta_sub, DeltaOp}, - transaction::AggregatorValue, -}; +use aptos_aggregator::delta_change_set::{delta_add, delta_sub, DeltaOp}; use aptos_types::{ executable::ExecutableTestType, state_store::state_value::StateValue, write_set::TransactionWrite, }; +use bytes::Bytes; +use claims::assert_none; use proptest::{collection::vec, prelude::*, sample::Index, strategy::Strategy}; use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, hash::Hash, - sync::atomic::{AtomicUsize, Ordering}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, }; const DEFAULT_TIMEOUT: u64 = 30; @@ -43,25 +44,36 @@ enum ExpectedOutput { Failure, } -struct Value(Option); - -impl> + Clone> TransactionWrite for Value { - fn extract_raw_bytes(&self) -> Option> { - if self.0.is_none() { - None - } else { - let mut bytes = match self.0.clone().map(|v| v.into()) { - Some(v) => v, - None => vec![], - }; +struct Value { + maybe_value: Option, + maybe_bytes: Option, +} +impl> + Clone> Value { + fn new(maybe_value: Option) -> Self { + let maybe_bytes = maybe_value.clone().map(|v| { + let mut bytes = v.into(); bytes.resize(16, 0); - Some(bytes) + bytes.into() + }); + Self { + maybe_value, + maybe_bytes, } } +} + +impl> + Clone> TransactionWrite for Value { + fn bytes(&self) -> Option<&Bytes> { + self.maybe_bytes.as_ref() + } + + fn from_state_value(_maybe_state_value: Option) -> Self { + unimplemented!("Irrelevant for the test") + } fn as_state_value(&self) -> Option { - unimplemented!() + unimplemented!("Irrelevant for the test") } } @@ -76,13 +88,18 @@ where K: Hash + Eq + Clone + Debug, V: Clone + Into> + Debug 
+ PartialEq, { - pub fn new(txns: &[(K, Operator)]) -> Self { + pub fn new(txns: &[(K, Operator)], ignore_updates: bool) -> Self { let mut baseline: HashMap>> = HashMap::new(); for (idx, (k, op)) in txns.iter().enumerate() { let value_to_update = match op { - Operator::Insert(v) => Data::Write(Value(Some(v.clone()))), - Operator::Remove => Data::Write(Value(None)), - Operator::Update(d) => Data::Delta(*d), + Operator::Insert(v) => Data::Write(Value::new(Some(v.clone()))), + Operator::Remove => Data::Write(Value::new(None)), + Operator::Update(d) => { + if ignore_updates { + continue; + } + Data::Delta(*d) + }, Operator::Read => continue, }; @@ -104,30 +121,30 @@ where match data { Data::Write(v) => match acc { Some(d) => { - let maybe_value = - AggregatorValue::from_write(v).map(|value| value.into()); - if maybe_value.is_none() { - // v must be a deletion. - assert!(matches!(v, Value(None))); - return ExpectedOutput::Deleted; - } - - assert!(!failure); // acc should be none. - - match d.apply_to(maybe_value.unwrap()) { - Err(_) => return ExpectedOutput::Failure, - Ok(i) => return ExpectedOutput::Resolved(i), + match v.as_u128().unwrap() { + Some(value) => { + assert!(!failure); // acc should be none. + match d.apply_to(value) { + Err(_) => return ExpectedOutput::Failure, + Ok(i) => return ExpectedOutput::Resolved(i), + } + }, + None => { + // v must be a deletion. + assert_none!(v.bytes()); + return ExpectedOutput::Deleted; + }, } }, - None => match v { - Value(Some(w)) => { + None => match v.maybe_value.as_ref() { + Some(w) => { return if failure { ExpectedOutput::Failure } else { ExpectedOutput::Value(w.clone()) }; }, - Value(None) => return ExpectedOutput::Deleted, + None => return ExpectedOutput::Deleted, }, }, Data::Delta(d) => match acc.as_mut() { @@ -175,9 +192,13 @@ fn operator_strategy() -> impl Strategy( universe: Vec, transaction_gens: Vec<(Index, Operator)>, + test_group: bool, ) -> Result<(), TestCaseError> where K: PartialOrd + Send + Clone + Hash + Eq + Sync + Debug, @@ -188,9 +209,9 @@ where .map(|(idx, op)| (idx.get(&universe).clone(), op)) .collect::>(); - let baseline = Baseline::new(transactions.as_slice()); + let baseline = Baseline::new(transactions.as_slice(), test_group); // Only testing data, provide executable type (). - let map = MVHashMap::, Value, ExecutableTestType>::new(); + let map = MVHashMap::, usize, Value, ExecutableTestType>::new(); // make ESTIMATE placeholders for all versions to be updated. // allows to test that correct values appear at the end of concurrent execution. 
@@ -199,15 +220,22 @@ where .enumerate() .filter_map(|(idx, (key, op))| match op { Operator::Read => None, - Operator::Insert(_) | Operator::Remove | Operator::Update(_) => { - Some((key.clone(), idx)) - }, + Operator::Insert(_) | Operator::Remove => Some((key.clone(), idx)), + Operator::Update(_) => (!test_group).then_some((key.clone(), idx)), }) .collect::>(); for (key, idx) in versions_to_write { - map.data() - .write(KeyType(key.clone()), (idx as TxnIndex, 0), Value(None)); - map.data().mark_estimate(&KeyType(key), idx as TxnIndex); + let key = KeyType(key); + let value = Value::new(None); + let idx = idx as TxnIndex; + if test_group { + map.group_data() + .write(key.clone(), idx, 0, vec![(5, value)]); + map.group_data().mark_estimate(&key, idx); + } else { + map.data().write(key.clone(), idx, 0, value); + map.data().mark_estimate(&key, idx); + } } let current_idx = AtomicUsize::new(0); @@ -229,52 +257,71 @@ where use MVDataOutput::*; let baseline = baseline.get(key, idx as TxnIndex); + let assert_value = |v: Arc>| match v.maybe_value.as_ref() { + Some(w) => { + assert_eq!(baseline, ExpectedOutput::Value(w.clone()), "{:?}", idx); + }, + None => { + assert_eq!(baseline, ExpectedOutput::Deleted, "{:?}", idx); + }, + }; + let mut retry_attempts = 0; loop { - match map.fetch_data(&KeyType(key.clone()), idx as TxnIndex) { - Ok(Versioned(_, v)) => { - match &*v { - Value(Some(w)) => { - assert_eq!( - baseline, - ExpectedOutput::Value(w.clone()), - "{:?}", - idx - ); - }, - Value(None) => { - assert_eq!( - baseline, - ExpectedOutput::Deleted, - "{:?}", - idx - ); - }, - } - break; - }, - Ok(Resolved(v)) => { - assert_eq!(baseline, ExpectedOutput::Resolved(v), "{:?}", idx); - break; - }, - Err(NotFound) => { - assert_eq!(baseline, ExpectedOutput::NotInMap, "{:?}", idx); - break; - }, - Err(DeltaApplicationFailure) => { - assert_eq!(baseline, ExpectedOutput::Failure, "{:?}", idx); - break; - }, - Err(Unresolved(d)) => { - assert_eq!( - baseline, - ExpectedOutput::Unresolved(d), - "{:?}", - idx - ); - break; - }, - Err(Dependency(_i)) => (), + if test_group { + match map.group_data().read_from_group( + &KeyType(key.clone()), + idx as TxnIndex, + &5, + ) { + Ok((v, _)) => { + assert_value(v); + break; + }, + Err(MVGroupError::Uninitialized) => { + assert_eq!(baseline, ExpectedOutput::NotInMap, "{:?}", idx); + break; + }, + Err(MVGroupError::Dependency(_i)) => (), + Err(_) => unreachable!("Unreachable error cases for test"), + } + } else { + match map + .data() + .fetch_data(&KeyType(key.clone()), idx as TxnIndex) + { + Ok(Versioned(_, v)) => { + assert_value(v); + break; + }, + Ok(Resolved(v)) => { + assert_eq!( + baseline, + ExpectedOutput::Resolved(v), + "{:?}", + idx + ); + break; + }, + Err(Uninitialized) => { + assert_eq!(baseline, ExpectedOutput::NotInMap, "{:?}", idx); + break; + }, + Err(DeltaApplicationFailure) => { + assert_eq!(baseline, ExpectedOutput::Failure, "{:?}", idx); + break; + }, + Err(Unresolved(d)) => { + assert_eq!( + baseline, + ExpectedOutput::Unresolved(d), + "{:?}", + idx + ); + break; + }, + Err(Dependency(_i)) => (), + } } retry_attempts += 1; if retry_attempts > DEFAULT_TIMEOUT { @@ -284,18 +331,30 @@ where } }, Operator::Remove => { - map.data() - .write(KeyType(key.clone()), (idx as TxnIndex, 1), Value(None)); + let key = KeyType(key.clone()); + let value = Value::new(None); + if test_group { + map.group_data() + .write(key, idx as TxnIndex, 1, vec![(5, value)]); + } else { + map.data().write(key, idx as TxnIndex, 1, value); + } }, Operator::Insert(v) => { - 
map.data().write( - KeyType(key.clone()), - (idx as TxnIndex, 1), - Value(Some(v.clone())), - ); + let key = KeyType(key.clone()); + let value = Value::new(Some(v.clone())); + if test_group { + map.group_data() + .write(key, idx as TxnIndex, 1, vec![(5, value)]); + } else { + map.data().write(key, idx as TxnIndex, 1, value); + } }, Operator::Update(delta) => { - map.add_delta(KeyType(key.clone()), idx as TxnIndex, *delta) + if !test_group { + map.data() + .add_delta(KeyType(key.clone()), idx as TxnIndex, *delta) + } }, } }) @@ -312,7 +371,7 @@ proptest! { universe in vec(any::<[u8; 32]>(), 1), transactions in vec((any::(), operator_strategy::<[u8; 32]>()), 100), ) { - run_and_assert(universe, transactions)?; + run_and_assert(universe, transactions, false)?; } #[test] @@ -320,7 +379,7 @@ proptest! { universe in vec(any::<[u8; 32]>(), 1), transactions in vec((any::(), operator_strategy::<[u8; 32]>()), 2000), ) { - run_and_assert(universe, transactions)?; + run_and_assert(universe, transactions, false)?; } #[test] @@ -328,6 +387,14 @@ proptest! { universe in vec(any::<[u8; 32]>(), 10), transactions in vec((any::(), operator_strategy::<[u8; 32]>()), 100), ) { - run_and_assert(universe, transactions)?; + run_and_assert(universe, transactions, false)?; + } + + #[test] + fn multi_key_proptest_group( + universe in vec(any::<[u8; 32]>(), 3), + transactions in vec((any::(), operator_strategy::<[u8; 32]>()), 200), + ) { + run_and_assert(universe, transactions, true)?; } } diff --git a/aptos-move/mvhashmap/src/versioned_data.rs b/aptos-move/mvhashmap/src/versioned_data.rs index d8d62faa190af..afa64f94ddf4c 100644 --- a/aptos-move/mvhashmap/src/versioned_data.rs +++ b/aptos-move/mvhashmap/src/versioned_data.rs @@ -1,10 +1,12 @@ // Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::types::{Flag, Incarnation, MVDataError, MVDataOutput, TxnIndex, Version}; +use crate::types::{Flag, Incarnation, MVDataError, MVDataOutput, ShiftedTxnIndex, TxnIndex}; use anyhow::Result; -use aptos_aggregator::{delta_change_set::DeltaOp, transaction::AggregatorValue}; +use aptos_aggregator::delta_change_set::DeltaOp; use aptos_types::write_set::TransactionWrite; +use claims::assert_some; use crossbeam::utils::CachePadded; use dashmap::DashMap; use std::{collections::btree_map::BTreeMap, fmt::Debug, hash::Hash, sync::Arc}; @@ -33,18 +35,13 @@ enum EntryCell { Delta(DeltaOp, Option), } -/// A VersionedValue internally contains a BTreeMap from indices of transactions -/// that update the given access path alongside the corresponding entries. It may -/// also contain a base value (value from storage) as u128 if the key corresponds -/// to an aggregator. +/// A versioned value internally is represented as a BTreeMap from indices of +/// transactions that update the given access path & the corresponding entries. struct VersionedValue { - versioned_map: BTreeMap>>, - - // An aggregator value from storage can be here to avoid redundant storage calls. - aggregator_base_value: Option, + versioned_map: BTreeMap>>, } -/// Maps each key (access path) to an internal VersionedValue. +/// Maps each key (access path) to an internal versioned value representation. 
pub struct VersionedData { values: DashMap>, } @@ -90,19 +87,22 @@ impl Entry { } } -impl VersionedValue { - fn new() -> Self { +impl Default for VersionedValue { + fn default() -> Self { Self { versioned_map: BTreeMap::new(), - aggregator_base_value: None, } } +} +impl VersionedValue { fn read(&self, txn_idx: TxnIndex) -> anyhow::Result, MVDataError> { use MVDataError::*; use MVDataOutput::*; - let mut iter = self.versioned_map.range(0..txn_idx); + let mut iter = self + .versioned_map + .range(ShiftedTxnIndex::zero()..ShiftedTxnIndex::new(txn_idx)); // If read encounters a delta, it must traverse the block of transactions // (top-down) until it encounters a write or reaches the end of the block. @@ -111,25 +111,34 @@ impl VersionedValue { while let Some((idx, entry)) = iter.next_back() { if entry.flag() == Flag::Estimate { // Found a dependency. - return Err(Dependency(*idx)); + return Err(Dependency( + idx.idx().expect("May not depend on storage version"), + )); } match (&entry.cell, accumulator.as_mut()) { (EntryCell::Write(incarnation, data), None) => { // Resolve to the write if no deltas were applied in between. - let write_version = (*idx, *incarnation); - return Ok(Versioned(write_version, data.clone())); + return Ok(Versioned( + idx.idx().map(|idx| (idx, *incarnation)), + data.clone(), + )); }, (EntryCell::Write(incarnation, data), Some(accumulator)) => { // Deltas were applied. We must deserialize the value // of the write and apply the aggregated delta accumulator. - return match AggregatorValue::from_write(data.as_ref()) { + return match data + .as_u128() + .expect("Aggregator value must deserialize to u128") + { None => { // Resolve to the write if the WriteOp was deletion // (MoveVM will observe 'deletion'). This takes precedence // over any speculative delta accumulation errors on top. - let write_version = (*idx, *incarnation); - Ok(Versioned(write_version, data.clone())) + Ok(Versioned( + idx.idx().map(|idx| (idx, *incarnation)), + data.clone(), + )) }, Some(value) => { // Panics if the data can't be resolved to an aggregator value. @@ -137,7 +146,7 @@ impl VersionedValue { .map_err(|_| DeltaApplicationFailure) .and_then(|a| { // Apply accumulated delta to resolve the aggregator value. - a.apply_to(value.into()) + a.apply_to(value) .map(|result| Resolved(result)) .map_err(|_| DeltaApplicationFailure) }) @@ -184,25 +193,13 @@ impl VersionedValue { // deltas the actual written value has not been seen yet (i.e. // it is not added as an entry to the data-structure). match accumulator { - Some(Ok(accumulator)) => match self.aggregator_base_value { - Some(base_value) => accumulator - .apply_to(base_value) - .map(|result| Resolved(result)) - .map_err(|_| DeltaApplicationFailure), - None => Err(Unresolved(accumulator)), - }, + Some(Ok(accumulator)) => Err(Unresolved(accumulator)), Some(Err(_)) => Err(DeltaApplicationFailure), - None => Err(NotFound), + None => Err(Uninitialized), } } } -impl Default for VersionedValue { - fn default() -> Self { - VersionedValue::new() - } -} - impl VersionedData { pub(crate) fn new() -> Self { Self { @@ -210,17 +207,12 @@ impl VersionedData { } } - pub(crate) fn set_aggregator_base_value(&self, key: &K, value: u128) { - let mut v = self.values.get_mut(key).expect("Path must exist"); - - // Record base value. If a value was added by another thread, assert they're equal. 
- assert_eq!(*v.aggregator_base_value.get_or_insert(value), value); - } - - pub(crate) fn add_delta(&self, key: K, txn_idx: TxnIndex, delta: DeltaOp) { + pub fn add_delta(&self, key: K, txn_idx: TxnIndex, delta: DeltaOp) { let mut v = self.values.entry(key).or_default(); - v.versioned_map - .insert(txn_idx, CachePadded::new(Entry::new_delta_from(delta))); + v.versioned_map.insert( + ShiftedTxnIndex::new(txn_idx), + CachePadded::new(Entry::new_delta_from(delta)), + ); } /// Mark an entry from transaction 'txn_idx' at access path 'key' as an estimated write @@ -228,7 +220,7 @@ impl VersionedData { pub fn mark_estimate(&self, key: &K, txn_idx: TxnIndex) { let mut v = self.values.get_mut(key).expect("Path must exist"); v.versioned_map - .get_mut(&txn_idx) + .get_mut(&ShiftedTxnIndex::new(txn_idx)) .expect("Entry by the txn must exist to mark estimate") .mark_estimate(); } @@ -238,13 +230,13 @@ impl VersionedData { pub fn delete(&self, key: &K, txn_idx: TxnIndex) { // TODO: investigate logical deletion. let mut v = self.values.get_mut(key).expect("Path must exist"); - assert!( - v.versioned_map.remove(&txn_idx).is_some(), - "Entry must exist to be deleted" + assert_some!( + v.versioned_map.remove(&ShiftedTxnIndex::new(txn_idx)), + "Entry for key / idx must exist to be deleted" ); } - pub(crate) fn fetch_data( + pub fn fetch_data( &self, key: &K, txn_idx: TxnIndex, @@ -252,16 +244,35 @@ impl VersionedData { self.values .get(key) .map(|v| v.read(txn_idx)) - .unwrap_or(Err(MVDataError::NotFound)) + .unwrap_or(Err(MVDataError::Uninitialized)) } - /// Versioned write of data at a given key (and version). - pub fn write(&self, key: K, version: Version, data: V) { - let (txn_idx, incarnation) = version; + pub fn provide_base_value(&self, key: K, data: V) { + let mut v = self.values.entry(key).or_default(); + let bytes_len = data.bytes_len(); + // For base value, incarnation is irrelevant, set to 0. + let prev_entry = v.versioned_map.insert( + ShiftedTxnIndex::zero(), + CachePadded::new(Entry::new_write_from(0, data)), + ); + assert!(prev_entry.map_or(true, |entry| -> bool { + if let EntryCell::Write(i, v) = &entry.cell { + // base value may have already been provided due to a concurrency race, + // but it has to be the same as being set. + // Assert the length of bytes for efficiency (instead of full equality) + *i == 0 && v.bytes_len() == bytes_len + } else { + true + } + })); + } + + /// Versioned write of data at a given key (and version). + pub fn write(&self, key: K, txn_idx: TxnIndex, incarnation: Incarnation, data: V) { let mut v = self.values.entry(key).or_default(); let prev_entry = v.versioned_map.insert( - txn_idx, + ShiftedTxnIndex::new(txn_idx), CachePadded::new(Entry::new_write_from(incarnation, data)), ); @@ -282,14 +293,14 @@ impl VersionedData { /// transaction has indeed produced a delta recorded at the given key. /// /// If the result is Err(op), it means the base value to apply DeltaOp op hadn't been set. - pub(crate) fn materialize_delta(&self, key: &K, txn_idx: TxnIndex) -> Result { + pub fn materialize_delta(&self, key: &K, txn_idx: TxnIndex) -> Result { let mut v = self.values.get_mut(key).expect("Path must exist"); // +1 makes sure we include the delta from txn_idx. 
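Both fetch_data and materialize_delta above go through VersionedValue::read, which scans the BTreeMap backwards over the half-open range from the base index up to (but excluding) the reader's own index: the newest entry below the reader wins, and an Estimate flag becomes a Dependency error. A compact std-only sketch of that lookup rule, with deltas elided and plain u32 indices in place of ShiftedTxnIndex:

use std::collections::BTreeMap;

type TxnIndex = u32;

enum Entry {
    Write(&'static str),
    Estimate,
}

#[derive(Debug, PartialEq)]
enum ReadResult {
    Versioned(TxnIndex, &'static str),
    Dependency(TxnIndex),
    Uninitialized,
}

/// Latest entry strictly below txn_idx, mirroring the range(..) / next_back() scan above.
fn read(map: &BTreeMap<TxnIndex, Entry>, txn_idx: TxnIndex) -> ReadResult {
    match map.range(0..txn_idx).next_back() {
        Some((idx, Entry::Write(data))) => ReadResult::Versioned(*idx, *data),
        Some((idx, Entry::Estimate)) => ReadResult::Dependency(*idx),
        None => ReadResult::Uninitialized,
    }
}

fn main() {
    let mut map = BTreeMap::new();
    map.insert(3, Entry::Write("written by txn 3"));
    map.insert(7, Entry::Estimate);
    assert_eq!(read(&map, 3), ReadResult::Uninitialized); // a txn never sees its own index
    assert_eq!(read(&map, 5), ReadResult::Versioned(3, "written by txn 3"));
    assert_eq!(read(&map, 9), ReadResult::Dependency(7)); // estimate blocks the read
}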
match v.read(txn_idx + 1) { Ok(MVDataOutput::Resolved(value)) => { v.versioned_map - .get_mut(&txn_idx) + .get_mut(&ShiftedTxnIndex::new(txn_idx)) .expect("Entry by the txn must exist to commit delta") .record_delta_shortcut(value); diff --git a/aptos-move/mvhashmap/src/versioned_group_data.rs b/aptos-move/mvhashmap/src/versioned_group_data.rs new file mode 100644 index 0000000000000..8e30bc3ffd748 --- /dev/null +++ b/aptos-move/mvhashmap/src/versioned_group_data.rs @@ -0,0 +1,654 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::types::{Flag, Incarnation, MVGroupError, ShiftedTxnIndex, TxnIndex, Version}; +use aptos_types::write_set::TransactionWrite; +use claims::assert_some; +use crossbeam::utils::CachePadded; +use dashmap::DashMap; +use serde::Serialize; +use std::{ + collections::{btree_map::BTreeMap, HashMap}, + fmt::Debug, + hash::Hash, + sync::Arc, +}; + +struct GroupEntry { + // Note: can be a raw pointer (different data-structure holds the value during the + // lifetime), but would require unsafe access. + value: Arc, + incarnation: Incarnation, + flag: Flag, +} + +impl GroupEntry { + fn new(value: Arc, incarnation: Incarnation) -> Self { + Self { + value, + incarnation, + flag: Flag::Done, + } + } +} + +/// Represents a group value, i.e. a key that does not correspond to a single value, +/// but instead a collection of values each associated with a tag. +/// +/// Implementation note: due to DashMap in VersionedGroupData, the updates are atomic. +/// If this changes, we must maintain invariants on insertion / deletion order among +/// members (e.g. versioned_map then idx_to_update, deletion vice versa). +pub(crate) struct VersionedGroupValue { + /// While versioned_map maps tags to versioned entries for the tag, idx_to_update + /// maps a transaction index to all corresponding group updates. ShiftedTxnIndex is used + /// to dedicated index 0 for base (storage version, prior to block execution) values. + versioned_map: HashMap>>>, + /// Mapping transaction indices to the set of group member updates. As it is required + /// to provide base values from storage, and since all versions including storage are + /// represented in the same data-structure, the key set corresponds to all relevant + /// tags (group membership is not fixed, see aip-9). + /// Note: if we do not garbage collect final idx_to_update contents until the end of + /// block execution (lifetime of the data-structure), then we can have other structures + /// hold raw pointers to the values as an optimization. + idx_to_update: BTreeMap>>>, + + /// Group contents corresponding to the latest committed version. + committed_group: HashMap>, +} + +/// Maps each key (access path) to an internal VersionedValue. +pub struct VersionedGroupData { + group_values: DashMap>, +} + +impl Default + for VersionedGroupValue +{ + fn default() -> Self { + Self { + versioned_map: HashMap::new(), + idx_to_update: BTreeMap::new(), + committed_group: HashMap::new(), + } + } +} + +impl VersionedGroupValue { + fn write( + &mut self, + shifted_idx: ShiftedTxnIndex, + incarnation: Incarnation, + values: impl IntoIterator, + ) { + let arc_map = values + .into_iter() + .map(|(tag, v)| { + let arc_v = Arc::new(v); + + // Update versioned_map. 
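The group write path above keeps two views of the same Arc'd value: versioned_map (tag, then transaction index) serves reads, while idx_to_update (transaction index, then tag) lets mark_estimate and delete find every tag a given transaction wrote. A minimal mirror of that dual bookkeeping, with hypothetical String values:

use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;

type TxnIndex = u32;
type Tag = u32;

#[derive(Default)]
struct DemoGroup {
    versioned_map: HashMap<Tag, BTreeMap<TxnIndex, Arc<String>>>,
    idx_to_update: BTreeMap<TxnIndex, HashMap<Tag, Arc<String>>>,
}

impl DemoGroup {
    fn write(&mut self, idx: TxnIndex, values: impl IntoIterator<Item = (Tag, String)>) {
        let arc_map: HashMap<Tag, Arc<String>> = values
            .into_iter()
            .map(|(tag, v)| {
                let arc_v = Arc::new(v);
                // Per-tag view for reads.
                self.versioned_map
                    .entry(tag)
                    .or_default()
                    .insert(idx, arc_v.clone());
                (tag, arc_v)
            })
            .collect();
        // Per-transaction view for mark_estimate / delete.
        self.idx_to_update.insert(idx, arc_map);
    }
}

fn main() {
    let mut group = DemoGroup::default();
    group.write(3, vec![(0, "a".to_string()), (1, "b".to_string())]);
    assert_eq!(group.versioned_map[&1][&3].as_str(), "b");
    assert_eq!(group.idx_to_update[&3].len(), 2);
}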
+ let tag_entry = self.versioned_map.entry(tag.clone()).or_default(); + tag_entry.insert( + shifted_idx.clone(), + CachePadded::new(GroupEntry::new(arc_v.clone(), incarnation)), + ); + + (tag, arc_v) + }) + .collect(); + + let zero = ShiftedTxnIndex::zero(); + let base_idx = shifted_idx == zero; + + self.idx_to_update + .insert(shifted_idx, CachePadded::new(arc_map)); + if base_idx { + self.commit_idx(zero); + } + } + + fn mark_estimate(&mut self, txn_idx: TxnIndex) { + let shifted_idx = ShiftedTxnIndex::new(txn_idx); + let idx_updates = self + .idx_to_update + .get(&shifted_idx) + .expect("Group updates must exist at the index to mark estimate"); + + // estimate flag lives in GroupEntry, w. value in versioned_map to simplify reading + // based on txn_idx and tag. marking estimates occurs per txn (data MVHashMap exposes + // the interface for txn_idx & key). Hence, we must mark tags individually. + for (tag, _) in idx_updates.iter() { + self.versioned_map + .get_mut(tag) + .expect("Versioned entry must exist for tag") + .get_mut(&shifted_idx) + .expect("Versioned entry must exist") + .flag = Flag::Estimate; + } + } + + fn delete(&mut self, txn_idx: TxnIndex) { + let shifted_idx = ShiftedTxnIndex::new(txn_idx); + // Delete idx updates first, then entries. + let idx_updates = self + .idx_to_update + .remove(&shifted_idx) + .expect("Group updates must exist at the index to mark estimate"); + + // Similar to mark_estimate, need to delete an individual entry for each tag. + for (tag, _) in idx_updates.iter() { + assert_some!( + self.versioned_map + .get_mut(tag) + .expect("Versioned entry must exist for tag") + .remove(&shifted_idx), + "Entry for tag / idx must exist to be deleted" + ); + } + } + + // Records and returns pointers for the latest committed value for each tag in the group. 
+ fn commit_idx(&mut self, shifted_idx: ShiftedTxnIndex) -> HashMap> { + let idx_updates = self + .idx_to_update + .get(&shifted_idx) + .expect("Group updates must exist at the index to commit"); + for (tag, v) in idx_updates.iter() { + if v.is_deletion() { + self.committed_group.remove(tag); + } else { + self.committed_group.insert(tag.clone(), v.clone()); + } + } + + self.committed_group.clone() + } + + fn get_latest_tagged_value( + &self, + txn_idx: TxnIndex, + tag: &T, + ) -> Result<(Arc, Version), MVGroupError> { + let common_error = || -> MVGroupError { + if self.idx_to_update.contains_key(&ShiftedTxnIndex::zero()) { + MVGroupError::TagNotFound + } else { + MVGroupError::Uninitialized + } + }; + + self.versioned_map + .get(tag) + .ok_or(common_error()) + .and_then(|tree| { + match tree + .range(ShiftedTxnIndex::zero()..ShiftedTxnIndex::new(txn_idx)) + .next_back() + { + Some((idx, entry)) => { + if entry.flag == Flag::Estimate { + Err(MVGroupError::Dependency( + idx.idx() + .expect("Base version cannot be marked as estimate"), + )) + } else { + Ok(( + entry.value.clone(), + idx.idx().map(|idx| (idx, entry.incarnation)), + )) + } + }, + None => Err(common_error()), + } + }) + } + + fn get_latest_group_size(&self, txn_idx: TxnIndex) -> Result { + if !self.idx_to_update.contains_key(&ShiftedTxnIndex::zero()) { + return Err(MVGroupError::Uninitialized); + } + + self.versioned_map + .iter() + .try_fold(0_u64, |len, (tag, tree)| { + match tree + .range(ShiftedTxnIndex::zero()..ShiftedTxnIndex::new(txn_idx)) + .next_back() + { + Some((idx, entry)) => { + if entry.flag == Flag::Estimate { + Err(MVGroupError::Dependency( + idx.idx().expect("May not depend on storage version"), + )) + } else { + let delta = entry.value.bytes_len() as u64 + + bcs::serialized_size(tag) + .map_err(|_| MVGroupError::TagSerializationError)? + as u64; + Ok(len + delta) + } + }, + None => Ok(len), + } + }) + } +} + +impl< + K: Hash + Clone + Debug + Eq, + T: Hash + Clone + Debug + Eq + Serialize, + V: TransactionWrite, + > VersionedGroupData +{ + pub(crate) fn new() -> Self { + Self { + group_values: DashMap::new(), + } + } + + pub fn provide_base_values(&self, key: K, base_values: impl IntoIterator) { + // Incarnation is irrelevant for storage version, set to 0. + self.group_values + .entry(key) + .or_default() + .write(ShiftedTxnIndex::zero(), 0, base_values); + } + + pub fn write( + &self, + key: K, + txn_idx: TxnIndex, + incarnation: Incarnation, + values: impl IntoIterator, + ) { + self.group_values.entry(key).or_default().write( + ShiftedTxnIndex::new(txn_idx), + incarnation, + values, + ); + } + + /// Mark all entry from transaction 'txn_idx' at access path 'key' as an estimated write + /// (for future incarnation). Will panic if the entry is not in the data-structure. + pub fn mark_estimate(&self, key: &K, txn_idx: TxnIndex) { + self.group_values + .get_mut(key) + .expect("Path must exist") + .mark_estimate(txn_idx); + } + + /// Delete all entries from transaction 'txn_idx' at access path 'key'. Will panic + /// if the corresponding entry does not exist. + pub fn delete(&self, key: &K, txn_idx: TxnIndex) { + self.group_values + .get_mut(key) + .expect("Path must exist") + .delete(txn_idx); + } + + /// Read the latest value corresponding to a tag at a given group (identified by key). + /// Return the size of the group (if requested), as defined above, alongside the version + /// information (None if storage/pre-block version). 
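get_latest_group_size above computes the group's size as a fold over all tags: for each tag, the byte length of the latest visible value plus the bcs-serialized length of the tag itself. A simplified sketch of that accumulation, with a caller-supplied per-tag overhead standing in for the real bcs::serialized_size call and with estimates/dependencies ignored:

use std::collections::HashMap;

/// latest_value_len maps each tag to the byte length of its latest visible value.
fn group_size(latest_value_len: &HashMap<u32, u64>, tag_serialized_len: u64) -> u64 {
    latest_value_len
        .values()
        .map(|value_len| value_len + tag_serialized_len)
        .sum()
}

fn main() {
    // Three tags whose latest values are 2, 2 and 1 bytes, with 1 byte per serialized tag.
    let latest: HashMap<u32, u64> = HashMap::from([(0, 2), (1, 2), (2, 1)]);
    assert_eq!(group_size(&latest, 1), (2 + 1) + (2 + 1) + (1 + 1));
}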
+ pub fn read_from_group( + &self, + key: &K, + txn_idx: TxnIndex, + tag: &T, + ) -> anyhow::Result<(Arc, Version), MVGroupError> { + match self.group_values.get(key) { + Some(g) => g.get_latest_tagged_value(txn_idx, tag), + None => Err(MVGroupError::Uninitialized), + } + } + + /// Returns the sum of latest sizes of all group members (and their respective tags), + /// collected based on the list of recorded tags. If the latest entry at any tag was + /// marked as an estimate, a dependency is returned. Note: it would be possible to + /// process estimated entry sizes, but would have to mark that if after the re-execution + /// the entry size changes, then re-execution must reduce validation idx. + pub fn get_group_size(&self, key: &K, txn_idx: TxnIndex) -> Result { + match self.group_values.get(key) { + Some(g) => g.get_latest_group_size(txn_idx), + None => Err(MVGroupError::Uninitialized), + } + } + + /// For a given key that corresponds to a group, and an index of a transaction the last + /// incarnation of which wrote to at least one tag of the group, finalizes the latest + /// contents of the group. This method works on pointers only and is relatively lighweight, + /// while subsequent post-processing can clone and serialize the whole group. Note: required + /// since the output of the block executor still needs to return the whole group contents. + /// + /// The method must be called when all transactions <= txn_idx are actually committed, and + /// the values pointed by weak are guaranteed to be fixed and available during the lifetime + /// of the data-structure itself. + pub fn commit_group(&self, key: &K, txn_idx: TxnIndex) -> HashMap> { + let mut v = self.group_values.get_mut(key).expect("Path must exist"); + + v.commit_idx(ShiftedTxnIndex::new(txn_idx)) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::types::{ + test::{KeyType, TestValue}, + StorageVersion, + }; + use claims::{assert_matches, assert_none, assert_ok_eq, assert_some_eq}; + use test_case::test_case; + + #[should_panic] + #[test_case(0)] + #[test_case(1)] + #[test_case(2)] + fn group_no_path_exists(test_idx: usize) { + let ap = KeyType(b"/foo/b".to_vec()); + let map = VersionedGroupData::>, usize, TestValue>::new(); + + match test_idx { + 0 => { + map.mark_estimate(&ap, 1); + }, + 1 => { + map.delete(&ap, 2); + }, + 2 => { + map.commit_group(&ap, 0); + }, + _ => unreachable!("Wrong test index"), + } + } + + #[test] + fn group_uninitialized() { + let ap_0 = KeyType(b"/foo/a".to_vec()); + let ap_1 = KeyType(b"/foo/b".to_vec()); + let ap_2 = KeyType(b"/foo/c".to_vec()); + + let map = VersionedGroupData::>, usize, TestValue>::new(); + assert_matches!( + map.get_group_size(&ap_0, 3), + Err(MVGroupError::Uninitialized) + ); + + map.write( + ap_1.clone(), + 3, + 1, + // tags 0, 1, 2. + (0..2).map(|i| (i, TestValue::with_len(1))), + ); + + // Size should be uninitialized even if the output of lower txn is stored + // (as long as the base isn't provided). + assert_matches!( + map.get_group_size(&ap_1, 3), + Err(MVGroupError::Uninitialized) + ); + assert_matches!( + map.get_group_size(&ap_1, 4), + Err(MVGroupError::Uninitialized) + ); + // for reading a tag at ap_1, w.o. returning size, idx = 3 is Uninitialized. + assert_matches!( + map.read_from_group(&ap_1, 3, &1), + Err(MVGroupError::Uninitialized) + ); + // ... but idx = 4 should find the previously stored value. + assert_eq!( + map.read_from_group(&ap_1, 4, &1).unwrap(), + // Arc compares by value, no return size, incarnation. 
+ (Arc::new(TestValue::with_len(1)), Ok((3, 1))) + ); + // ap_0 should still be uninitialized. + assert_matches!( + map.read_from_group(&ap_0, 3, &1), + Err(MVGroupError::Uninitialized) + ); + + map.write( + ap_2.clone(), + 4, + 0, + // tags 1, 2. + (1..3).map(|i| (i, TestValue::with_len(4))), + ); + assert_matches!( + map.read_from_group(&ap_2, 4, &2), + Err(MVGroupError::Uninitialized) + ); + map.provide_base_values( + ap_2.clone(), + // base tags 0, 1. + (0..2).map(|i| (i, TestValue::with_len(2))), + ); + + // Tag not found vs not initialized, + assert_matches!( + map.read_from_group(&ap_2, 4, &2), + Err(MVGroupError::TagNotFound) + ); + assert_matches!( + map.read_from_group(&ap_2, 5, &4), + Err(MVGroupError::TagNotFound) + ); + // vs finding a versioned entry from txn 4, vs from storage. + assert_eq!( + map.read_from_group(&ap_2, 5, &2).unwrap(), + (Arc::new(TestValue::with_len(4)), Ok((4, 0))) + ); + assert_eq!( + map.read_from_group(&ap_2, 5, &0).unwrap(), + (Arc::new(TestValue::with_len(2)), Err(StorageVersion)) + ); + } + + #[test] + fn group_read_write_estimate() { + use MVGroupError::*; + let ap = KeyType(b"/foo/f".to_vec()); + let map = VersionedGroupData::>, usize, TestValue>::new(); + + map.write( + ap.clone(), + 5, + 3, + // tags 0, 1, values are derived from [txn_idx, incarnation] seed. + (0..2).map(|i| (i, TestValue::new(vec![5, 3]))), + ); + assert_eq!( + map.read_from_group(&ap, 12, &1).unwrap(), + (Arc::new(TestValue::new(vec![5, 3])), Ok((5, 3))) + ); + map.write( + ap.clone(), + 10, + 1, + // tags 1, 2, values are derived from [txn_idx, incarnation] seed. + (1..3).map(|i| (i, TestValue::new(vec![10, 1]))), + ); + assert_eq!( + map.read_from_group(&ap, 12, &1).unwrap(), + (Arc::new(TestValue::new(vec![10, 1])), Ok((10, 1))) + ); + + map.mark_estimate(&ap, 10); + assert_matches!(map.read_from_group(&ap, 12, &1), Err(Dependency(10))); + assert_matches!(map.read_from_group(&ap, 12, &2), Err(Dependency(10))); + assert_matches!(map.read_from_group(&ap, 12, &3), Err(Uninitialized)); + assert_eq!( + map.read_from_group(&ap, 12, &0).unwrap(), + (Arc::new(TestValue::new(vec![5, 3])), Ok((5, 3))) + ); + + map.delete(&ap, 10); + assert_eq!( + map.read_from_group(&ap, 12, &0).unwrap(), + (Arc::new(TestValue::new(vec![5, 3])), Ok((5, 3))) + ); + assert_eq!( + map.read_from_group(&ap, 12, &1).unwrap(), + (Arc::new(TestValue::new(vec![5, 3])), Ok((5, 3))) + ); + } + + #[test] + fn latest_group_size() { + use MVGroupError::*; + let ap = KeyType(b"/foo/f".to_vec()); + let map = VersionedGroupData::>, usize, TestValue>::new(); + + map.write( + ap.clone(), + 5, + 3, + // tags 0, 1 + (0..2).map(|i| (i, TestValue::with_len(2))), + ); + assert_matches!(map.get_group_size(&ap, 12), Err(Uninitialized)); + + map.provide_base_values( + ap.clone(), + // base tag 1, 2, 3, 4 + (1..5).map(|i| (i, TestValue::with_len(1))), + ); + + let tag: usize = 5; + let tag_len = bcs::serialized_size(&tag).unwrap(); + let one_entry_len = TestValue::with_len(1).bytes_len(); + let two_entry_len = TestValue::with_len(2).bytes_len(); + let three_entry_len = TestValue::with_len(3).bytes_len(); + let four_entry_len = TestValue::with_len(4).bytes_len(); + let exp_size = 2 * two_entry_len + 3 * one_entry_len + 5 * tag_len; + assert_ok_eq!(map.get_group_size(&ap, 12), exp_size as u64); + + map.write( + ap.clone(), + 10, + 1, + // tags 4, 5 + (4..6).map(|i| (i, TestValue::with_len(3))), + ); + let exp_size_12 = exp_size + 2 * three_entry_len + tag_len - one_entry_len; + assert_ok_eq!(map.get_group_size(&ap, 12), 
exp_size_12 as u64); + assert_ok_eq!(map.get_group_size(&ap, 10), exp_size as u64); + + map.mark_estimate(&ap, 5); + assert_matches!(map.get_group_size(&ap, 12), Err(Dependency(5))); + let exp_size_4 = 4 * (tag_len + one_entry_len); + assert_ok_eq!(map.get_group_size(&ap, 4), exp_size_4 as u64); + + map.write( + ap.clone(), + 6, + 1, + (0..2).map(|i| (i, TestValue::with_len(4))), + ); + let exp_size_7 = 2 * four_entry_len + 3 * one_entry_len + 5 * tag_len; + assert_ok_eq!(map.get_group_size(&ap, 7), exp_size_7 as u64); + assert_matches!(map.get_group_size(&ap, 6), Err(Dependency(5))); + + map.delete(&ap, 5); + assert_ok_eq!(map.get_group_size(&ap, 6), exp_size_4 as u64); + } + + #[test] + fn group_commit_idx() { + let ap = KeyType(b"/foo/f".to_vec()); + let map = VersionedGroupData::>, usize, TestValue>::new(); + + map.provide_base_values( + ap.clone(), + // base tag 1, 2, 3 + (1..4).map(|i| (i, TestValue::from_u128(i as u128))), + ); + map.write( + ap.clone(), + 7, + 3, + // insert at 0, remove at 1. + vec![ + (0, TestValue::from_u128(100_u128)), + (1, TestValue::deletion()), + ], + ); + map.write( + ap.clone(), + 3, + 0, + // tags 2, 3 + (2..4).map(|i| (i, TestValue::from_u128(200 + i as u128))), + ); + let committed_3 = map.commit_group(&ap, 3); + // The value at tag 1 is from base, while 2 and 3 are from txn 3. + // (Arc compares with value equality) + assert_eq!(committed_3.len(), 3); + assert_some_eq!(committed_3.get(&1), &Arc::new(TestValue::from_u128(1))); + assert_some_eq!( + committed_3.get(&2), + &Arc::new(TestValue::from_u128(200 + 2)) + ); + assert_some_eq!( + committed_3.get(&3), + &Arc::new(TestValue::from_u128(200 + 3)) + ); + + map.write( + ap.clone(), + 5, + 3, + // tags 3, 4 + (3..5).map(|i| (i, TestValue::from_u128(300 + i as u128))), + ); + let committed_5 = map.commit_group(&ap, 5); + assert_eq!(committed_5.len(), 4); + assert_some_eq!(committed_5.get(&1), &Arc::new(TestValue::from_u128(1))); + assert_some_eq!( + committed_5.get(&2), + &Arc::new(TestValue::from_u128(200 + 2)) + ); + assert_some_eq!( + committed_5.get(&3), + &Arc::new(TestValue::from_u128(300 + 3)) + ); + assert_some_eq!( + committed_5.get(&4), + &Arc::new(TestValue::from_u128(300 + 4)) + ); + + let committed_7 = map.commit_group(&ap, 7); + assert_eq!(committed_7.len(), 4); + assert_some_eq!(committed_7.get(&0), &Arc::new(TestValue::from_u128(100))); + assert_none!(committed_7.get(&1)); + assert_some_eq!( + committed_7.get(&2), + &Arc::new(TestValue::from_u128(200 + 2)) + ); + assert_some_eq!( + committed_7.get(&3), + &Arc::new(TestValue::from_u128(300 + 3)) + ); + assert_some_eq!( + committed_7.get(&4), + &Arc::new(TestValue::from_u128(300 + 4)) + ); + + map.write( + ap.clone(), + 8, + 0, + // re-insert at 1, delete everything else + vec![ + (0, TestValue::deletion()), + (1, TestValue::from_u128(400_u128)), + (2, TestValue::deletion()), + (3, TestValue::deletion()), + (4, TestValue::deletion()), + ], + ); + let committed_8 = map.commit_group(&ap, 8); + assert_eq!(committed_8.len(), 1); + assert_some_eq!(committed_8.get(&1), &Arc::new(TestValue::from_u128(400))); + } +} diff --git a/aptos-move/mvhashmap/src/versioned_modules.rs b/aptos-move/mvhashmap/src/versioned_modules.rs index 364e46c88fb25..86e3aa32ef078 100644 --- a/aptos-move/mvhashmap/src/versioned_modules.rs +++ b/aptos-move/mvhashmap/src/versioned_modules.rs @@ -124,6 +124,9 @@ impl VersionedModules< .insert(txn_idx, CachePadded::new(Entry::new_write_from(data))); } + /// Adds a new executable to the multi-version data-structure. 
The executable is either + /// storage-version (and fixed) or uniquely identified by the (cryptographic) hash of the + /// module published during the block. pub fn store_executable(&self, key: &K, descriptor_hash: HashValue, executable: X) { let mut v = self.values.get_mut(key).expect("Path must exist"); v.executables @@ -131,6 +134,9 @@ impl VersionedModules< .or_insert_with(|| Arc::new(executable)); } + /// Fetches the latest module stored at the given key, either as in an executable form, + /// if already cached, or in a raw module format that the VM can convert to an executable. + /// The errors are returned if no module is found, or if a dependency is encountered. pub fn fetch_module( &self, key: &K, diff --git a/aptos-move/vm-genesis/Cargo.toml b/aptos-move/vm-genesis/Cargo.toml index dec643259de9a..41961e1f3c3c5 100644 --- a/aptos-move/vm-genesis/Cargo.toml +++ b/aptos-move/vm-genesis/Cargo.toml @@ -23,6 +23,7 @@ aptos-types = { workspace = true } aptos-vm = { workspace = true } aptos-vm-types = { workspace = true } bcs = { workspace = true } +bytes = { workspace = true } move-core-types = { workspace = true } move-vm-types = { workspace = true } once_cell = { workspace = true } diff --git a/aptos-move/vm-genesis/src/genesis_context.rs b/aptos-move/vm-genesis/src/genesis_context.rs index b73ef3c2248bc..7e68d806105d7 100644 --- a/aptos-move/vm-genesis/src/genesis_context.rs +++ b/aptos-move/vm-genesis/src/genesis_context.rs @@ -12,6 +12,7 @@ use aptos_types::{ state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, }, }; +use bytes::Bytes; use move_core_types::language_storage::ModuleId; use std::collections::HashMap; @@ -42,8 +43,7 @@ impl TStateView for GenesisStateView { Ok(self .state_data .get(state_key) - .cloned() - .map(StateValue::new_legacy)) + .map(|bytes| StateValue::new_legacy(Bytes::copy_from_slice(bytes)))) } fn get_usage(&self) -> Result { diff --git a/aptos-move/writeset-transaction-generator/src/writeset_builder.rs b/aptos-move/writeset-transaction-generator/src/writeset_builder.rs index 1c395b1b33544..ff8da1cb1d02a 100644 --- a/aptos-move/writeset-transaction-generator/src/writeset_builder.rs +++ b/aptos-move/writeset-transaction-generator/src/writeset_builder.rs @@ -13,7 +13,7 @@ use aptos_types::{ transaction::{ChangeSet, Script, Version}, }; use aptos_vm::{ - data_cache::StorageAdapter, + data_cache::AsMoveResolver, move_vm_ext::{MoveVmExt, SessionExt, SessionId}, }; use aptos_vm_types::storage::ChangeSetConfigs; @@ -118,13 +118,12 @@ where TimedFeatures::enable_all(), ) .unwrap(); - let state_view_storage = StorageAdapter::new(state_view); + let resolver = state_view.as_move_resolver(); let change_set = { // TODO: specify an id by human and pass that in. 
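The `GenesisStateView` change above stops cloning the stored `Vec<u8>` into a legacy `StateValue` and instead copies the bytes into a `bytes::Bytes` buffer, which can then be shared cheaply. A small standalone sketch of the `Bytes` behaviour being relied on (illustrative only, not aptos code):

```rust
use bytes::Bytes;

fn main() {
    let raw: Vec<u8> = vec![1, 2, 3, 4];

    // One copy out of the borrowed slice into a reference-counted buffer.
    let value = Bytes::copy_from_slice(&raw);

    // Further clones are cheap: they share the same allocation instead of
    // copying the underlying bytes again.
    let shared = value.clone();
    assert_eq!(shared.as_ref(), &raw[..]);
    assert_eq!(shared.len(), 4);
}
```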
let genesis_id = HashValue::zero(); - let mut session = GenesisSession( - move_vm.new_session(&state_view_storage, SessionId::genesis(genesis_id)), - ); + let mut session = + GenesisSession(move_vm.new_session(&resolver, SessionId::genesis(genesis_id))); session.disable_reconfiguration(); procedure(&mut session); session.enable_reconfiguration(); diff --git a/aptos-node/Cargo.toml b/aptos-node/Cargo.toml index 1ee9db0cd4a6e..a33b10190915a 100644 --- a/aptos-node/Cargo.toml +++ b/aptos-node/Cargo.toml @@ -40,6 +40,7 @@ aptos-logger = { workspace = true } aptos-mempool = { workspace = true } aptos-mempool-notifications = { workspace = true } aptos-network = { workspace = true } +aptos-network-benchmark = { workspace = true } aptos-network-builder = { workspace = true } aptos-node-identity = { workspace = true } aptos-peer-monitoring-service-client = { workspace = true } @@ -61,6 +62,7 @@ aptos-types = { workspace = true } aptos-vm = { workspace = true } bcs = { workspace = true } clap = { workspace = true } +either = { workspace = true } fail = { workspace = true } futures = { workspace = true } hex = { workspace = true } diff --git a/aptos-node/src/lib.rs b/aptos-node/src/lib.rs index 9a8dc9fd0d8b1..3c0485482509c 100644 --- a/aptos-node/src/lib.rs +++ b/aptos-node/src/lib.rs @@ -29,8 +29,8 @@ use hex::{FromHex, FromHexError}; use rand::{rngs::StdRng, SeedableRng}; use std::{ fs, - io::{Read, Write}, - path::PathBuf, + io::Write, + path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -123,6 +123,7 @@ impl AptosNodeArgs { setup_test_environment_and_start_node( self.config, self.test_config_override, + None, self.test_dir, self.random_ports, self.lazy, @@ -192,6 +193,11 @@ pub fn start( // Instantiate the global logger let (remote_log_receiver, logger_filter_update) = logger::create_logger(&config, log_file); + assert!( + !cfg!(feature = "testing") && !cfg!(feature = "fuzzing"), + "Testing features shouldn't be compiled" + ); + // Ensure failpoints are configured correctly if fail::has_failpoints() { warn!("Failpoints are enabled!"); @@ -226,6 +232,7 @@ pub fn start( pub fn setup_test_environment_and_start_node( config_path: Option, test_config_override_path: Option, + config: Option, test_dir: Option, random_ports: bool, enable_lazy_mode: bool, @@ -248,46 +255,22 @@ where let aptos_root_key_path = test_dir.join("mint.key"); // If there's already a config, use it. Otherwise create a test one. - let config = if validator_config_path.exists() { + let config = if let Some(config) = config { + config + } else if validator_config_path.exists() { NodeConfig::load_from_path(&validator_config_path) .map_err(|error| anyhow!("Unable to load config: {:?}", error))? } else { - // Create a test only config for a single validator node - let node_config = create_single_node_test_config( - config_path.clone(), - test_config_override_path.clone(), + // Create a test only config for a single validator node. + create_single_node_test_config( + &config_path, + &test_config_override_path, + &test_dir, + random_ports, enable_lazy_mode, - )?; - - // Build genesis and the validator node - let builder = aptos_genesis::builder::Builder::new(&test_dir, framework.clone())? 
- .with_init_config(Some(Arc::new(move |_, config, _| { - *config = node_config.clone(); - }))) - .with_init_genesis_config(Some(Arc::new(|genesis_config| { - genesis_config.allow_new_validators = true; - genesis_config.epoch_duration_secs = EPOCH_LENGTH_SECS; - genesis_config.recurring_lockup_duration_secs = 7200; - }))) - .with_randomize_first_validator_ports(random_ports); - let (root_key, _genesis, genesis_waypoint, mut validators) = builder.build(rng)?; - - // Write the mint key to disk - let serialized_keys = bcs::to_bytes(&root_key)?; - let mut key_file = fs::File::create(&aptos_root_key_path)?; - key_file.write_all(&serialized_keys)?; - - // Build a waypoint file so that clients / docker can grab it easily - let waypoint_file_path = test_dir.join("waypoint.txt"); - Write::write_all( - &mut fs::File::create(waypoint_file_path)?, - genesis_waypoint.to_string().as_bytes(), - )?; - - aptos_config::config::sanitize_node_config(&mut validators[0].config)?; - - // Return the validator config - validators[0].config.clone() + framework, + rng, + )? }; // Prepare log file since we cannot automatically route logs to stderr @@ -311,7 +294,7 @@ where println!("\tTest dir: {:?}", test_dir); println!("\tAptos root key path: {:?}", aptos_root_key_path); println!("\tWaypoint: {}", config.base.waypoint.genesis_waypoint()); - println!("\tChainId: {}", ChainId::test()); + println!("\tChainId: {}", ChainId::test().id()); println!("\tREST API endpoint: http://{}", &config.api.address); println!( "\tMetrics endpoint: http://{}:{}/metrics", @@ -322,9 +305,10 @@ where &config.full_node_networks[0].listen_address ); if config.indexer_grpc.enabled { - if let Some(ref indexer_grpc_address) = config.indexer_grpc.address { - println!("\tIndexer gRPC endpoint: {}", indexer_grpc_address); - } + println!( + "\tIndexer gRPC node stream endpoint: {}", + config.indexer_grpc.address + ); } if enable_lazy_mode { println!("\tLazy mode is enabled"); @@ -335,26 +319,31 @@ where } /// Creates a single node test config, with a few config tweaks to reduce -/// the overhead of running the node on a local machine. -fn create_single_node_test_config( - config_path: Option, - test_config_override_path: Option, +/// the overhead of running the node on a local machine. It writes necessary +/// configuration artifacts (e.g. the mint key) to disk. +pub fn create_single_node_test_config( + config_path: &Option, + test_config_override_path: &Option, + test_dir: &Path, + random_ports: bool, enable_lazy_mode: bool, -) -> anyhow::Result { + framework: &ReleaseBundle, + rng: R, +) -> anyhow::Result +where + R: rand::RngCore + rand::CryptoRng, +{ let mut node_config = match test_config_override_path { // If a config override path was provided, merge it with the default config Some(test_config_override_path) => { - let mut contents = String::new(); - fs::File::open(&test_config_override_path) - .map_err(|e| { - anyhow!( - "Unable to open config override file {:?}. Error: {}", - test_config_override_path, - e - ) - })? - .read_to_string(&mut contents)?; - let values = serde_yaml::from_str::(&contents).map_err(|e| { + let reader = fs::File::open(test_config_override_path).map_err(|e| { + anyhow!( + "Unable to open config override file {:?}. Error: {}", + test_config_override_path, + e + ) + })?; + let values: serde_yaml::Value = serde_yaml::from_reader(&reader).map_err(|e| { anyhow!( "Unable to read config override file as YAML {:?}. 
Error: {}", test_config_override_path, @@ -428,7 +417,7 @@ fn create_single_node_test_config( // If a config path was provided, use that as the template if let Some(config_path) = config_path { - node_config = NodeConfig::load_config(&config_path).map_err(|e| { + node_config = NodeConfig::load_config(config_path).map_err(|e| { anyhow!( "Unable to load config from path: {:?}. Error: {:?}", config_path, @@ -450,6 +439,38 @@ fn create_single_node_test_config( node_config.consensus.quorum_store_poll_time_ms = 3_600_000; } + // The validator builder puts the first node in the 0 directory + let aptos_root_key_path = test_dir.join("mint.key"); + + // Build genesis and the validator node + let builder = aptos_genesis::builder::Builder::new(test_dir, framework.clone())? + .with_init_config(Some(Arc::new(move |_, config, _| { + *config = node_config.clone(); + }))) + .with_init_genesis_config(Some(Arc::new(|genesis_config| { + genesis_config.allow_new_validators = true; + genesis_config.epoch_duration_secs = EPOCH_LENGTH_SECS; + genesis_config.recurring_lockup_duration_secs = 7200; + }))) + .with_randomize_first_validator_ports(random_ports); + let (root_key, _genesis, genesis_waypoint, mut validators) = builder.build(rng)?; + + // Write the mint key to disk + let serialized_keys = bcs::to_bytes(&root_key)?; + let mut key_file = fs::File::create(aptos_root_key_path)?; + key_file.write_all(&serialized_keys)?; + + // Build a waypoint file so that clients / docker can grab it easily + let waypoint_file_path = test_dir.join("waypoint.txt"); + Write::write_all( + &mut fs::File::create(waypoint_file_path)?, + genesis_waypoint.to_string().as_bytes(), + )?; + + aptos_config::config::sanitize_node_config(validators[0].config.override_config_mut())?; + + let node_config = validators[0].config.override_config().clone(); + Ok(node_config) } diff --git a/aptos-node/src/network.rs b/aptos-node/src/network.rs index 82dee0bf18912..d6117bf6250d8 100644 --- a/aptos-node/src/network.rs +++ b/aptos-node/src/network.rs @@ -1,6 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use crate::services::start_netbench_service; use aptos_channels::{self, aptos_channel, message_queues::QueueStyle}; use aptos_config::{ config::{NetworkConfig, NodeConfig}, @@ -21,6 +22,7 @@ use aptos_network::{ }, ProtocolId, }; +use aptos_network_benchmark::NetbenchMessage; use aptos_network_builder::builder::NetworkBuilder; use aptos_peer_monitoring_service_types::PeerMonitoringServiceMessage; use aptos_storage_service_types::StorageServiceMessage; @@ -124,6 +126,34 @@ pub fn storage_service_network_configuration(node_config: &NodeConfig) -> Networ NetworkApplicationConfig::new(network_client_config, network_service_config) } +pub fn netbench_network_configuration( + node_config: &NodeConfig, +) -> Option { + let cfg = match node_config.netbench { + None => return None, + Some(x) => x, + }; + if !cfg.enabled { + return None; + } + let direct_send_protocols = vec![ProtocolId::NetbenchDirectSend]; + let rpc_protocols = vec![ProtocolId::NetbenchRpc]; + let network_client_config = + NetworkClientConfig::new(direct_send_protocols.clone(), rpc_protocols.clone()); + let max_network_channel_size = cfg.max_network_channel_size as usize; + let network_service_config = NetworkServiceConfig::new( + direct_send_protocols, + rpc_protocols, + aptos_channel::Config::new(max_network_channel_size) + .queue_style(QueueStyle::FIFO) + .counters(&aptos_network_benchmark::PENDING_NETBENCH_NETWORK_EVENTS), + ); + 
Some(NetworkApplicationConfig::new( + network_client_config, + network_service_config, + )) +} + /// Extracts all network configs from the given node config fn extract_network_configs(node_config: &NodeConfig) -> Vec { let mut network_configs: Vec = node_config.full_node_networks.to_vec(); @@ -173,6 +203,7 @@ pub fn setup_networks_and_get_interfaces( let mut mempool_network_handles = vec![]; let mut peer_monitoring_service_network_handles = vec![]; let mut storage_service_network_handles = vec![]; + let mut netbench_handles = Vec::>::new(); for network_config in network_configs.into_iter() { // Create a network runtime for the config let runtime = create_network_runtime(&network_config); @@ -233,6 +264,17 @@ pub fn setup_networks_and_get_interfaces( ); storage_service_network_handles.push(storage_service_network_handle); + // Register benchmark test service + if let Some(app_config) = netbench_network_configuration(node_config) { + let netbench_handle = register_client_and_service_with_network( + &mut network_builder, + network_id, + &network_config, + app_config, + ); + netbench_handles.push(netbench_handle); + } + // Build and start the network on the runtime network_builder.build(runtime.handle().clone()); network_builder.start(); @@ -255,9 +297,22 @@ pub fn setup_networks_and_get_interfaces( mempool_network_handles, peer_monitoring_service_network_handles, storage_service_network_handles, - peers_and_metadata, + peers_and_metadata.clone(), ); + if !netbench_handles.is_empty() { + let netbench_interfaces = create_network_interfaces( + netbench_handles, + netbench_network_configuration(node_config).unwrap(), + peers_and_metadata, + ); + let netbench_service_threads = node_config.netbench.unwrap().netbench_service_threads; + let netbench_runtime = + aptos_runtimes::spawn_named_runtime("benchmark".into(), netbench_service_threads); + start_netbench_service(node_config, netbench_interfaces, netbench_runtime.handle()); + network_runtimes.push(netbench_runtime); + } + ( network_runtimes, consensus_interfaces, diff --git a/aptos-node/src/services.rs b/aptos-node/src/services.rs index 168f9738cadfa..14ac100635a58 100644 --- a/aptos-node/src/services.rs +++ b/aptos-node/src/services.rs @@ -12,6 +12,7 @@ use aptos_logger::{debug, telemetry_log_writer::TelemetryLog, LoggerFilterUpdate use aptos_mempool::{network::MempoolSyncMsg, MempoolClientRequest, QuorumStoreRequest}; use aptos_mempool_notifications::MempoolNotificationListener; use aptos_network::application::{interface::NetworkClientInterface, storage::PeersAndMetadata}; +use aptos_network_benchmark::{run_netbench_service, NetbenchMessage}; use aptos_peer_monitoring_service_server::{ network::PeerMonitoringServiceNetworkEvents, storage::StorageReader, PeerMonitoringServiceServer, @@ -22,7 +23,7 @@ use aptos_time_service::TimeService; use aptos_types::chain_id::ChainId; use futures::channel::{mpsc, mpsc::Sender}; use std::{sync::Arc, time::Instant}; -use tokio::runtime::Runtime; +use tokio::runtime::{Handle, Runtime}; const AC_SMP_CHANNEL_BUFFER_SIZE: usize = 1_024; const INTRA_NODE_CHANNEL_BUFFER_SIZE: usize = 1; @@ -184,6 +185,20 @@ pub fn start_peer_monitoring_service( peer_monitoring_service_runtime } +pub fn start_netbench_service( + node_config: &NodeConfig, + network_interfaces: ApplicationNetworkInterfaces, + runtime: &Handle, +) { + let network_client = network_interfaces.network_client; + runtime.spawn(run_netbench_service( + node_config.clone(), + network_client, + network_interfaces.network_service_events, + TimeService::real(), + 
)); +} + /// Starts the telemetry service and grabs the build information pub fn start_telemetry_service( node_config: &NodeConfig, diff --git a/aptos-node/src/storage.rs b/aptos-node/src/storage.rs index 84b44a7b3d82a..fe75648d83389 100644 --- a/aptos-node/src/storage.rs +++ b/aptos-node/src/storage.rs @@ -1,44 +1,82 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use anyhow::anyhow; +use anyhow::{anyhow, Result}; +use aptos_backup_service::start_backup_service; use aptos_config::{config::NodeConfig, utils::get_genesis_txn}; -use aptos_db::AptosDB; +use aptos_db::{fast_sync_storage_wrapper::FastSyncStorageWrapper, AptosDB}; use aptos_executor::db_bootstrapper::maybe_bootstrap; use aptos_logger::{debug, info}; use aptos_storage_interface::{DbReader, DbReaderWriter}; use aptos_types::waypoint::Waypoint; use aptos_vm::AptosVM; -use std::{fs, net::SocketAddr, path::Path, sync::Arc, time::Instant}; +use either::Either; +use std::{fs, path::Path, sync::Arc, time::Instant}; use tokio::runtime::Runtime; +pub(crate) fn maybe_apply_genesis(db_rw: &DbReaderWriter, node_config: &NodeConfig) -> Result<()> { + let genesis_waypoint = node_config.base.waypoint.genesis_waypoint(); + if let Some(genesis) = get_genesis_txn(node_config) { + maybe_bootstrap::(db_rw, genesis, genesis_waypoint) + .map_err(|err| anyhow!("DB failed to bootstrap {}", err))?; + } else { + info ! ("Genesis txn not provided! This is fine only if you don't expect to apply it. Otherwise, the config is incorrect!"); + } + Ok(()) +} + #[cfg(not(feature = "consensus-only-perf-test"))] pub(crate) fn bootstrap_db( - aptos_db: AptosDB, - backup_service_address: SocketAddr, -) -> (Arc, DbReaderWriter, Option) { - use aptos_backup_service::start_backup_service; - - let (aptos_db, db_rw) = DbReaderWriter::wrap(aptos_db); - let db_backup_service = start_backup_service(backup_service_address, aptos_db.clone()); - (aptos_db, db_rw, Some(db_backup_service)) + node_config: &NodeConfig, +) -> Result<(Arc, DbReaderWriter, Option)> { + let (aptos_db_reader, db_rw, backup_service) = + match FastSyncStorageWrapper::initialize_dbs(node_config)? { + Either::Left(db) => { + let (db_arc, db_rw) = DbReaderWriter::wrap(db); + let db_backup_service = start_backup_service( + node_config.storage.backup_service_address, + db_arc.clone(), + ); + maybe_apply_genesis(&db_rw, node_config)?; + (db_arc as Arc, db_rw, Some(db_backup_service)) + }, + Either::Right(fast_sync_db_wrapper) => { + let temp_db = fast_sync_db_wrapper.get_temporary_db_with_genesis(); + maybe_apply_genesis(&DbReaderWriter::from_arc(temp_db), node_config)?; + + let (db_arc, db_rw) = DbReaderWriter::wrap(fast_sync_db_wrapper); + let fast_sync_db = db_arc.get_fast_sync_db(); + let db_backup_service = + start_backup_service(node_config.storage.backup_service_address, fast_sync_db); + + (db_arc as Arc, db_rw, Some(db_backup_service)) + }, + }; + + Ok((aptos_db_reader, db_rw, backup_service)) } /// In consensus-only mode, return a in-memory based [FakeAptosDB] and /// do not run the backup service. 
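The reworked `bootstrap_db` above picks between a regular `AptosDB` and the fast-sync wrapper by matching on `either::Either` (the same `either` crate this change adds to `aptos-node`). A minimal sketch of that dispatch pattern, with placeholder database types standing in for the real storage interfaces:

```rust
use either::Either;
use std::sync::Arc;

// Placeholder stand-ins for the real DbReader / FastSyncStorageWrapper types.
trait DbReader {
    fn name(&self) -> &'static str;
}

struct PlainDb;
struct FastSyncDb;

impl DbReader for PlainDb {
    fn name(&self) -> &'static str {
        "plain"
    }
}

impl DbReader for FastSyncDb {
    fn name(&self) -> &'static str {
        "fast-sync"
    }
}

// Stand-in for the initializer: pick a backend based on (here) a boolean flag,
// the way FastSyncStorageWrapper::initialize_dbs decides based on the node config.
fn initialize_dbs(fast_sync: bool) -> Either<PlainDb, FastSyncDb> {
    if fast_sync {
        Either::Right(FastSyncDb)
    } else {
        Either::Left(PlainDb)
    }
}

// Mirrors the match in bootstrap_db: each branch wires up its own backend,
// but both end up behind the same DbReader interface.
fn bootstrap(fast_sync: bool) -> Arc<dyn DbReader> {
    match initialize_dbs(fast_sync) {
        Either::Left(db) => Arc::new(db) as Arc<dyn DbReader>,
        Either::Right(db) => Arc::new(db) as Arc<dyn DbReader>,
    }
}

fn main() {
    assert_eq!(bootstrap(false).name(), "plain");
    assert_eq!(bootstrap(true).name(), "fast-sync");
}
```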
#[cfg(feature = "consensus-only-perf-test")] pub(crate) fn bootstrap_db( - aptos_db: AptosDB, - _backup_service_address: SocketAddr, -) -> ( - Arc, - DbReaderWriter, - Option, -) { + node_config: &NodeConfig, +) -> Result<(Arc, DbReaderWriter, Option)> { use aptos_db::fake_aptosdb::FakeAptosDB; + let aptos_db = AptosDB::open( + &node_config.storage.dir(), + false, /* readonly */ + node_config.storage.storage_pruner_config, + node_config.storage.rocksdb_configs, + node_config.storage.enable_indexer, + node_config.storage.buffered_state_target_items, + node_config.storage.max_num_nodes_per_lru_cache_shard, + ) + .map_err(|err| anyhow!("DB failed to open {}", err))?; let (aptos_db, db_rw) = DbReaderWriter::wrap(FakeAptosDB::new(aptos_db)); - (aptos_db, db_rw, None) + maybe_apply_genesis(&db_rw, node_config)?; + Ok((aptos_db, db_rw, None)) } /// Creates a RocksDb checkpoint for the consensus_db, state_sync_db, @@ -87,7 +125,7 @@ fn create_rocksdb_checkpoint_and_change_working_dir( /// the various handles. pub fn initialize_database_and_checkpoints( node_config: &mut NodeConfig, -) -> anyhow::Result<(Arc, DbReaderWriter, Option, Waypoint)> { +) -> Result<(Arc, DbReaderWriter, Option, Waypoint)> { // If required, create RocksDB checkpoints and change the working directory. // This is test-only. if let Some(working_dir) = node_config.base.working_dir.clone() { @@ -96,28 +134,7 @@ pub fn initialize_database_and_checkpoints( // Open the database let instant = Instant::now(); - let aptos_db = AptosDB::open( - &node_config.storage.dir(), - false, /* readonly */ - node_config.storage.storage_pruner_config, - node_config.storage.rocksdb_configs, - node_config.storage.enable_indexer, - node_config.storage.buffered_state_target_items, - node_config.storage.max_num_nodes_per_lru_cache_shard, - ) - .map_err(|err| anyhow!("DB failed to open {}", err))?; - let (aptos_db, db_rw, backup_service) = - bootstrap_db(aptos_db, node_config.storage.backup_service_address); - - // TODO: handle non-genesis waypoints for state sync! - // If there's a genesis txn and waypoint, commit it if the result matches. - let genesis_waypoint = node_config.base.waypoint.genesis_waypoint(); - if let Some(genesis) = get_genesis_txn(node_config) { - maybe_bootstrap::(&db_rw, genesis, genesis_waypoint) - .map_err(|err| anyhow!("DB failed to bootstrap {}", err))?; - } else { - info!("Genesis txn not provided! This is fine only if you don't expect to apply it. 
Otherwise, the config is incorrect!"); - } + let (aptos_db, db_rw, backup_service) = bootstrap_db(node_config)?; // Log the duration to open storage debug!( @@ -125,5 +142,10 @@ pub fn initialize_database_and_checkpoints( instant.elapsed().as_millis() ); - Ok((aptos_db, db_rw, backup_service, genesis_waypoint)) + Ok(( + aptos_db, + db_rw, + backup_service, + node_config.base.waypoint.genesis_waypoint(), + )) } diff --git a/aptos-node/src/tests.rs b/aptos-node/src/tests.rs index dd170ddd5578a..e8102374b1a07 100644 --- a/aptos-node/src/tests.rs +++ b/aptos-node/src/tests.rs @@ -8,6 +8,7 @@ use aptos_infallible::RwLock; use aptos_storage_interface::{DbReader, DbReaderWriter, DbWriter}; use aptos_temppath::TempPath; use aptos_types::{chain_id::ChainId, waypoint::Waypoint}; +use rand::SeedableRng; use std::{fs, sync::Arc}; /// A mock database implementing DbReader and DbWriter @@ -48,10 +49,11 @@ fn test_aptos_vm_does_not_have_test_natives() { aptos_vm::natives::assert_no_test_natives(crate::utils::ERROR_MSG_BAD_FEATURE_FLAGS) } +// This test confirms that the overriding behavior works as intended. #[test] fn test_create_single_node_test_config() { - // create a test config override and merge it with the default config - // this will get cleaned up by the tempdir when it goes out of scope + // Create a test config override and merge it with the default config. + // This will get cleaned up by the tempdir when it goes out of scope. let test_dir = aptos_temppath::TempPath::new().as_ref().to_path_buf(); fs::DirBuilder::new() .recursive(true) @@ -82,8 +84,16 @@ fn test_create_single_node_test_config() { // merge it let default_node_config = NodeConfig::get_default_validator_config(); - let merged_config = - create_single_node_test_config(None, Some(config_override_path), false).unwrap(); + let merged_config = create_single_node_test_config( + &None, + &Some(config_override_path), + &test_dir, + false, + false, + aptos_cached_packages::head_release_bundle(), + rand::rngs::StdRng::from_entropy(), + ) + .unwrap(); // overriden configs assert!(merged_config.storage.enable_indexer); diff --git a/config/src/config/api_config.rs b/config/src/config/api_config.rs index 621aca8adc25d..04ad04ca16553 100644 --- a/config/src/config/api_config.rs +++ b/config/src/config/api_config.rs @@ -135,7 +135,7 @@ impl ApiConfig { impl ConfigSanitizer for ApiConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, node_type: NodeType, chain_id: ChainId, ) -> Result<(), Error> { @@ -176,7 +176,7 @@ mod tests { #[test] fn test_sanitize_disabled_api() { // Create a node config with the API disabled - let mut node_config = NodeConfig { + let node_config = NodeConfig { api: ApiConfig { enabled: false, failpoints_enabled: true, @@ -186,13 +186,13 @@ mod tests { }; // Sanitize the config and verify that it succeeds - ApiConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::mainnet()).unwrap(); + ApiConfig::sanitize(&node_config, NodeType::Validator, ChainId::mainnet()).unwrap(); } #[test] fn test_sanitize_failpoints_on_mainnet() { // Create a node config with failpoints enabled - let mut node_config = NodeConfig { + let node_config = NodeConfig { api: ApiConfig { enabled: true, failpoints_enabled: true, @@ -203,15 +203,15 @@ mod tests { // Sanitize the config and verify that it fails because // failpoints are not supported on mainnet. 
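A recurring change throughout the config crate in this diff: `ConfigSanitizer::sanitize` now takes `&NodeConfig` instead of `&mut NodeConfig`, so sanitizers can only validate, while any mutation moves to the optimizers. A minimal sketch of that read-only validation pattern, using a hypothetical trait and config type rather than the real `aptos-config` ones:

```rust
struct Config {
    failpoints_enabled: bool,
    is_mainnet: bool,
}

#[derive(Debug)]
enum SanitizeError {
    Failed(&'static str),
}

// Validation only: the sanitizer takes a shared reference and cannot mutate the config.
trait Sanitize {
    fn sanitize(config: &Config) -> Result<(), SanitizeError>;
}

struct FailpointsSanitizer;

impl Sanitize for FailpointsSanitizer {
    fn sanitize(config: &Config) -> Result<(), SanitizeError> {
        if config.is_mainnet && config.failpoints_enabled {
            return Err(SanitizeError::Failed(
                "failpoints are not supported on mainnet",
            ));
        }
        Ok(())
    }
}

fn main() {
    let bad = Config { failpoints_enabled: true, is_mainnet: true };
    match FailpointsSanitizer::sanitize(&bad) {
        Err(SanitizeError::Failed(msg)) => println!("rejected: {}", msg),
        Ok(()) => unreachable!("mainnet failpoints should be rejected"),
    }

    let good = Config { failpoints_enabled: false, is_mainnet: true };
    assert!(FailpointsSanitizer::sanitize(&good).is_ok());
}
```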
- let error = ApiConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::mainnet()) - .unwrap_err(); + let error = + ApiConfig::sanitize(&node_config, NodeType::Validator, ChainId::mainnet()).unwrap_err(); assert!(matches!(error, Error::ConfigSanitizerFailed(_, _))); } #[test] fn test_sanitize_invalid_workers() { // Create a node config with failpoints enabled - let mut node_config = NodeConfig { + let node_config = NodeConfig { api: ApiConfig { enabled: true, max_runtime_workers: None, @@ -223,8 +223,8 @@ mod tests { // Sanitize the config and verify that it fails because // the runtime worker multiplier is invalid. - let error = ApiConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::mainnet()) - .unwrap_err(); + let error = + ApiConfig::sanitize(&node_config, NodeType::Validator, ChainId::mainnet()).unwrap_err(); assert!(matches!(error, Error::ConfigSanitizerFailed(_, _))); } } diff --git a/config/src/config/base_config.rs b/config/src/config/base_config.rs index b06a5a5e8e268..597253d0d298e 100644 --- a/config/src/config/base_config.rs +++ b/config/src/config/base_config.rs @@ -34,7 +34,7 @@ impl Default for BaseConfig { impl ConfigSanitizer for BaseConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, _node_type: NodeType, _chain_id: ChainId, ) -> Result<(), Error> { @@ -179,7 +179,7 @@ mod test { #[test] fn test_sanitize_valid_base_config() { // Create a node config with a waypoint - let mut node_config = NodeConfig { + let node_config = NodeConfig { base: BaseConfig { waypoint: WaypointConfig::FromConfig(Waypoint::default()), ..Default::default() @@ -188,13 +188,13 @@ mod test { }; // Sanitize the config and verify that it passes - BaseConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::mainnet()).unwrap(); + BaseConfig::sanitize(&node_config, NodeType::Validator, ChainId::mainnet()).unwrap(); } #[test] fn test_sanitize_missing_waypoint() { // Create a node config with a missing waypoint - let mut node_config = NodeConfig { + let node_config = NodeConfig { base: BaseConfig { waypoint: WaypointConfig::None, ..Default::default() @@ -203,7 +203,7 @@ mod test { }; // Sanitize the config and verify that it fails because of the missing waypoint - let error = BaseConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::mainnet()) + let error = BaseConfig::sanitize(&node_config, NodeType::Validator, ChainId::mainnet()) .unwrap_err(); assert!(matches!(error, Error::ConfigSanitizerFailed(_, _))); } diff --git a/config/src/config/config_optimizer.rs b/config/src/config/config_optimizer.rs index a857c2b56dc05..4b4931f357dba 100644 --- a/config/src/config/config_optimizer.rs +++ b/config/src/config/config_optimizer.rs @@ -3,9 +3,9 @@ use crate::{ config::{ - node_config_loader::NodeType, utils::get_config_name, Error, InspectionServiceConfig, - LoggerConfig, MempoolConfig, NodeConfig, Peer, PeerMonitoringServiceConfig, PeerRole, - PeerSet, StateSyncConfig, + node_config_loader::NodeType, utils::get_config_name, Error, IndexerConfig, + InspectionServiceConfig, LoggerConfig, MempoolConfig, NodeConfig, Peer, + PeerMonitoringServiceConfig, PeerRole, PeerSet, StateSyncConfig, }, network_id::NetworkId, }; @@ -17,6 +17,7 @@ use std::{collections::HashMap, str::FromStr}; // Useful optimizer constants const OPTIMIZER_STRING: &str = "Optimizer"; +const ALL_NETWORKS_OPTIMIZER_NAME: &str = "AllNetworkConfigOptimizer"; const PUBLIC_NETWORK_OPTIMIZER_NAME: &str = "PublicNetworkConfigOptimizer"; const VALIDATOR_NETWORK_OPTIMIZER_NAME: &str = 
"ValidatorNetworkConfigOptimizer"; @@ -96,6 +97,9 @@ impl ConfigOptimizer for NodeConfig { ) -> Result { // Optimize only the relevant sub-configs let mut optimizers_with_modifications = vec![]; + if IndexerConfig::optimize(node_config, local_config_yaml, node_type, chain_id)? { + optimizers_with_modifications.push(IndexerConfig::get_optimizer_name()); + } if InspectionServiceConfig::optimize(node_config, local_config_yaml, node_type, chain_id)? { optimizers_with_modifications.push(InspectionServiceConfig::get_optimizer_name()); } @@ -116,6 +120,9 @@ impl ConfigOptimizer for NodeConfig { if StateSyncConfig::optimize(node_config, local_config_yaml, node_type, chain_id)? { optimizers_with_modifications.push(StateSyncConfig::get_optimizer_name()); } + if optimize_all_network_configs(node_config, local_config_yaml, node_type, chain_id)? { + optimizers_with_modifications.push(ALL_NETWORKS_OPTIMIZER_NAME.to_string()); + } if optimize_public_network_config(node_config, local_config_yaml, node_type, chain_id)? { optimizers_with_modifications.push(PUBLIC_NETWORK_OPTIMIZER_NAME.to_string()); } @@ -128,6 +135,30 @@ impl ConfigOptimizer for NodeConfig { } } +/// Optimizes all network configs according to the node type and chain ID +fn optimize_all_network_configs( + node_config: &mut NodeConfig, + _local_config_yaml: &Value, + _node_type: NodeType, + _chain_id: ChainId, +) -> Result { + let mut modified_config = false; + + // Set the listener address and prepare the node identities for the validator network + if let Some(validator_network) = &mut node_config.validator_network { + validator_network.set_listen_address_and_prepare_identity()?; + modified_config = true; + } + + // Set the listener address and prepare the node identities for the fullnode networks + for fullnode_network in &mut node_config.full_node_networks { + fullnode_network.set_listen_address_and_prepare_identity()?; + modified_config = true; + } + + Ok(modified_config) +} + /// Optimize the public network config according to the node type and chain ID fn optimize_public_network_config( node_config: &mut NodeConfig, diff --git a/config/src/config/config_sanitizer.rs b/config/src/config/config_sanitizer.rs index c5874c90184b8..da6ca71e20ec6 100644 --- a/config/src/config/config_sanitizer.rs +++ b/config/src/config/config_sanitizer.rs @@ -4,9 +4,9 @@ use crate::config::{ node_config_loader::NodeType, utils::{are_failpoints_enabled, get_config_name}, - ApiConfig, BaseConfig, ConsensusConfig, Error, ExecutionConfig, IndexerConfig, - IndexerGrpcConfig, InspectionServiceConfig, LoggerConfig, MempoolConfig, NodeConfig, - PeerMonitoringServiceConfig, StateSyncConfig, StorageConfig, + ApiConfig, BaseConfig, ConsensusConfig, Error, ExecutionConfig, IndexerGrpcConfig, + InspectionServiceConfig, LoggerConfig, MempoolConfig, NodeConfig, PeerMonitoringServiceConfig, + StateSyncConfig, StorageConfig, }; use aptos_types::chain_id::ChainId; use std::collections::HashSet; @@ -27,7 +27,7 @@ pub trait ConfigSanitizer { /// Validate and process the config according to the given node type and chain ID fn sanitize( - _node_config: &mut NodeConfig, + _node_config: &NodeConfig, _node_type: NodeType, _chain_id: ChainId, ) -> Result<(), Error> { @@ -37,7 +37,7 @@ pub trait ConfigSanitizer { impl ConfigSanitizer for NodeConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, node_type: NodeType, chain_id: ChainId, ) -> Result<(), Error> { @@ -48,7 +48,6 @@ impl ConfigSanitizer for NodeConfig { ExecutionConfig::sanitize(node_config, 
node_type, chain_id)?; sanitize_failpoints_config(node_config, node_type, chain_id)?; sanitize_fullnode_network_configs(node_config, node_type, chain_id)?; - IndexerConfig::sanitize(node_config, node_type, chain_id)?; IndexerGrpcConfig::sanitize(node_config, node_type, chain_id)?; InspectionServiceConfig::sanitize(node_config, node_type, chain_id)?; LoggerConfig::sanitize(node_config, node_type, chain_id)?; @@ -64,7 +63,7 @@ impl ConfigSanitizer for NodeConfig { /// Sanitize the failpoints config according to the node role and chain ID fn sanitize_failpoints_config( - node_config: &mut NodeConfig, + node_config: &NodeConfig, _node_type: NodeType, chain_id: ChainId, ) -> Result<(), Error> { @@ -100,12 +99,12 @@ fn sanitize_failpoints_config( /// Sanitize the fullnode network configs according to the node role and chain ID fn sanitize_fullnode_network_configs( - node_config: &mut NodeConfig, + node_config: &NodeConfig, node_type: NodeType, _chain_id: ChainId, ) -> Result<(), Error> { let sanitizer_name = FULLNODE_NETWORKS_SANITIZER_NAME.to_string(); - let fullnode_networks = &mut node_config.full_node_networks; + let fullnode_networks = &node_config.full_node_networks; // Verify that the fullnode network configs are not empty for fullnodes if fullnode_networks.is_empty() && !node_type.is_validator() { @@ -138,9 +137,6 @@ fn sanitize_fullnode_network_configs( ), )); } - - // Prepare the network id - fullnode_network_config.set_listen_address_and_prepare_identity()?; } Ok(()) @@ -148,12 +144,12 @@ fn sanitize_fullnode_network_configs( /// Sanitize the validator network config according to the node role and chain ID fn sanitize_validator_network_config( - node_config: &mut NodeConfig, + node_config: &NodeConfig, node_type: NodeType, _chain_id: ChainId, ) -> Result<(), Error> { let sanitizer_name = VALIDATOR_NETWORK_SANITIZER_NAME.to_string(); - let validator_network = &mut node_config.validator_network; + let validator_network = &node_config.validator_network; // Verify that the validator network config is not empty for validators if validator_network.is_none() && node_type.is_validator() { @@ -188,9 +184,6 @@ fn sanitize_validator_network_config( "Mutual authentication must be enabled for the validator network!".into(), )); } - - // Prepare the network id - validator_network_config.set_listen_address_and_prepare_identity()?; } Ok(()) @@ -204,14 +197,14 @@ mod tests { #[test] fn test_sanitize_missing_pfn_network_configs() { // Create a PFN config with empty fullnode network configs - let mut node_config = NodeConfig { + let node_config = NodeConfig { full_node_networks: vec![], ..Default::default() }; // Sanitize the config and verify that it fails let error = sanitize_fullnode_network_configs( - &mut node_config, + &node_config, NodeType::PublicFullnode, ChainId::mainnet(), ) @@ -222,14 +215,14 @@ mod tests { #[test] fn test_sanitize_missing_vfn_network_configs() { // Create a VFN config with empty fullnode network configs - let mut node_config = NodeConfig { + let node_config = NodeConfig { full_node_networks: vec![], ..Default::default() }; // Sanitize the PFN config and verify that it fails let error = sanitize_fullnode_network_configs( - &mut node_config, + &node_config, NodeType::ValidatorFullnode, ChainId::testnet(), ) @@ -240,7 +233,7 @@ mod tests { #[test] fn test_sanitize_validator_network_for_fullnode() { // Create a fullnode config that includes a validator network - let mut node_config = NodeConfig { + let node_config = NodeConfig { full_node_networks: vec![NetworkConfig { 
network_id: NetworkId::Validator, ..Default::default() @@ -250,7 +243,7 @@ mod tests { // Sanitize the config and verify that it fails let error = sanitize_fullnode_network_configs( - &mut node_config, + &node_config, NodeType::PublicFullnode, ChainId::testnet(), ) @@ -261,7 +254,7 @@ mod tests { #[test] fn test_sanitize_duplicate_fullnode_network_configs() { // Create a node config with multiple fullnode network configs with the same network id - let mut node_config = NodeConfig { + let node_config = NodeConfig { full_node_networks: vec![ NetworkConfig { network_id: NetworkId::Public, @@ -277,7 +270,7 @@ mod tests { // Sanitize the config and verify that it fails let error = sanitize_fullnode_network_configs( - &mut node_config, + &node_config, NodeType::ValidatorFullnode, ChainId::testnet(), ) @@ -288,14 +281,14 @@ mod tests { #[test] fn test_sanitize_missing_validator_network_config() { // Create a node config with an empty validator network config - let mut node_config = NodeConfig { + let node_config = NodeConfig { validator_network: None, ..Default::default() }; // Sanitize the config and verify that it fails let error = sanitize_validator_network_config( - &mut node_config, + &node_config, NodeType::Validator, ChainId::testnet(), ) @@ -306,7 +299,7 @@ mod tests { #[test] fn test_sanitize_validator_network_fullnode() { // Create a validator network config - let mut node_config = NodeConfig { + let node_config = NodeConfig { validator_network: Some(NetworkConfig { network_id: NetworkId::Validator, mutual_authentication: true, @@ -317,7 +310,7 @@ mod tests { // Sanitize the config (for a fullnode) and verify that it fails let error = sanitize_validator_network_config( - &mut node_config, + &node_config, NodeType::PublicFullnode, ChainId::testnet(), ) @@ -328,7 +321,7 @@ mod tests { #[test] fn test_sanitize_validator_disabled_authentication() { // Create a validator config with disabled mutual authentication - let mut node_config = NodeConfig { + let node_config = NodeConfig { validator_network: Some(NetworkConfig { network_id: NetworkId::Validator, mutual_authentication: false, @@ -339,7 +332,7 @@ mod tests { // Sanitize the config and verify that it fails let error = sanitize_validator_network_config( - &mut node_config, + &node_config, NodeType::Validator, ChainId::testnet(), ) @@ -350,7 +343,7 @@ mod tests { #[test] fn test_sanitize_validator_incorrect_network_id() { // Create a validator config with the wrong network ID - let mut node_config = NodeConfig { + let node_config = NodeConfig { validator_network: Some(NetworkConfig { network_id: NetworkId::Public, ..Default::default() @@ -360,7 +353,7 @@ mod tests { // Sanitize the config and verify that it fails let error = sanitize_validator_network_config( - &mut node_config, + &node_config, NodeType::Validator, ChainId::testnet(), ) diff --git a/config/src/config/consensus_config.rs b/config/src/config/consensus_config.rs index d240ab9f8d5bc..9c4f699c83d59 100644 --- a/config/src/config/consensus_config.rs +++ b/config/src/config/consensus_config.rs @@ -351,7 +351,7 @@ impl ConsensusConfig { impl ConfigSanitizer for ConsensusConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, node_type: NodeType, chain_id: ChainId, ) -> Result<(), Error> { diff --git a/config/src/config/execution_config.rs b/config/src/config/execution_config.rs index 3dbc4b5f8f9ba..cafe02d956024 100644 --- a/config/src/config/execution_config.rs +++ b/config/src/config/execution_config.rs @@ -127,7 +127,7 @@ impl ExecutionConfig { impl 
ConfigSanitizer for ExecutionConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, _node_type: NodeType, chain_id: ChainId, ) -> Result<(), Error> { @@ -166,7 +166,7 @@ mod test { #[test] fn test_sanitize_valid_execution_config() { // Create a node config with a valid execution config - let mut node_config = NodeConfig { + let node_config = NodeConfig { execution: ExecutionConfig { paranoid_hot_potato_verification: true, paranoid_type_verification: true, @@ -176,14 +176,13 @@ mod test { }; // Sanitize the config and verify that it succeeds - ExecutionConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::mainnet()) - .unwrap(); + ExecutionConfig::sanitize(&node_config, NodeType::Validator, ChainId::mainnet()).unwrap(); } #[test] fn test_sanitize_hot_potato_mainnet() { // Create a node config with missing paranoid_hot_potato_verification on mainnet - let mut node_config = NodeConfig { + let node_config = NodeConfig { execution: ExecutionConfig { paranoid_hot_potato_verification: false, paranoid_type_verification: true, @@ -194,7 +193,7 @@ mod test { // Sanitize the config and verify that it fails let error = - ExecutionConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::mainnet()) + ExecutionConfig::sanitize(&node_config, NodeType::Validator, ChainId::mainnet()) .unwrap_err(); assert!(matches!(error, Error::ConfigSanitizerFailed(_, _))); } @@ -202,7 +201,7 @@ mod test { #[test] fn test_sanitize_paranoid_type_mainnet() { // Create a node config with missing paranoid_type_verification on mainnet - let mut node_config = NodeConfig { + let node_config = NodeConfig { execution: ExecutionConfig { paranoid_hot_potato_verification: true, paranoid_type_verification: false, @@ -213,7 +212,7 @@ mod test { // Sanitize the config and verify that it fails let error = - ExecutionConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::mainnet()) + ExecutionConfig::sanitize(&node_config, NodeType::Validator, ChainId::mainnet()) .unwrap_err(); assert!(matches!(error, Error::ConfigSanitizerFailed(_, _))); } diff --git a/config/src/config/gas_estimation_config.rs b/config/src/config/gas_estimation_config.rs index 2da33ed0be001..877fbd33374b4 100644 --- a/config/src/config/gas_estimation_config.rs +++ b/config/src/config/gas_estimation_config.rs @@ -50,7 +50,7 @@ impl Default for GasEstimationConfig { impl ConfigSanitizer for GasEstimationConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, _node_type: NodeType, _chain_id: ChainId, ) -> Result<(), Error> { diff --git a/config/src/config/indexer_config.rs b/config/src/config/indexer_config.rs index f2c5c192b9601..3ee4f0da949b0 100644 --- a/config/src/config/indexer_config.rs +++ b/config/src/config/indexer_config.rs @@ -2,11 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 use crate::config::{ - config_sanitizer::ConfigSanitizer, node_config_loader::NodeType, Error, NodeConfig, + config_optimizer::ConfigOptimizer, node_config_loader::NodeType, Error, NodeConfig, }; use aptos_logger::warn; use aptos_types::chain_id::ChainId; use serde::{Deserialize, Serialize}; +use serde_yaml::Value; use std::fmt::{Debug, Formatter}; // Useful indexer environment variables @@ -115,20 +116,25 @@ impl Debug for IndexerConfig { } } -impl ConfigSanitizer for IndexerConfig { - fn sanitize( +impl ConfigOptimizer for IndexerConfig { + fn optimize( node_config: &mut NodeConfig, + _local_config_yaml: &Value, _node_type: NodeType, _chain_id: ChainId, - ) -> Result<(), Error> { + ) -> Result { + // If 
the indexer is not enabled, there's nothing to do let indexer_config = &mut node_config.indexer; - - // If the indexer is not enabled, there's nothing to validate if !indexer_config.enabled { - return Ok(()); + return Ok(false); } - // Verify the postgres uri + // TODO: we really shouldn't be overriding the configs if they are + // specified in the local node config file. This optimizer should + // migrate to the pattern used by other optimizers, but for now, we'll + // just keep the legacy behaviour to avoid breaking anything. + + // Verify and set the postgres uri indexer_config.postgres_uri = env_var_or_default( INDEXER_DATABASE_URL, indexer_config.postgres_uri.clone(), @@ -138,7 +144,7 @@ impl ConfigSanitizer for IndexerConfig { )), ); - // Verify the processor + // Verify and set the processor indexer_config.processor = env_var_or_default( PROCESSOR_NAME, indexer_config @@ -148,7 +154,7 @@ impl ConfigSanitizer for IndexerConfig { None, ); - // Verify the starting version + // Verify and set the starting version indexer_config.starting_version = match std::env::var(STARTING_VERSION).ok() { None => indexer_config.starting_version, Some(starting_version) => match starting_version.parse::() { @@ -189,7 +195,7 @@ impl ConfigSanitizer for IndexerConfig { None, ); - Ok(()) + Ok(true) } } diff --git a/config/src/config/indexer_grpc_config.rs b/config/src/config/indexer_grpc_config.rs index 79645d31c65c8..4cfa2147afdec 100644 --- a/config/src/config/indexer_grpc_config.rs +++ b/config/src/config/indexer_grpc_config.rs @@ -6,63 +6,75 @@ use crate::config::{ }; use aptos_types::chain_id::ChainId; use serde::{Deserialize, Serialize}; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; // Useful indexer defaults -const DEFAULT_ADDRESS: &str = "0.0.0.0:50051"; -const DEFAULT_OUTPUT_BATCH_SIZE: u16 = 100; -const DEFAULT_PROCESSOR_BATCH_SIZE: u16 = 1000; const DEFAULT_PROCESSOR_TASK_COUNT: u16 = 20; +const DEFAULT_PROCESSOR_BATCH_SIZE: u16 = 1000; +const DEFAULT_OUTPUT_BATCH_SIZE: u16 = 100; +pub const DEFAULT_GRPC_STREAM_PORT: u16 = 50051; -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq, Serialize)] +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] #[serde(default, deny_unknown_fields)] pub struct IndexerGrpcConfig { pub enabled: bool, - /// The address that the grpc server will listen on - #[serde(default, skip_serializing_if = "Option::is_none")] - pub address: Option, + /// If true, the GRPC stream interface exposed by the data service will be used + /// instead of the standard fullnode GRPC stream interface. In other words, with + /// this enabled, you can use an indexer fullnode like it is an instance of the + /// indexer-grpc data service (aka the Transaction Stream Service API). + pub use_data_service_interface: bool, + + /// The address that the grpc server will listen on. 
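The indexer optimizer above fills each field from an environment variable when one is set and otherwise falls back to the value already in the config (the `env_var_or_default` calls, e.g. with `INDEXER_DATABASE_URL`). A simplified stand-in for that helper follows; the signature is assumed for illustration and does not match the real function exactly:

```rust
use std::env;

// Simplified stand-in: prefer the environment variable, otherwise fall back to
// the value from the node config, and print a message when neither is available.
fn env_var_or_default(
    env_var: &str,
    config_value: Option<String>,
    missing_message: Option<String>,
) -> Option<String> {
    match env::var(env_var).ok().or(config_value) {
        Some(value) => Some(value),
        None => {
            if let Some(message) = missing_message {
                eprintln!("{}", message);
            }
            None
        },
    }
}

fn main() {
    // If INDEXER_DATABASE_URL is not set in the environment, the config value wins.
    let uri = env_var_or_default(
        "INDEXER_DATABASE_URL",
        Some("postgresql://localhost/indexer".to_string()),
        Some("please set INDEXER_DATABASE_URL".to_string()),
    );
    println!("postgres uri: {:?}", uri);
}
```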
+ pub address: SocketAddr, /// Number of processor tasks to fan out - #[serde(default, skip_serializing_if = "Option::is_none")] - pub processor_task_count: Option, + pub processor_task_count: u16, /// Number of transactions each processor will process - #[serde(default, skip_serializing_if = "Option::is_none")] - pub processor_batch_size: Option, + pub processor_batch_size: u16, /// Number of transactions returned in a single stream response - #[serde(default, skip_serializing_if = "Option::is_none")] - pub output_batch_size: Option, + pub output_batch_size: u16, +} + +// Reminder, #[serde(default)] on IndexerGrpcConfig means that the default values for +// fields will come from this Default impl, unless the field has a specific +// #[serde(default)] on it (which none of the above do). +impl Default for IndexerGrpcConfig { + fn default() -> Self { + Self { + enabled: false, + use_data_service_interface: false, + address: SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::new(0, 0, 0, 0), + DEFAULT_GRPC_STREAM_PORT, + )), + processor_task_count: DEFAULT_PROCESSOR_TASK_COUNT, + processor_batch_size: DEFAULT_PROCESSOR_BATCH_SIZE, + output_batch_size: DEFAULT_OUTPUT_BATCH_SIZE, + } + } } impl ConfigSanitizer for IndexerGrpcConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, _node_type: NodeType, _chain_id: ChainId, ) -> Result<(), Error> { - let indexer_grpc_config = &mut node_config.indexer_grpc; + let sanitizer_name = Self::get_sanitizer_name(); - // If the indexer is not enabled, we don't need to do anything - if !indexer_grpc_config.enabled { + if !node_config.indexer_grpc.enabled { return Ok(()); } - // Set appropriate defaults - indexer_grpc_config.address = indexer_grpc_config - .address - .clone() - .or_else(|| Some(DEFAULT_ADDRESS.into())); - indexer_grpc_config.processor_task_count = indexer_grpc_config - .processor_task_count - .or(Some(DEFAULT_PROCESSOR_TASK_COUNT)); - indexer_grpc_config.processor_batch_size = indexer_grpc_config - .processor_batch_size - .or(Some(DEFAULT_PROCESSOR_BATCH_SIZE)); - indexer_grpc_config.output_batch_size = indexer_grpc_config - .output_batch_size - .or(Some(DEFAULT_OUTPUT_BATCH_SIZE)); - + if !node_config.storage.enable_indexer { + return Err(Error::ConfigSanitizerFailed( + sanitizer_name, + "storage.enable_indexer must be true if indexer_grpc.enabled is true".to_string(), + )); + } Ok(()) } } diff --git a/config/src/config/inspection_service_config.rs b/config/src/config/inspection_service_config.rs index 8afcd16ee7b12..878e5881f8cab 100644 --- a/config/src/config/inspection_service_config.rs +++ b/config/src/config/inspection_service_config.rs @@ -42,7 +42,7 @@ impl InspectionServiceConfig { impl ConfigSanitizer for InspectionServiceConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, node_type: NodeType, chain_id: ChainId, ) -> Result<(), Error> { @@ -189,7 +189,7 @@ mod tests { #[test] fn test_sanitize_valid_service_config() { // Create an inspection service config with the configuration endpoint enabled - let mut node_config = NodeConfig { + let node_config = NodeConfig { inspection_service: InspectionServiceConfig { expose_configuration: true, ..Default::default() @@ -199,7 +199,7 @@ mod tests { // Verify that the configuration is sanitized successfully InspectionServiceConfig::sanitize( - &mut node_config, + &node_config, NodeType::PublicFullnode, ChainId::mainnet(), ) @@ -209,7 +209,7 @@ mod tests { #[test] fn test_sanitize_config_mainnet() { // Create an inspection service config with 
the configuration endpoint enabled - let mut node_config = NodeConfig { + let node_config = NodeConfig { inspection_service: InspectionServiceConfig { expose_configuration: true, ..Default::default() @@ -219,7 +219,7 @@ mod tests { // Verify that sanitization fails for mainnet let error = InspectionServiceConfig::sanitize( - &mut node_config, + &node_config, NodeType::Validator, ChainId::mainnet(), ) diff --git a/config/src/config/logger_config.rs b/config/src/config/logger_config.rs index 12efae532d4ce..c1208f774e128 100644 --- a/config/src/config/logger_config.rs +++ b/config/src/config/logger_config.rs @@ -69,7 +69,7 @@ impl LoggerConfig { impl ConfigSanitizer for LoggerConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, _node_type: NodeType, _chain_id: ChainId, ) -> Result<(), Error> { @@ -188,7 +188,7 @@ mod tests { #[test] fn test_sanitize_missing_feature() { // Create a logger config with the tokio console port set - let mut node_config = NodeConfig { + let node_config = NodeConfig { logger: LoggerConfig { tokio_console_port: Some(100), ..Default::default() @@ -197,9 +197,8 @@ mod tests { }; // Verify that the config fails sanitization (the tokio-console feature is missing!) - let error = - LoggerConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::testnet()) - .unwrap_err(); + let error = LoggerConfig::sanitize(&node_config, NodeType::Validator, ChainId::testnet()) + .unwrap_err(); assert!(matches!(error, Error::ConfigSanitizerFailed(_, _))); } } diff --git a/config/src/config/mempool_config.rs b/config/src/config/mempool_config.rs index 0093dbbe58f8b..3e6483f4d6eec 100644 --- a/config/src/config/mempool_config.rs +++ b/config/src/config/mempool_config.rs @@ -83,7 +83,7 @@ impl Default for MempoolConfig { impl ConfigSanitizer for MempoolConfig { fn sanitize( - _node_config: &mut NodeConfig, + _node_config: &NodeConfig, _node_type: NodeType, _chain_id: ChainId, ) -> Result<(), Error> { diff --git a/config/src/config/mod.rs b/config/src/config/mod.rs index b24b0321c2434..686c43b97bcbf 100644 --- a/config/src/config/mod.rs +++ b/config/src/config/mod.rs @@ -17,9 +17,11 @@ mod indexer_grpc_config; mod inspection_service_config; mod logger_config; mod mempool_config; +mod netbench; mod network_config; mod node_config; mod node_config_loader; +mod override_node_config; mod peer_monitoring_config; mod persistable_config; mod quorum_store_config; @@ -42,9 +44,11 @@ pub use indexer_grpc_config::*; pub use inspection_service_config::*; pub use logger_config::*; pub use mempool_config::*; +pub use netbench::*; pub use network_config::*; pub use node_config::*; pub use node_config_loader::sanitize_node_config; +pub use override_node_config::*; pub use peer_monitoring_config::*; pub use persistable_config::*; pub use quorum_store_config::*; diff --git a/config/src/config/netbench.rs b/config/src/config/netbench.rs new file mode 100644 index 0000000000000..41708fcdb6d0c --- /dev/null +++ b/config/src/config/netbench.rs @@ -0,0 +1,40 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Eq, Serialize)] +#[serde(default, deny_unknown_fields)] +pub struct NetbenchConfig { + pub enabled: bool, + pub max_network_channel_size: u64, // Max num of pending network messages + pub netbench_service_threads: Option, // Number of kernel threads for tokio runtime. None default for num-cores. 
+
+    pub enable_direct_send_testing: bool, // Whether or not to enable direct send test mode
+    pub direct_send_data_size: usize,     // The amount of data to send in each request
+    pub direct_send_per_second: u64,      // The interval (microseconds) between requests
+
+    pub enable_rpc_testing: bool,
+    pub rpc_data_size: usize,
+    pub rpc_per_second: u64,
+    pub rpc_in_flight: usize,
+}
+
+impl Default for NetbenchConfig {
+    fn default() -> Self {
+        Self {
+            enabled: false,
+            max_network_channel_size: 1000,
+            netbench_service_threads: Some(2),
+
+            enable_direct_send_testing: false,
+            direct_send_data_size: 100 * 1024, // 100 KB
+            direct_send_per_second: 1_000,
+
+            enable_rpc_testing: false,
+            rpc_data_size: 100 * 1024,
+            rpc_per_second: 1_000,
+            rpc_in_flight: 8,
+        }
+    }
+}
diff --git a/config/src/config/network_config.rs b/config/src/config/network_config.rs
index 9bd212636f4b7..4fb512e80f3e5 100644
--- a/config/src/config/network_config.rs
+++ b/config/src/config/network_config.rs
@@ -53,10 +53,6 @@ pub const MAX_MESSAGE_SIZE: usize = 64 * 1024 * 1024; /* 64 MiB */
 pub const CONNECTION_BACKOFF_BASE: u64 = 2;
 pub const IP_BYTE_BUCKET_RATE: usize = 102400 /* 100 KiB */;
 pub const IP_BYTE_BUCKET_SIZE: usize = IP_BYTE_BUCKET_RATE;
-pub const INBOUND_TCP_RX_BUFFER_SIZE: u32 = 3 * 1024 * 1024; // 3MB ~6MB/s with 500ms latency
-pub const INBOUND_TCP_TX_BUFFER_SIZE: u32 = 512 * 1024; // 1MB use a bigger spoon
-pub const OUTBOUND_TCP_RX_BUFFER_SIZE: u32 = 3 * 1024 * 1024; // 3MB ~6MB/s with 500ms latency
-pub const OUTBOUND_TCP_TX_BUFFER_SIZE: u32 = 1024 * 1024; // 1MB use a bigger spoon

 #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
 #[serde(default, deny_unknown_fields)]
@@ -89,6 +85,14 @@ pub struct NetworkConfig {
     pub network_id: NetworkId,
     /// Number of threads to run for networking
     pub runtime_threads: Option,
+    /// Overrides for the size of the inbound and outbound buffers for each peer.
+    /// NOTE: The defaults are None, so socket options are not called. Change to Some values with
+    /// caution. Experiments have shown that relying on Linux's default tcp auto-tuning can perform
+    /// better than setting these. In particular, for larger values to take effect, the
+    /// `net.core.rmem_max` and `net.core.wmem_max` sysctl values may need to be increased. On a
+    /// vanilla GCP machine, these are set to 212992. Without increasing those sysctls, setting a
+    /// value here will constrain the buffer size to the sysctl limit. (In contrast, default
+    /// auto-tuning can increase beyond these values.)
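// [Editor's note] Illustrative sketch only, not part of this patch: with the four TCP buffer
// constants deleted above, the per-peer overrides below now default to None and Linux's TCP
// auto-tuning is used. Re-enabling explicit buffers would look roughly like this (the Option<u32>
// field types are an assumption based on the removed constants); as the doc comment warns, values
// above net.core.rmem_max / net.core.wmem_max (212992 on a vanilla GCP machine) are clamped to
// the sysctl limit unless those sysctls are raised.
fn example_explicit_tcp_buffers(network: &mut aptos_config::config::NetworkConfig) {
    network.inbound_rx_buffer_size_bytes = Some(3 * 1024 * 1024); // ~6 MB/s at 500 ms latency
    network.inbound_tx_buffer_size_bytes = Some(512 * 1024);
    network.outbound_rx_buffer_size_bytes = Some(3 * 1024 * 1024);
    network.outbound_tx_buffer_size_bytes = Some(1024 * 1024);
}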
pub inbound_rx_buffer_size_bytes: Option, pub inbound_tx_buffer_size_bytes: Option, pub outbound_rx_buffer_size_bytes: Option, @@ -157,10 +161,10 @@ impl NetworkConfig { inbound_rate_limit_config: None, outbound_rate_limit_config: None, max_message_size: MAX_MESSAGE_SIZE, - inbound_rx_buffer_size_bytes: Some(INBOUND_TCP_RX_BUFFER_SIZE), - inbound_tx_buffer_size_bytes: Some(INBOUND_TCP_TX_BUFFER_SIZE), - outbound_rx_buffer_size_bytes: Some(OUTBOUND_TCP_RX_BUFFER_SIZE), - outbound_tx_buffer_size_bytes: Some(OUTBOUND_TCP_TX_BUFFER_SIZE), + inbound_rx_buffer_size_bytes: None, + inbound_tx_buffer_size_bytes: None, + outbound_rx_buffer_size_bytes: None, + outbound_tx_buffer_size_bytes: None, max_parallel_deserialization_tasks: None, }; diff --git a/config/src/config/node_config.rs b/config/src/config/node_config.rs index ebf1aba8e2ad5..a58332f475690 100644 --- a/config/src/config/node_config.rs +++ b/config/src/config/node_config.rs @@ -3,11 +3,11 @@ use crate::{ config::{ - node_config_loader::NodeConfigLoader, persistable_config::PersistableConfig, - utils::RootPath, ApiConfig, BaseConfig, ConsensusConfig, Error, ExecutionConfig, - IndexerConfig, IndexerGrpcConfig, InspectionServiceConfig, LoggerConfig, MempoolConfig, - NetworkConfig, PeerMonitoringServiceConfig, SafetyRulesTestConfig, StateSyncConfig, - StorageConfig, + netbench::NetbenchConfig, node_config_loader::NodeConfigLoader, + persistable_config::PersistableConfig, utils::RootPath, ApiConfig, BaseConfig, + ConsensusConfig, Error, ExecutionConfig, IndexerConfig, IndexerGrpcConfig, + InspectionServiceConfig, LoggerConfig, MempoolConfig, NetworkConfig, + PeerMonitoringServiceConfig, SafetyRulesTestConfig, StateSyncConfig, StorageConfig, }, network_id::NetworkId, }; @@ -58,6 +58,8 @@ pub struct NodeConfig { pub storage: StorageConfig, #[serde(default)] pub validator_network: Option, + #[serde(default)] + pub netbench: Option, } impl NodeConfig { @@ -244,6 +246,7 @@ pub fn merge_node_config( )) }) } + #[cfg(test)] mod test { use crate::config::{merge_node_config, Error, NodeConfig, SafetyRulesConfig}; diff --git a/config/src/config/override_node_config.rs b/config/src/config/override_node_config.rs new file mode 100644 index 0000000000000..c8648c00d5acd --- /dev/null +++ b/config/src/config/override_node_config.rs @@ -0,0 +1,278 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::config::{Error, NodeConfig, PersistableConfig}; +use anyhow::bail; +use serde::{Deserialize, Serialize}; +use std::path::Path; + +/// Diff a config yaml with a base config yaml. Returns None if there is no diff. +fn diff_override_config_yaml( + override_config: serde_yaml::Value, + base_config: serde_yaml::Value, +) -> anyhow::Result> { + match (override_config.clone(), base_config.clone()) { + ( + serde_yaml::Value::Mapping(override_mapping), + serde_yaml::Value::Mapping(base_mapping), + ) => { + let mut overrides = serde_yaml::Mapping::new(); + for (override_key, override_value) in override_mapping { + match base_mapping.get(&override_key) { + Some(base_value) => { + if let Some(diff_value) = + diff_override_config_yaml(override_value, base_value.clone())? 
+ { + overrides.insert(override_key, diff_value); + } + }, + None => { + overrides.insert(override_key, override_value); + }, + } + } + if overrides.is_empty() { + Ok(None) + } else { + Ok(Some(serde_yaml::Value::Mapping(overrides))) + } + }, + (serde_yaml::Value::Mapping(_), _) => Ok(Some(override_config)), + (serde_yaml::Value::Null, serde_yaml::Value::Null) => Ok(None), + (serde_yaml::Value::Bool(override_value), serde_yaml::Value::Bool(base_value)) => { + if override_value == base_value { + Ok(None) + } else { + Ok(Some(override_config)) + } + }, + (serde_yaml::Value::Number(override_value), serde_yaml::Value::Number(base_value)) => { + if override_value == base_value { + Ok(None) + } else { + Ok(Some(override_config)) + } + }, + (serde_yaml::Value::String(override_value), serde_yaml::Value::String(base_value)) => { + if override_value == base_value { + Ok(None) + } else { + Ok(Some(override_config)) + } + }, + (serde_yaml::Value::Sequence(override_value), serde_yaml::Value::Sequence(base_value)) => { + if override_value == base_value { + Ok(None) + } else { + Ok(Some(override_config)) + } + }, + (_, _) => bail!( + "base does not match override: {:?}, {:?}", + override_config, + base_config + ), + } +} + +/// A utility struct for managing a node config that is overriding a base config, and outputting a +/// yaml representation of it. In most cases, the base config will be the default config, and the +/// output will be a minimal yaml diff that an aptos node can read. +/// +/// In rare cases you may want to explicitly write a yaml value that is the same as the default +/// config (e.g., to avoid the config being optimized by ConfigOptimizer). To do this, change the +/// base config to a different value before calling get_yaml(). +#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)] +pub struct OverrideNodeConfig { + override_config: NodeConfig, + base_config: NodeConfig, +} + +impl OverrideNodeConfig { + pub fn new(override_config: NodeConfig, base_config: NodeConfig) -> Self { + Self { + override_config, + base_config, + } + } + + pub fn new_default() -> Self { + Self::new(NodeConfig::default(), NodeConfig::default()) + } + + pub fn new_with_default_base(override_config: NodeConfig) -> Self { + Self { + override_config, + base_config: NodeConfig::default(), + } + } + + pub fn override_config(&self) -> &NodeConfig { + &self.override_config + } + + pub fn override_config_mut(&mut self) -> &mut NodeConfig { + &mut self.override_config + } + + pub fn base_config(&self) -> &NodeConfig { + &self.base_config + } + + pub fn base_config_mut(&mut self) -> &mut NodeConfig { + &mut self.base_config + } + + pub fn get_yaml(&self) -> anyhow::Result { + let config_yaml = serde_yaml::to_value(&self.override_config)?; + let base_yaml = serde_yaml::to_value(&self.base_config)?; + diff_override_config_yaml(config_yaml, base_yaml).map(|diff_yaml| { + diff_yaml.unwrap_or(serde_yaml::Value::Mapping(serde_yaml::Mapping::new())) + }) + } +} + +impl PersistableConfig for OverrideNodeConfig { + fn load_config>(path: P) -> Result { + let config = NodeConfig::load_config(path)?; + Ok(Self::new_with_default_base(config)) + } + + fn save_config>(&self, output_file: P) -> Result<(), Error> { + let yaml_value = self.get_yaml()?; + let yaml_string = serde_yaml::to_string(&yaml_value).map_err(|e| { + Error::Yaml( + "Unable to serialize override config to yaml. 
Error: {}".to_string(), + e, + ) + })?; + let yaml_bytes = yaml_string.as_bytes().to_vec(); + Self::write_file(yaml_bytes, output_file) + } +} + +#[cfg(test)] +mod test { + use crate::config::{ + NetworkConfig, NodeConfig, OverrideNodeConfig, PersistableConfig, WaypointConfig, + }; + use std::{env::temp_dir, path::PathBuf}; + + #[test] + fn test_override_node_config_no_diff() { + let override_config = OverrideNodeConfig::new(NodeConfig::default(), NodeConfig::default()); + let diff_yaml = override_config.get_yaml().unwrap(); + assert_eq!( + diff_yaml, + serde_yaml::Value::Mapping(serde_yaml::Mapping::new()) + ); + } + + #[test] + fn test_override_node_config_with_bool() { + let mut override_config = + OverrideNodeConfig::new(NodeConfig::default(), NodeConfig::default()); + let config = override_config.override_config_mut(); + config.api.enabled = false; + + let diff_yaml = override_config.get_yaml().unwrap(); + let expected_yaml: serde_yaml::Value = serde_yaml::from_str( + r#" + api: + enabled: false + "#, + ) + .unwrap(); + assert_eq!(diff_yaml, expected_yaml); + } + + #[test] + fn test_override_node_config_with_enum() { + let mut override_config = + OverrideNodeConfig::new(NodeConfig::default(), NodeConfig::default()); + let config = override_config.override_config_mut(); + config.base.waypoint = WaypointConfig::FromFile(PathBuf::from("test")); + + let diff_yaml = override_config.get_yaml().unwrap(); + let expected_yaml: serde_yaml::Value = serde_yaml::from_str( + r#" + base: + waypoint: + from_file: test + "#, + ) + .unwrap(); + assert_eq!(diff_yaml, expected_yaml); + } + + fn assert_equal_yaml(actual: serde_yaml::Value, expected: serde_yaml::Value) { + // TODO: why don't the actual Values match, only matches with as_str? + assert_eq!(actual.as_str(), expected.as_str()); + } + + #[test] + fn test_override_node_config_with_empty_base_vector() { + let mut override_config = + OverrideNodeConfig::new(NodeConfig::default(), NodeConfig::default()); + let config = override_config.override_config_mut(); + config.full_node_networks.push(Default::default()); + config.full_node_networks.push(Default::default()); + + let diff_yaml = override_config.get_yaml().unwrap(); + let default_node_config = serde_yaml::to_value(&NetworkConfig::default()).unwrap(); + let mut expected_yaml: serde_yaml::Value = serde_yaml::Value::Null; + expected_yaml["full_node_networks"] = + serde_yaml::Value::Sequence(vec![default_node_config.clone(), default_node_config]); + assert_equal_yaml(diff_yaml, expected_yaml); + } + + #[test] + fn test_override_node_config_with_non_empty_base_vector() { + let mut override_config = + OverrideNodeConfig::new(NodeConfig::default(), NodeConfig::default()); + let config = override_config.override_config_mut(); + config.full_node_networks.push(Default::default()); + config.full_node_networks.push(Default::default()); + let base = override_config.base_config_mut(); + base.full_node_networks.push(Default::default()); + + // Note, the diff will include the entire vector, not just the non-equal elements + let diff_yaml = override_config.get_yaml().unwrap(); + let default_node_config = serde_yaml::to_value(&NetworkConfig::default()).unwrap(); + let mut expected_yaml: serde_yaml::Value = serde_yaml::Value::Null; + expected_yaml["full_node_networks"] = + serde_yaml::Value::Sequence(vec![default_node_config.clone(), default_node_config]); + assert_equal_yaml(diff_yaml, expected_yaml); + } + + #[test] + fn test_override_node_config_with_base_change() { + let mut override_config = + 
OverrideNodeConfig::new(NodeConfig::default(), NodeConfig::default()); + let base = override_config.base_config_mut(); + base.api.enabled = false; + + let diff_yaml = override_config.get_yaml().unwrap(); + let expected_yaml: serde_yaml::Value = serde_yaml::from_str( + r#" + api: + enabled: true + "#, + ) + .unwrap(); + assert_eq!(diff_yaml, expected_yaml); + } + + #[test] + fn test_override_config_load_save() { + let mut override_config = + OverrideNodeConfig::new(NodeConfig::default(), NodeConfig::default()); + let config = override_config.override_config_mut(); + config.api.enabled = false; + + let temp_file = temp_dir().join("override_config.yaml"); + override_config.save_config(temp_file.as_path()).unwrap(); + let loaded_config = OverrideNodeConfig::load_config(temp_file.as_path()).unwrap(); + assert_eq!(override_config, loaded_config); + } +} diff --git a/config/src/config/peer_monitoring_config.rs b/config/src/config/peer_monitoring_config.rs index c69195b0f3c06..d709897595fac 100644 --- a/config/src/config/peer_monitoring_config.rs +++ b/config/src/config/peer_monitoring_config.rs @@ -127,7 +127,7 @@ impl Default for NodeMonitoringConfig { impl ConfigSanitizer for PeerMonitoringServiceConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, node_type: NodeType, chain_id: ChainId, ) -> Result<(), Error> { @@ -138,7 +138,7 @@ impl ConfigSanitizer for PeerMonitoringServiceConfig { impl ConfigSanitizer for PerformanceMonitoringConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, _node_type: NodeType, chain_id: ChainId, ) -> Result<(), Error> { diff --git a/config/src/config/persistable_config.rs b/config/src/config/persistable_config.rs index 53e63e8ae3e24..728f372d15da5 100644 --- a/config/src/config/persistable_config.rs +++ b/config/src/config/persistable_config.rs @@ -25,13 +25,7 @@ pub trait PersistableConfig: Serialize + DeserializeOwned { let serialized_config = serde_yaml::to_vec(&self) .map_err(|e| Error::Yaml(output_file.as_ref().to_str().unwrap().to_string(), e))?; - // Create the file and write the serialized config to the file - let mut file = File::create(output_file.as_ref()) - .map_err(|e| Error::IO(output_file.as_ref().to_str().unwrap().to_string(), e))?; - file.write_all(&serialized_config) - .map_err(|e| Error::IO(output_file.as_ref().to_str().unwrap().to_string(), e))?; - - Ok(()) + Self::write_file(serialized_config, output_file) } /// Read the config at the given path and return the contents as a string @@ -45,6 +39,16 @@ pub trait PersistableConfig: Serialize + DeserializeOwned { }) } + /// Create the file and write the serialized config to the file + fn write_file>(serialized_config: Vec, output_file: P) -> Result<(), Error> { + let mut file = File::create(output_file.as_ref()) + .map_err(|e| Error::IO(output_file.as_ref().to_str().unwrap().to_string(), e))?; + file.write_all(&serialized_config) + .map_err(|e| Error::IO(output_file.as_ref().to_str().unwrap().to_string(), e))?; + + Ok(()) + } + /// Parse the config from the serialized string fn parse_serialized_config(serialized_config: &str) -> Result { serde_yaml::from_str(serialized_config).map_err(|e| Error::Yaml("config".to_string(), e)) diff --git a/config/src/config/quorum_store_config.rs b/config/src/config/quorum_store_config.rs index 29b45c0cee9ee..3f1444de167e9 100644 --- a/config/src/config/quorum_store_config.rs +++ b/config/src/config/quorum_store_config.rs @@ -184,7 +184,7 @@ impl QuorumStoreConfig { impl ConfigSanitizer for QuorumStoreConfig { 
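// [Editor's note] Illustrative sketch only, not part of this patch, referring back to the
// OverrideNodeConfig type introduced in override_node_config.rs above: get_yaml() serializes only
// the fields that differ from the base config, so a single changed flag produces a one-entry
// mapping (mirroring the unit tests above).
fn example_minimal_override_yaml() -> anyhow::Result<()> {
    use aptos_config::config::{NodeConfig, OverrideNodeConfig};

    let mut override_config = OverrideNodeConfig::new_with_default_base(NodeConfig::default());
    override_config.override_config_mut().api.enabled = false;

    // Expected output is just the diff against the default base:
    //   api:
    //     enabled: false
    let yaml = override_config.get_yaml()?;
    println!("{}", serde_yaml::to_string(&yaml)?);
    Ok(())
}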
fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, _node_type: NodeType, _chain_id: ChainId, ) -> Result<(), Error> { diff --git a/config/src/config/safety_rules_config.rs b/config/src/config/safety_rules_config.rs index 63e7a9d02b14b..39714da271ee1 100644 --- a/config/src/config/safety_rules_config.rs +++ b/config/src/config/safety_rules_config.rs @@ -70,7 +70,7 @@ impl SafetyRulesConfig { impl ConfigSanitizer for SafetyRulesConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, node_type: NodeType, chain_id: ChainId, ) -> Result<(), Error> { @@ -232,7 +232,7 @@ mod tests { #[test] fn test_sanitize_invalid_backend_for_mainnet() { // Create a node config with an invalid backend for mainnet - let mut node_config = NodeConfig { + let node_config = NodeConfig { consensus: ConsensusConfig { safety_rules: SafetyRulesConfig { backend: SecureBackend::InMemoryStorage, @@ -245,7 +245,7 @@ mod tests { // Verify that the config sanitizer fails let error = - SafetyRulesConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::mainnet()) + SafetyRulesConfig::sanitize(&node_config, NodeType::Validator, ChainId::mainnet()) .unwrap_err(); assert!(matches!(error, Error::ConfigSanitizerFailed(_, _))); } @@ -253,7 +253,7 @@ mod tests { #[test] fn test_sanitize_backend_for_mainnet_fullnodes() { // Create a node config with an invalid backend for mainnet validators - let mut node_config = NodeConfig { + let node_config = NodeConfig { consensus: ConsensusConfig { safety_rules: SafetyRulesConfig { backend: SecureBackend::InMemoryStorage, @@ -265,18 +265,14 @@ mod tests { }; // Verify that the config sanitizer passes because the node is a fullnode - SafetyRulesConfig::sanitize( - &mut node_config, - NodeType::PublicFullnode, - ChainId::mainnet(), - ) - .unwrap(); + SafetyRulesConfig::sanitize(&node_config, NodeType::PublicFullnode, ChainId::mainnet()) + .unwrap(); } #[test] fn test_sanitize_invalid_service_for_mainnet() { // Create a node config with a non-local service - let mut node_config = NodeConfig { + let node_config = NodeConfig { consensus: ConsensusConfig { safety_rules: SafetyRulesConfig { service: SafetyRulesService::Serializer, @@ -289,7 +285,7 @@ mod tests { // Verify that the config sanitizer fails let error = - SafetyRulesConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::mainnet()) + SafetyRulesConfig::sanitize(&node_config, NodeType::Validator, ChainId::mainnet()) .unwrap_err(); assert!(matches!(error, Error::ConfigSanitizerFailed(_, _))); } @@ -297,7 +293,7 @@ mod tests { #[test] fn test_sanitize_test_config_on_mainnet() { // Create a node config with a test config - let mut node_config = NodeConfig { + let node_config = NodeConfig { consensus: ConsensusConfig { safety_rules: SafetyRulesConfig { test: Some(SafetyRulesTestConfig::new(PeerId::random())), @@ -310,7 +306,7 @@ mod tests { // Verify that the config sanitizer fails let error = - SafetyRulesConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::mainnet()) + SafetyRulesConfig::sanitize(&node_config, NodeType::Validator, ChainId::mainnet()) .unwrap_err(); assert!(matches!(error, Error::ConfigSanitizerFailed(_, _))); } @@ -318,7 +314,7 @@ mod tests { #[test] fn test_sanitize_missing_initial_safety_rules() { // Create a node config with a test config - let mut node_config = NodeConfig { + let node_config = NodeConfig { consensus: ConsensusConfig { safety_rules: SafetyRulesConfig { test: Some(SafetyRulesTestConfig::new(PeerId::random())), @@ -331,7 +327,7 @@ mod 
tests {
     // Verify that the config sanitizer fails
     let error =
-        SafetyRulesConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::mainnet())
+        SafetyRulesConfig::sanitize(&node_config, NodeType::Validator, ChainId::mainnet())
             .unwrap_err();
     assert!(matches!(error, Error::ConfigSanitizerFailed(_, _)));
 }
diff --git a/config/src/config/state_sync_config.rs b/config/src/config/state_sync_config.rs
index d8943e2b2f8bc..9bb59501722b1 100644
--- a/config/src/config/state_sync_config.rs
+++ b/config/src/config/state_sync_config.rs
@@ -204,6 +204,9 @@ impl Default for StorageServiceConfig {
 #[derive(Clone, Copy, Debug, Deserialize, PartialEq, Eq, Serialize)]
 #[serde(default, deny_unknown_fields)]
 pub struct DataStreamingServiceConfig {
+    /// Whether or not to enable data subscription streaming.
+    pub enable_subscription_streaming: bool,
+
     /// The interval (milliseconds) at which to refresh the global data summary.
     pub global_summary_refresh_interval_ms: u64,

@@ -226,6 +229,10 @@ pub struct DataStreamingServiceConfig {
     /// memory. Once the number grows beyond this value, garbage collection occurs.
     pub max_notification_id_mappings: u64,

+    /// Maximum number of consecutive subscriptions that can be made before
+    /// the subscription stream is terminated and a new stream must be created.
+    pub max_num_consecutive_subscriptions: u64,
+
     /// The interval (milliseconds) at which to check the progress of each stream.
     pub progress_check_interval_ms: u64,
 }

@@ -233,12 +240,14 @@ pub struct DataStreamingServiceConfig {
 impl Default for DataStreamingServiceConfig {
     fn default() -> Self {
         Self {
+            enable_subscription_streaming: false,
             global_summary_refresh_interval_ms: 50,
             max_concurrent_requests: MAX_CONCURRENT_REQUESTS,
             max_concurrent_state_requests: MAX_CONCURRENT_STATE_REQUESTS,
             max_data_stream_channel_sizes: 300,
             max_request_retry: 5,
             max_notification_id_mappings: 300,
+            max_num_consecutive_subscriptions: 50,
             progress_check_interval_ms: 50,
         }
     }
@@ -257,14 +266,14 @@ pub struct AptosDataClientConfig {
     pub max_num_in_flight_regular_polls: u64,
     /// Maximum number of output reductions before transactions are returned
     pub max_num_output_reductions: u64,
-    /// Maximum version lag we'll tolerate when sending optimistic fetch requests
-    pub max_optimistic_fetch_version_lag: u64,
+    /// Maximum lag (in seconds) we'll tolerate when sending optimistic fetch requests
+    pub max_optimistic_fetch_lag_secs: u64,
     /// Maximum timeout (in ms) when waiting for a response (after exponential increases)
     pub max_response_timeout_ms: u64,
     /// Maximum number of state keys and values per chunk
     pub max_state_chunk_size: u64,
-    /// Maximum version lag we'll tolerate when sending subscription requests
-    pub max_subscription_version_lag: u64,
+    /// Maximum lag (in seconds) we'll tolerate when sending subscription requests
+    pub max_subscription_lag_secs: u64,
     /// Maximum number of transactions per chunk
     pub max_transaction_chunk_size: u64,
     /// Maximum number of transaction outputs per chunk
@@ -273,6 +282,8 @@ pub struct AptosDataClientConfig {
     pub optimistic_fetch_timeout_ms: u64,
     /// First timeout (in ms) when waiting for a response
     pub response_timeout_ms: u64,
+    /// Timeout (in ms) when waiting for a subscription response
+    pub subscription_response_timeout_ms: u64,
     /// Interval (in ms) between data summary poll loop executions
     pub summary_poll_loop_interval_ms: u64,
     /// Whether or not to request compression for incoming data
@@ -287,15 +298,16 @@ impl Default for AptosDataClientConfig {
            max_num_in_flight_priority_polls:
10, max_num_in_flight_regular_polls: 10, max_num_output_reductions: 0, - max_optimistic_fetch_version_lag: 50_000, // Assumes 5K TPS for 10 seconds, which should be plenty - max_response_timeout_ms: 60_000, // 60 seconds + max_optimistic_fetch_lag_secs: 30, // 30 seconds + max_response_timeout_ms: 60_000, // 60 seconds max_state_chunk_size: MAX_STATE_CHUNK_SIZE, - max_subscription_version_lag: 100_000, // Assumes 5K TPS for 20 seconds, which should be plenty + max_subscription_lag_secs: 30, // 30 seconds max_transaction_chunk_size: MAX_TRANSACTION_CHUNK_SIZE, max_transaction_output_chunk_size: MAX_TRANSACTION_OUTPUT_CHUNK_SIZE, optimistic_fetch_timeout_ms: 5000, // 5 seconds response_timeout_ms: 10_000, // 10 seconds summary_poll_loop_interval_ms: 200, + subscription_response_timeout_ms: 20_000, // 20 seconds (must be longer than a regular timeout because of pre-fetching) use_compression: true, } } @@ -303,7 +315,7 @@ impl Default for AptosDataClientConfig { impl ConfigSanitizer for StateSyncConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, node_type: NodeType, chain_id: ChainId, ) -> Result<(), Error> { @@ -314,12 +326,12 @@ impl ConfigSanitizer for StateSyncConfig { impl ConfigSanitizer for StateSyncDriverConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, _node_type: NodeType, _chain_id: ChainId, ) -> Result<(), Error> { let sanitizer_name = Self::get_sanitizer_name(); - let state_sync_driver_config = &mut node_config.state_sync.state_sync_driver; + let state_sync_driver_config = &node_config.state_sync.state_sync_driver; // Verify that auto-bootstrapping is not enabled for // nodes that are fast syncing. @@ -615,7 +627,7 @@ mod tests { fn test_sanitize_auto_bootstrapping_fast_sync() { // Create a node config with fast sync and // auto bootstrapping enabled. 
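// [Editor's note] Hypothetical sketch, not part of this patch: the data client config above now
// bounds optimistic fetches and subscriptions by wall-clock lag in seconds
// (max_optimistic_fetch_lag_secs / max_subscription_lag_secs) instead of a version lag. A check
// in that spirit, with invented names and simplified logic, would compare the timestamp of the
// peer's highest synced version against the local clock:
fn peer_within_lag(peer_version_timestamp_usecs: u64, now_usecs: u64, max_lag_secs: u64) -> bool {
    let lag_usecs = now_usecs.saturating_sub(peer_version_timestamp_usecs);
    lag_usecs <= max_lag_secs.saturating_mul(1_000_000)
}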
- let mut node_config = NodeConfig { + let node_config = NodeConfig { state_sync: StateSyncConfig { state_sync_driver: StateSyncDriverConfig { bootstrapping_mode: BootstrappingMode::DownloadLatestStates, @@ -629,7 +641,7 @@ mod tests { // Verify that sanitization fails let error = - StateSyncConfig::sanitize(&mut node_config, NodeType::Validator, ChainId::testnet()) + StateSyncConfig::sanitize(&node_config, NodeType::Validator, ChainId::testnet()) .unwrap_err(); assert!(matches!(error, Error::ConfigSanitizerFailed(_, _))); } diff --git a/config/src/config/storage_config.rs b/config/src/config/storage_config.rs index cc58cb805c96d..1bb6faa0e25dc 100644 --- a/config/src/config/storage_config.rs +++ b/config/src/config/storage_config.rs @@ -292,7 +292,7 @@ impl StorageConfig { impl ConfigSanitizer for StorageConfig { fn sanitize( - node_config: &mut NodeConfig, + node_config: &NodeConfig, _node_type: NodeType, _chain_id: ChainId, ) -> Result<(), Error> { diff --git a/consensus/consensus-types/src/block.rs b/consensus/consensus-types/src/block.rs index 476c9833cdbb3..c3b5bbe7330e4 100644 --- a/consensus/consensus-types/src/block.rs +++ b/consensus/consensus-types/src/block.rs @@ -8,6 +8,7 @@ use crate::{ quorum_cert::QuorumCert, }; use anyhow::{bail, ensure, format_err}; +use aptos_bitvec::BitVec; use aptos_crypto::{bls12381, hash::CryptoHash, HashValue}; use aptos_infallible::duration_since_epoch; use aptos_types::{ @@ -95,7 +96,7 @@ impl Block { } pub fn parent_id(&self) -> HashValue { - self.block_data.quorum_cert().certified_block().id() + self.block_data.parent_id() } pub fn payload(&self) -> Option<&Payload> { @@ -210,10 +211,26 @@ impl Block { payload: Payload, author: Author, failed_authors: Vec<(Round, Author)>, - ) -> anyhow::Result { - let block_data = - BlockData::new_for_dag(epoch, round, timestamp, payload, author, failed_authors); - Self::new_proposal_from_block_data(block_data, &ValidatorSigner::from_int(0)) + parent_block_id: HashValue, + parents_bitvec: BitVec, + node_digests: Vec, + ) -> Self { + let block_data = BlockData::new_for_dag( + epoch, + round, + timestamp, + payload, + author, + failed_authors, + parent_block_id, + parents_bitvec, + node_digests, + ); + Self { + id: block_data.hash(), + block_data, + signature: None, + } } pub fn new_proposal( @@ -271,6 +288,7 @@ impl Block { validator.verify(*author, &self.block_data, signature)?; self.quorum_cert().verify(validator) }, + BlockType::DAGBlock { .. } => bail!("We should not accept DAG block from others"), } } @@ -381,18 +399,21 @@ impl Block { } } + fn previous_bitvec(&self) -> BitVec { + if let BlockType::DAGBlock { parents_bitvec, .. } = self.block_data.block_type() { + parents_bitvec.clone() + } else { + self.quorum_cert().ledger_info().get_voters_bitvec().clone() + } + } + fn new_block_metadata(&self, validators: &[AccountAddress]) -> BlockMetadata { BlockMetadata::new( self.id(), self.epoch(), self.round(), self.author().unwrap_or(AccountAddress::ZERO), - // A bitvec of voters - self.quorum_cert() - .ledger_info() - .get_voters_bitvec() - .clone() - .into(), + self.previous_bitvec().into(), // For nil block, we use 0x0 which is convention for nil address in move. 
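// [Editor's note] Illustrative sketch only, not part of this patch: for a DAGBlock, the bitvec
// handed to BlockMetadata::new() above marks which validators authored parents of the anchor
// rather than the QC voters. It is built the same way as in dag/adapter.rs later in this diff,
// using only the BitVec methods that appear in this patch:
fn example_parents_bitvec(num_validators: u16, parent_indices: &[u16]) -> aptos_bitvec::BitVec {
    let mut parents_bitvec = aptos_bitvec::BitVec::with_num_bits(num_validators);
    for idx in parent_indices {
        parents_bitvec.set(*idx); // one bit per validator index that contributed a parent node
    }
    parents_bitvec
}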
self.block_data() .failed_authors() diff --git a/consensus/consensus-types/src/block_data.rs b/consensus/consensus-types/src/block_data.rs index d0fb7b2b51b93..b8b4150171393 100644 --- a/consensus/consensus-types/src/block_data.rs +++ b/consensus/consensus-types/src/block_data.rs @@ -7,6 +7,7 @@ use crate::{ quorum_cert::QuorumCert, vote_data::VoteData, }; +use aptos_bitvec::BitVec; use aptos_crypto::hash::HashValue; use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher}; use aptos_types::{ @@ -42,6 +43,17 @@ pub enum BlockType { /// from the previous epoch. The genesis block is used as the first root block of the /// BlockTree for all epochs. Genesis, + /// A virtual block that's constructed by nodes from DAG, this is purely a local thing so + /// we hide it from serde + #[serde(skip_deserializing)] + DAGBlock { + author: Author, + failed_authors: Vec<(Round, Author)>, + payload: Payload, + node_digests: Vec, + parent_block_id: HashValue, + parents_bitvec: BitVec, + }, } #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, CryptoHasher, BCSCryptoHash)] @@ -79,10 +91,9 @@ pub struct BlockData { impl BlockData { pub fn author(&self) -> Option { - if let BlockType::Proposal { author, .. } = self.block_type { - Some(author) - } else { - None + match self.block_type { + BlockType::Proposal { author, .. } | BlockType::DAGBlock { author, .. } => Some(author), + _ => None, } } @@ -95,12 +106,32 @@ impl BlockData { } pub fn parent_id(&self) -> HashValue { - self.quorum_cert.certified_block().id() + if let BlockType::DAGBlock { + parent_block_id, .. + } = self.block_type() + { + *parent_block_id + } else { + self.quorum_cert.certified_block().id() + } } pub fn payload(&self) -> Option<&Payload> { - if let BlockType::Proposal { payload, .. } = &self.block_type { - Some(payload) + match &self.block_type { + BlockType::Proposal { payload, .. } | BlockType::DAGBlock { payload, .. } => { + Some(payload) + }, + _ => None, + } + } + + pub fn dag_nodes(&self) -> Option<&Vec> { + if let BlockType::DAGBlock { + node_digests: nodes_digests, + .. + } = &self.block_type + { + Some(nodes_digests) } else { None } @@ -129,11 +160,10 @@ impl BlockData { /// the list of consecutive proposers from the immediately preceeding /// rounds that didn't produce a successful block pub fn failed_authors(&self) -> Option<&Vec<(Round, Author)>> { - match self.block_type { - BlockType::Proposal { - ref failed_authors, .. - } => Some(failed_authors), - BlockType::NilBlock { ref failed_authors } => Some(failed_authors), + match &self.block_type { + BlockType::Proposal { failed_authors, .. } + | BlockType::NilBlock { failed_authors, .. } + | BlockType::DAGBlock { failed_authors, .. 
} => Some(failed_authors), BlockType::Genesis => None, } } @@ -218,6 +248,9 @@ impl BlockData { payload: Payload, author: Author, failed_authors: Vec<(Round, Author)>, + parent_block_id: HashValue, + parents_bitvec: BitVec, + node_digests: Vec, ) -> Self { Self { epoch, @@ -227,13 +260,16 @@ impl BlockData { VoteData::new(BlockInfo::empty(), BlockInfo::empty()), LedgerInfoWithSignatures::new( LedgerInfo::new(BlockInfo::empty(), HashValue::zero()), - AggregateSignature::empty(), + AggregateSignature::new(BitVec::default(), None), ), ), - block_type: BlockType::Proposal { - payload, + block_type: BlockType::DAGBlock { author, + payload, failed_authors, + node_digests, + parent_block_id, + parents_bitvec, }, } } @@ -300,3 +336,30 @@ fn test_reconfiguration_suffix() { ); assert!(reconfig_suffix_block.is_reconfiguration_suffix()); } + +#[test] +fn test_dag_block_no_deserialize() { + #[derive(Serialize)] + #[serde(rename = "BlockType")] + #[allow(dead_code)] + pub enum FakeBlockType { + Proposal, + NilBlock, + Genesis, + DAG { + author: Author, + failed_authors: Vec<(Round, Author)>, + payload: Payload, + node_digests: Vec, + }, + } + // test deserialize failure + let fake = FakeBlockType::DAG { + author: Author::ZERO, + failed_authors: vec![], + payload: Payload::DirectMempool(vec![]), + node_digests: vec![], + }; + let bytes = bcs::to_bytes(&fake).unwrap(); + bcs::from_bytes::(&bytes).unwrap_err(); +} diff --git a/consensus/consensus-types/src/common.rs b/consensus/consensus-types/src/common.rs index e7540aeb1e62c..cc16a94a57a46 100644 --- a/consensus/consensus-types/src/common.rs +++ b/consensus/consensus-types/src/common.rs @@ -4,7 +4,7 @@ use crate::proof_of_store::{BatchInfo, ProofOfStore}; use aptos_crypto::HashValue; -use aptos_executor_types::Error; +use aptos_executor_types::ExecutorResult; use aptos_infallible::Mutex; use aptos_types::{ account_address::AccountAddress, transaction::SignedTransaction, @@ -63,7 +63,7 @@ pub enum DataStatus { Requested( Vec<( HashValue, - oneshot::Receiver, Error>>, + oneshot::Receiver>>, )>, ), } diff --git a/consensus/consensus-types/src/executed_block.rs b/consensus/consensus-types/src/executed_block.rs index 6a1b45656bd57..c2c71e5554d2d 100644 --- a/consensus/consensus-types/src/executed_block.rs +++ b/consensus/consensus-types/src/executed_block.rs @@ -31,6 +31,13 @@ pub struct ExecutedBlock { state_compute_result: StateComputeResult, } +impl ExecutedBlock { + pub fn replace_result(mut self, result: StateComputeResult) -> Self { + self.state_compute_result = result; + self + } +} + impl Debug for ExecutedBlock { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { write!(f, "{}", self) diff --git a/consensus/src/block_storage/block_store.rs b/consensus/src/block_storage/block_store.rs index ca656fef22279..c5a2cda12f698 100644 --- a/consensus/src/block_storage/block_store.rs +++ b/consensus/src/block_storage/block_store.rs @@ -23,7 +23,7 @@ use aptos_consensus_types::{ sync_info::SyncInfo, timeout_2chain::TwoChainTimeoutCertificate, }; use aptos_crypto::{hash::ACCUMULATOR_PLACEHOLDER_HASH, HashValue}; -use aptos_executor_types::{Error, StateComputeResult}; +use aptos_executor_types::{ExecutorError, ExecutorResult, StateComputeResult}; use aptos_infallible::RwLock; use aptos_logger::prelude::*; use aptos_types::{ledger_info::LedgerInfoWithSignatures, transaction::TransactionStatus}; @@ -370,7 +370,7 @@ impl BlockStore { let executed_block = match self.execute_block(block.clone()).await { Ok(res) => Ok(res), - 
Err(Error::BlockNotFound(parent_block_id)) => { + Err(ExecutorError::BlockNotFound(parent_block_id)) => { // recover the block tree in executor let blocks_to_reexecute = self .path_from_ordered_root(parent_block_id) @@ -406,7 +406,7 @@ impl BlockStore { self.inner.write().insert_block(executed_block) } - async fn execute_block(&self, block: Block) -> anyhow::Result { + async fn execute_block(&self, block: Block) -> ExecutorResult { // Although NIL blocks don't have a payload, we still send a T::default() to compute // because we may inject a block prologue transaction. let state_compute_result = self diff --git a/consensus/src/consensusdb/consensusdb_test.rs b/consensus/src/consensusdb/consensusdb_test.rs index 8a4717ae2d208..dccb1c76f8533 100644 --- a/consensus/src/consensusdb/consensusdb_test.rs +++ b/consensus/src/consensusdb/consensusdb_test.rs @@ -113,11 +113,4 @@ fn test_dag() { let vote = Vote::new(node.metadata().clone(), Signature::dummy_signature()); test_dag_type::::Key>(node.id(), vote, &db); - - let anchor_id = node.id(); - test_dag_type::::Key>( - anchor_id, - (), - &db, - ); } diff --git a/consensus/src/consensusdb/mod.rs b/consensus/src/consensusdb/mod.rs index 833aafdc71d18..ff9e63e058c1e 100644 --- a/consensus/src/consensusdb/mod.rs +++ b/consensus/src/consensusdb/mod.rs @@ -16,13 +16,13 @@ use aptos_schemadb::{ }; pub use schema::{ block::BlockSchema, - dag::{CertifiedNodeSchema, DagVoteSchema, NodeSchema, OrderedAnchorIdSchema}, + dag::{CertifiedNodeSchema, DagVoteSchema, NodeSchema}, quorum_certificate::QCSchema, }; use schema::{ single_entry::{SingleEntryKey, SingleEntrySchema}, - BLOCK_CF_NAME, CERTIFIED_NODE_CF_NAME, DAG_VOTE_CF_NAME, NODE_CF_NAME, - ORDERED_ANCHOR_ID_CF_NAME, QC_CF_NAME, SINGLE_ENTRY_CF_NAME, + BLOCK_CF_NAME, CERTIFIED_NODE_CF_NAME, DAG_VOTE_CF_NAME, NODE_CF_NAME, QC_CF_NAME, + SINGLE_ENTRY_CF_NAME, }; use std::{iter::Iterator, path::Path, time::Instant}; @@ -59,7 +59,7 @@ impl ConsensusDB { NODE_CF_NAME, CERTIFIED_NODE_CF_NAME, DAG_VOTE_CF_NAME, - ORDERED_ANCHOR_ID_CF_NAME, + "ordered_anchor_id", // deprecated CF ]; let path = db_root_path.as_ref().join(CONSENSUS_DB_NAME); diff --git a/consensus/src/consensusdb/schema/dag/mod.rs b/consensus/src/consensusdb/schema/dag/mod.rs index 54f2f9cfa6982..22b7ac69f42dc 100644 --- a/consensus/src/consensusdb/schema/dag/mod.rs +++ b/consensus/src/consensusdb/schema/dag/mod.rs @@ -95,28 +95,3 @@ impl ValueCodec for CertifiedNode { Ok(bcs::from_bytes(data)?) } } - -pub const ORDERED_ANCHOR_ID_CF_NAME: ColumnFamilyName = "ordered_anchor_id"; - -define_schema!(OrderedAnchorIdSchema, NodeId, (), ORDERED_ANCHOR_ID_CF_NAME); - -impl KeyCodec for NodeId { - fn encode_key(&self) -> Result> { - Ok(bcs::to_bytes(&self)?) - } - - fn decode_key(data: &[u8]) -> Result { - Ok(bcs::from_bytes(data)?) - } -} - -impl ValueCodec for () { - fn encode_value(&self) -> Result> { - Ok(vec![]) - } - - fn decode_value(data: &[u8]) -> Result { - ensure_slice_len_eq(data, size_of::())?; - Ok(()) - } -} diff --git a/consensus/src/consensusdb/schema/mod.rs b/consensus/src/consensusdb/schema/mod.rs index 4557d2e722d85..3b2b511de9ab8 100644 --- a/consensus/src/consensusdb/schema/mod.rs +++ b/consensus/src/consensusdb/schema/mod.rs @@ -36,6 +36,6 @@ macro_rules! 
define_schema { } pub use block::BLOCK_CF_NAME; -pub use dag::{CERTIFIED_NODE_CF_NAME, DAG_VOTE_CF_NAME, NODE_CF_NAME, ORDERED_ANCHOR_ID_CF_NAME}; +pub use dag::{CERTIFIED_NODE_CF_NAME, DAG_VOTE_CF_NAME, NODE_CF_NAME}; pub use quorum_certificate::QC_CF_NAME; pub use single_entry::SINGLE_ENTRY_CF_NAME; diff --git a/consensus/src/dag/adapter.rs b/consensus/src/dag/adapter.rs index 23e047e439fa4..92d39d20dfff7 100644 --- a/consensus/src/dag/adapter.rs +++ b/consensus/src/dag/adapter.rs @@ -2,9 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - consensusdb::{ - CertifiedNodeSchema, ConsensusDB, DagVoteSchema, NodeSchema, OrderedAnchorIdSchema, - }, + consensusdb::{CertifiedNodeSchema, ConsensusDB, DagVoteSchema, NodeSchema}, dag::{ storage::{CommitEvent, DAGStorage}, CertifiedNode, Node, NodeId, Vote, @@ -20,56 +18,79 @@ use aptos_consensus_types::{ }; use aptos_crypto::HashValue; use aptos_executor_types::StateComputeResult; +use aptos_infallible::RwLock; use aptos_logger::error; use aptos_storage_interface::{DbReader, Order}; use aptos_types::{ account_config::{new_block_event_key, NewBlockEvent}, aggregate_signature::AggregateSignature, epoch_change::EpochChangeProof, + epoch_state::EpochState, ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, }; use async_trait::async_trait; use futures_channel::mpsc::UnboundedSender; use std::{collections::HashMap, sync::Arc}; -#[async_trait] -pub trait Notifier: Send { +pub trait OrderedNotifier: Send + Sync { fn send_ordered_nodes( - &mut self, + &self, ordered_nodes: Vec>, failed_author: Vec<(Round, Author)>, ) -> anyhow::Result<()>; +} +#[async_trait] +pub trait ProofNotifier: Send + Sync { async fn send_epoch_change(&self, proof: EpochChangeProof); async fn send_commit_proof(&self, ledger_info: LedgerInfoWithSignatures); } -pub struct NotifierAdapter { + +pub struct OrderedNotifierAdapter { executor_channel: UnboundedSender, storage: Arc, + parent_block_id: Arc>, + epoch_state: Arc, } -impl NotifierAdapter { +impl OrderedNotifierAdapter { pub fn new( executor_channel: UnboundedSender, storage: Arc, + epoch_state: Arc, ) -> Self { + let ledger_info_from_storage = storage + .get_latest_ledger_info() + .expect("latest ledger info must exist"); + + // We start from the block that storage's latest ledger info, if storage has end-epoch + // LedgerInfo, we generate the virtual genesis block + let parent_block_id = if ledger_info_from_storage.ledger_info().ends_epoch() { + let genesis = + Block::make_genesis_block_from_ledger_info(ledger_info_from_storage.ledger_info()); + + genesis.id() + } else { + ledger_info_from_storage.ledger_info().commit_info().id() + }; + Self { executor_channel, storage, + parent_block_id: Arc::new(RwLock::new(parent_block_id)), + epoch_state, } } } -#[async_trait] -impl Notifier for NotifierAdapter { +impl OrderedNotifier for OrderedNotifierAdapter { fn send_ordered_nodes( - &mut self, + &self, ordered_nodes: Vec>, failed_author: Vec<(Round, Author)>, ) -> anyhow::Result<()> { let anchor = ordered_nodes.last().unwrap(); - let anchor_id = anchor.id(); let epoch = anchor.epoch(); let round = anchor.round(); let timestamp = anchor.metadata().timestamp(); @@ -80,13 +101,37 @@ impl Notifier for NotifierAdapter { payload.extend(node.payload().clone()); node_digests.push(node.digest()); } - // TODO: we may want to split payload into multiple blocks + let parent_block_id = *self.parent_block_id.read(); + // construct the bitvec that indicates which nodes present in the previous round in CommitEvent + let mut parents_bitvec = 
BitVec::with_num_bits(self.epoch_state.verifier.len() as u16); + for parent in anchor.parents().iter() { + if let Some(idx) = self + .epoch_state + .verifier + .address_to_validator_index() + .get(parent.metadata().author()) + { + parents_bitvec.set(*idx as u16); + } + } + let block = ExecutedBlock::new( - Block::new_for_dag(epoch, round, timestamp, payload, author, failed_author)?, + Block::new_for_dag( + epoch, + round, + timestamp, + payload, + author, + failed_author, + parent_block_id, + parents_bitvec, + node_digests, + ), StateComputeResult::new_dummy(), ); let block_info = block.block_info(); let storage = self.storage.clone(); + *self.parent_block_id.write() = block.id(); Ok(self.executor_channel.unbounded_send(OrderedBlocks { ordered_blocks: vec![block], ordered_proof: LedgerInfoWithSignatures::new( @@ -94,31 +139,24 @@ impl Notifier for NotifierAdapter { AggregateSignature::empty(), ), callback: Box::new( - move |_committed_blocks: &[Arc], + move |committed_blocks: &[Arc], _commit_decision: LedgerInfoWithSignatures| { - // TODO: this doesn't really work since not every block will trigger a callback, - // we need to update the buffer manager to invoke all callbacks instead of only last one - if let Err(e) = storage - .delete_certified_nodes(node_digests) - .and_then(|_| storage.delete_ordered_anchor_ids(vec![anchor_id])) - { - error!( - "Failed to garbage collect committed nodes and anchor: {:?}", - e - ); + for executed_block in committed_blocks { + if let Some(node_digests) = executed_block.block().block_data().dag_nodes() + { + if let Err(e) = storage.delete_certified_nodes(node_digests.clone()) { + error!( + "Failed to garbage collect committed for block {}: {:?}", + executed_block.block(), + e + ); + } + } } }, ), })?) } - - async fn send_epoch_change(&self, _proof: EpochChangeProof) { - todo!() - } - - async fn send_commit_proof(&self, _ledger_info: LedgerInfoWithSignatures) { - todo!() - } } pub struct StorageAdapter { @@ -244,22 +282,6 @@ impl DAGStorage for StorageAdapter { Ok(self.consensus_db.delete::(digests)?) } - fn save_ordered_anchor_id(&self, node_id: &NodeId) -> anyhow::Result<()> { - Ok(self - .consensus_db - .put::(node_id, &())?) - } - - fn get_ordered_anchor_ids(&self) -> anyhow::Result> { - Ok(self.consensus_db.get_all::()?) - } - - fn delete_ordered_anchor_ids(&self, node_ids: Vec) -> anyhow::Result<()> { - Ok(self - .consensus_db - .delete::(node_ids)?) 
- } - fn get_latest_k_committed_events(&self, k: u64) -> anyhow::Result> { let latest_db_version = self.aptos_db.get_latest_version().unwrap_or(0); let mut commit_events = vec![]; @@ -278,6 +300,12 @@ impl DAGStorage for StorageAdapter { commit_events.push(self.convert(new_block_event)?); } } + commit_events.reverse(); Ok(commit_events) } + + fn get_latest_ledger_info(&self) -> anyhow::Result { + // TODO: use callback from notifier to cache the latest ledger info + self.aptos_db.get_latest_ledger_info() + } } diff --git a/consensus/src/dag/anchor_election.rs b/consensus/src/dag/anchor_election.rs index ff7c45dc5a72b..e01b44198b4bd 100644 --- a/consensus/src/dag/anchor_election.rs +++ b/consensus/src/dag/anchor_election.rs @@ -6,7 +6,13 @@ use aptos_consensus_types::common::{Author, Round}; pub trait AnchorElection: Send { fn get_anchor(&self, round: Round) -> Author; - fn commit(&mut self, round: Round); + fn update_reputation( + &mut self, + round: Round, + author: &Author, + parents: Vec, + failed_authors: Vec, + ); } pub struct RoundRobinAnchorElection { @@ -24,5 +30,12 @@ impl AnchorElection for RoundRobinAnchorElection { self.validators[(round / 2) as usize % self.validators.len()] } - fn commit(&mut self, _round: Round) {} + fn update_reputation( + &mut self, + _round: Round, + _author: &Author, + _parents: Vec, + _failed_authors: Vec, + ) { + } } diff --git a/consensus/src/dag/bootstrap.rs b/consensus/src/dag/bootstrap.rs index a28c613e24c0c..0199dff924d3f 100644 --- a/consensus/src/dag/bootstrap.rs +++ b/consensus/src/dag/bootstrap.rs @@ -1,133 +1,306 @@ // Copyright © Aptos Foundation use super::{ + adapter::{OrderedNotifier, OrderedNotifierAdapter}, anchor_election::RoundRobinAnchorElection, dag_driver::DagDriver, - dag_fetcher::{DagFetcherService, FetchRequestHandler}, + dag_fetcher::{DagFetcher, DagFetcherService, FetchRequestHandler}, dag_handler::NetworkHandler, dag_network::TDAGNetworkSender, - dag_state_sync::DAG_WINDOW, + dag_state_sync::{DagStateSynchronizer, StateSyncTrigger, DAG_WINDOW}, dag_store::Dag, order_rule::OrderRule, rb_handler::NodeBroadcastHandler, storage::DAGStorage, types::DAGMessage, + ProofNotifier, }; use crate::{ - dag::adapter::NotifierAdapter, experimental::buffer_manager::OrderedBlocks, - network::IncomingDAGRequest, state_replication::PayloadClient, + dag::dag_state_sync::StateSyncStatus, + experimental::buffer_manager::OrderedBlocks, + network::IncomingDAGRequest, + state_replication::{PayloadClient, StateComputer}, +}; +use aptos_channels::{ + aptos_channel::{self, Receiver}, + message_queues::QueueStyle, }; -use aptos_channels::{aptos_channel, message_queues::QueueStyle}; use aptos_consensus_types::common::Author; +use aptos_crypto::HashValue; use aptos_infallible::RwLock; +use aptos_logger::error; use aptos_reliable_broadcast::{RBNetworkSender, ReliableBroadcast}; use aptos_types::{ - epoch_state::EpochState, ledger_info::LedgerInfo, validator_signer::ValidatorSigner, + aggregate_signature::AggregateSignature, + block_info::BlockInfo, + epoch_state::EpochState, + ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, + validator_signer::ValidatorSigner, +}; +use futures_channel::{ + mpsc::{UnboundedReceiver, UnboundedSender}, + oneshot, }; -use futures::stream::{AbortHandle, Abortable}; use std::{sync::Arc, time::Duration}; +use tokio::{select, task::JoinHandle}; use tokio_retry::strategy::ExponentialBackoff; -pub fn bootstrap_dag( +struct DagBootstrapper { self_peer: Author, - signer: ValidatorSigner, + signer: Arc, epoch_state: Arc, - 
latest_ledger_info: LedgerInfo, storage: Arc, rb_network_sender: Arc>, dag_network_sender: Arc, + proof_notifier: Arc, time_service: aptos_time_service::TimeService, payload_client: Arc, -) -> ( - AbortHandle, - AbortHandle, - aptos_channel::Sender, - futures_channel::mpsc::UnboundedReceiver, -) { - let validators = epoch_state.verifier.get_ordered_account_addresses(); - let current_round = latest_ledger_info.round(); + state_computer: Arc, +} - let (ordered_nodes_tx, ordered_nodes_rx) = futures_channel::mpsc::unbounded(); - let adapter = Box::new(NotifierAdapter::new(ordered_nodes_tx, storage.clone())); - let (dag_rpc_tx, dag_rpc_rx) = aptos_channel::new(QueueStyle::FIFO, 64, None); +impl DagBootstrapper { + fn new( + self_peer: Author, + signer: Arc, + epoch_state: Arc, + storage: Arc, + rb_network_sender: Arc>, + dag_network_sender: Arc, + proof_notifier: Arc, + time_service: aptos_time_service::TimeService, + payload_client: Arc, + state_computer: Arc, + ) -> Self { + Self { + self_peer, + signer, + epoch_state, + storage, + rb_network_sender, + dag_network_sender, + proof_notifier, + time_service, + payload_client, + state_computer, + } + } - // A backoff policy that starts at 100ms and doubles each iteration. - let rb_backoff_policy = ExponentialBackoff::from_millis(2).factor(50); - let rb = Arc::new(ReliableBroadcast::new( - validators.clone(), - rb_network_sender, - rb_backoff_policy, - time_service.clone(), - // TODO: add to config - Duration::from_millis(500), - )); + fn bootstrap_dag_store( + &self, + latest_ledger_info: LedgerInfo, + notifier: Arc, + ) -> (Arc>, OrderRule) { + let dag = Arc::new(RwLock::new(Dag::new( + self.epoch_state.clone(), + self.storage.clone(), + latest_ledger_info.round(), + DAG_WINDOW, + ))); - let dag = Arc::new(RwLock::new(Dag::new( - epoch_state.clone(), - storage.clone(), - current_round, - DAG_WINDOW, - ))); + let validators = self.epoch_state.verifier.get_ordered_account_addresses(); + let anchor_election = Box::new(RoundRobinAnchorElection::new(validators)); - let anchor_election = Box::new(RoundRobinAnchorElection::new(validators)); - let order_rule = OrderRule::new( - epoch_state.clone(), - latest_ledger_info, - dag.clone(), - anchor_election, - adapter, - storage.clone(), - ); + let order_rule = OrderRule::new( + self.epoch_state.clone(), + latest_ledger_info, + dag.clone(), + anchor_election, + notifier, + self.storage.clone(), + ); - let (dag_fetcher, fetch_requester, node_fetch_waiter, certified_node_fetch_waiter) = - DagFetcherService::new( - epoch_state.clone(), - dag_network_sender, + (dag, order_rule) + } + + fn bootstrap_components( + &self, + dag: Arc>, + order_rule: OrderRule, + state_sync_trigger: StateSyncTrigger, + ) -> (NetworkHandler, DagFetcherService) { + let validators = self.epoch_state.verifier.get_ordered_account_addresses(); + + // A backoff policy that starts at 100ms and doubles each iteration. 
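// [Editor's note] Illustrative sketch only, not part of this patch: what the comment above means
// in practice. With tokio_retry, ExponentialBackoff::from_millis(2).factor(50) yields delays of
// 2^1 * 50 = 100 ms, 2^2 * 50 = 200 ms, 2^3 * 50 = 400 ms, and so on, i.e. the retry delay starts
// at 100 ms and doubles each iteration.
fn example_backoff_schedule() {
    use tokio_retry::strategy::ExponentialBackoff;

    let schedule: Vec<_> = ExponentialBackoff::from_millis(2).factor(50).take(3).collect();
    println!("{:?}", schedule); // [100ms, 200ms, 400ms]
}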
+ let rb_backoff_policy = ExponentialBackoff::from_millis(2).factor(50); + let rb = Arc::new(ReliableBroadcast::new( + validators.clone(), + self.rb_network_sender.clone(), + rb_backoff_policy, + self.time_service.clone(), + // TODO: add to config + Duration::from_millis(500), + )); + + let (dag_fetcher, fetch_requester, node_fetch_waiter, certified_node_fetch_waiter) = + DagFetcherService::new( + self.epoch_state.clone(), + self.dag_network_sender.clone(), + dag.clone(), + self.time_service.clone(), + ); + let fetch_requester = Arc::new(fetch_requester); + + let dag_driver = DagDriver::new( + self.self_peer, + self.epoch_state.clone(), + dag.clone(), + self.payload_client.clone(), + rb, + self.time_service.clone(), + self.storage.clone(), + order_rule, + fetch_requester.clone(), + ); + let rb_handler = NodeBroadcastHandler::new( dag.clone(), - time_service.clone(), + self.signer.clone(), + self.epoch_state.clone(), + self.storage.clone(), + fetch_requester, ); - let fetch_requester = Arc::new(fetch_requester); + let fetch_handler = FetchRequestHandler::new(dag, self.epoch_state.clone()); + + let dag_handler = NetworkHandler::new( + self.epoch_state.clone(), + rb_handler, + dag_driver, + fetch_handler, + node_fetch_waiter, + certified_node_fetch_waiter, + state_sync_trigger, + ); + + (dag_handler, dag_fetcher) + } + + async fn bootstrapper( + self, + mut dag_rpc_rx: Receiver, + ordered_nodes_tx: UnboundedSender, + mut shutdown_rx: oneshot::Receiver<()>, + ) { + let sync_manager = DagStateSynchronizer::new( + self.epoch_state.clone(), + self.time_service.clone(), + self.state_computer.clone(), + self.storage.clone(), + ); + + // TODO: fetch the correct block info + let ledger_info = LedgerInfoWithSignatures::new( + LedgerInfo::new(BlockInfo::empty(), HashValue::zero()), + AggregateSignature::empty(), + ); + + loop { + let adapter = Arc::new(OrderedNotifierAdapter::new( + ordered_nodes_tx.clone(), + self.storage.clone(), + self.epoch_state.clone(), + )); + + let (dag_store, order_rule) = + self.bootstrap_dag_store(ledger_info.ledger_info().clone(), adapter.clone()); - let dag_driver = DagDriver::new( + let state_sync_trigger = StateSyncTrigger::new( + self.epoch_state.clone(), + dag_store.clone(), + self.proof_notifier.clone(), + ); + + let (handler, fetch_service) = + self.bootstrap_components(dag_store.clone(), order_rule, state_sync_trigger); + + let df_handle = tokio::spawn(fetch_service.start()); + + // poll the network handler while waiting for rebootstrap notification or shutdown notification + select! 
{ + biased; + _ = &mut shutdown_rx => { + df_handle.abort(); + let _ = df_handle.await; + return; + }, + sync_status = handler.run(&mut dag_rpc_rx) => { + df_handle.abort(); + let _ = df_handle.await; + + match sync_status { + StateSyncStatus::NeedsSync(certified_node_msg) => { + let dag_fetcher = DagFetcher::new(self.epoch_state.clone(), self.dag_network_sender.clone(), self.time_service.clone()); + + if let Err(e) = sync_manager.sync_dag_to(&certified_node_msg, dag_fetcher, dag_store.clone()).await { + error!(error = ?e, "unable to sync"); + } + }, + StateSyncStatus::EpochEnds => { + // Wait for epoch manager to signal shutdown + _ = shutdown_rx.await; + return; + }, + _ => unreachable!() + } + + + } + } + } + } +} + +pub(super) fn bootstrap_dag_for_test( + self_peer: Author, + signer: ValidatorSigner, + epoch_state: Arc, + latest_ledger_info: LedgerInfo, + storage: Arc, + rb_network_sender: Arc>, + dag_network_sender: Arc, + proof_notifier: Arc, + time_service: aptos_time_service::TimeService, + payload_client: Arc, + state_computer: Arc, +) -> ( + JoinHandle, + JoinHandle<()>, + aptos_channel::Sender, + UnboundedReceiver, +) { + let bootstraper = DagBootstrapper::new( self_peer, + signer.into(), epoch_state.clone(), - dag.clone(), - payload_client, - rb, - time_service, storage.clone(), - order_rule, - fetch_requester.clone(), + rb_network_sender, + dag_network_sender, + proof_notifier.clone(), + time_service, + payload_client, + state_computer, ); - let rb_handler = NodeBroadcastHandler::new( - dag.clone(), - signer, - epoch_state.clone(), + + let (ordered_nodes_tx, ordered_nodes_rx) = futures_channel::mpsc::unbounded(); + let adapter = Arc::new(OrderedNotifierAdapter::new( + ordered_nodes_tx, storage.clone(), - fetch_requester, - ); - let fetch_handler = FetchRequestHandler::new(dag, epoch_state.clone()); - - let dag_handler = NetworkHandler::new( - epoch_state, - dag_rpc_rx, - rb_handler, - dag_driver, - fetch_handler, - node_fetch_waiter, - certified_node_fetch_waiter, - ); + epoch_state.clone(), + )); + let (dag_rpc_tx, dag_rpc_rx) = aptos_channel::new(QueueStyle::FIFO, 64, None); + + let (dag_store, order_rule) = + bootstraper.bootstrap_dag_store(latest_ledger_info, adapter.clone()); + + let state_sync_trigger = + StateSyncTrigger::new(epoch_state, dag_store.clone(), proof_notifier.clone()); - let (nh_abort_handle, nh_abort_registration) = AbortHandle::new_pair(); - let (df_abort_handle, df_abort_registration) = AbortHandle::new_pair(); + let (handler, fetch_service) = + bootstraper.bootstrap_components(dag_store.clone(), order_rule, state_sync_trigger); - tokio::spawn(Abortable::new(dag_handler.start(), nh_abort_registration)); - tokio::spawn(Abortable::new(dag_fetcher.start(), df_abort_registration)); + let dh_handle = tokio::spawn(async move { + let mut dag_rpc_rx = dag_rpc_rx; + handler.run(&mut dag_rpc_rx).await + }); + let df_handle = tokio::spawn(fetch_service.start()); - ( - nh_abort_handle, - df_abort_handle, - dag_rpc_tx, - ordered_nodes_rx, - ) + (dh_handle, df_handle, dag_rpc_tx, ordered_nodes_rx) } diff --git a/consensus/src/dag/commit_signer.rs b/consensus/src/dag/commit_signer.rs new file mode 100644 index 0000000000000..54e978e5e41fa --- /dev/null +++ b/consensus/src/dag/commit_signer.rs @@ -0,0 +1,30 @@ +// Copyright © Aptos Foundation + +use crate::experimental::signing_phase::CommitSignerProvider; +use aptos_crypto::bls12381; +use aptos_types::validator_signer::ValidatorSigner; + +pub struct DagCommitSigner { + signer: ValidatorSigner, +} + +impl 
DagCommitSigner { + pub fn new(signer: ValidatorSigner) -> Self { + Self { signer } + } +} + +impl CommitSignerProvider for DagCommitSigner { + fn sign_commit_vote( + &self, + _ledger_info: aptos_types::ledger_info::LedgerInfoWithSignatures, + new_ledger_info: aptos_types::ledger_info::LedgerInfo, + ) -> Result { + let signature = self + .signer + .sign(&new_ledger_info) + .map_err(|err| aptos_safety_rules::Error::SerializationError(err.to_string()))?; + + Ok(signature) + } +} diff --git a/consensus/src/dag/dag_driver.rs b/consensus/src/dag/dag_driver.rs index 3e79d444b6183..fd1cd07dc3709 100644 --- a/consensus/src/dag/dag_driver.rs +++ b/consensus/src/dag/dag_driver.rs @@ -5,29 +5,32 @@ use super::{ dag_fetcher::FetchRequester, order_rule::OrderRule, storage::DAGStorage, - types::{CertifiedAck, DAGMessage, Extensions}, + types::{CertifiedAck, CertifiedNodeMessage, DAGMessage, Extensions}, RpcHandler, }; use crate::{ dag::{ dag_fetcher::TFetchRequester, + dag_state_sync::DAG_WINDOW, dag_store::Dag, types::{CertificateAckState, CertifiedNode, Node, NodeCertificate, SignatureBuilder}, }, state_replication::PayloadClient, }; -use anyhow::{bail, Ok}; -use aptos_consensus_types::common::{Author, Payload}; +use anyhow::bail; +use aptos_consensus_types::common::{Author, PayloadFilter}; use aptos_infallible::RwLock; -use aptos_logger::error; +use aptos_logger::{debug, error}; use aptos_reliable_broadcast::ReliableBroadcast; use aptos_time_service::{TimeService, TimeServiceTrait}; use aptos_types::{block_info::Round, epoch_state::EpochState}; +use async_trait::async_trait; use futures::{ + executor::block_on, future::{AbortHandle, Abortable}, FutureExt, }; -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use thiserror::Error as ThisError; use tokio_retry::strategy::ExponentialBackoff; @@ -71,6 +74,12 @@ impl DagDriver { .read() .get_strong_links_for_round(highest_round, &epoch_state.verifier) .map_or_else(|| highest_round.saturating_sub(1), |_| highest_round); + + debug!( + "highest_round: {}, current_round: {}", + highest_round, current_round + ); + let mut driver = Self { author, epoch_state, @@ -96,37 +105,79 @@ impl DagDriver { .read() .get_strong_links_for_round(current_round, &driver.epoch_state.verifier) .unwrap_or(vec![]); - driver.enter_new_round(current_round + 1, strong_links); + block_on(driver.enter_new_round(current_round + 1, strong_links)); } driver } - pub fn add_node(&mut self, node: CertifiedNode) -> anyhow::Result<()> { - let mut dag_writer = self.dag.write(); - let round = node.metadata().round(); + pub async fn add_node(&mut self, node: CertifiedNode) -> anyhow::Result<()> { + let maybe_strong_links = { + let mut dag_writer = self.dag.write(); + let round = node.metadata().round(); - if !dag_writer.all_exists(node.parents_metadata()) { - if let Err(err) = self.fetch_requester.request_for_certified_node(node) { - error!("request to fetch failed: {}", err); + if !dag_writer.all_exists(node.parents_metadata()) { + if let Err(err) = self.fetch_requester.request_for_certified_node(node) { + error!("request to fetch failed: {}", err); + } + bail!(DagDriverError::MissingParents); } - bail!(DagDriverError::MissingParents); - } - dag_writer.add_node(node)?; - if self.current_round == round { - let maybe_strong_links = dag_writer - .get_strong_links_for_round(self.current_round, &self.epoch_state.verifier); - drop(dag_writer); - if let Some(strong_links) = maybe_strong_links { - self.enter_new_round(self.current_round + 1, strong_links); + dag_writer.add_node(node)?; + if 
self.current_round == round { + dag_writer + .get_strong_links_for_round(self.current_round, &self.epoch_state.verifier) + } else { + None } + }; + + if let Some(strong_links) = maybe_strong_links { + self.enter_new_round(self.current_round + 1, strong_links) + .await; } Ok(()) } - pub fn enter_new_round(&mut self, new_round: Round, strong_links: Vec) { - // TODO: support pulling payload - let payload = Payload::empty(false); + pub async fn enter_new_round(&mut self, new_round: Round, strong_links: Vec) { + debug!("entering new round {}", new_round); + let payload_filter = { + let dag_reader = self.dag.read(); + let highest_commit_round = dag_reader.highest_committed_anchor_round(); + if strong_links.is_empty() { + PayloadFilter::Empty + } else { + PayloadFilter::from( + &dag_reader + .reachable( + strong_links.iter().map(|node| node.metadata()), + Some(highest_commit_round.saturating_sub(DAG_WINDOW as u64)), + |_| true, + ) + .map(|node_status| node_status.as_node().payload()) + .collect(), + ) + } + }; + let payload = match self + .payload_client + .pull_payload( + Duration::from_secs(1), + 100, + 1000, + payload_filter, + Box::pin(async {}), + false, + 0, + 0.0, + ) + .await + { + Ok(payload) => payload, + Err(e) => { + error!("error pulling payload: {}", e); + return; + }, + }; // TODO: need to wait to pass median of parents timestamp let timestamp = self.time_service.now_unix_time(); self.current_round = new_round; @@ -151,12 +202,18 @@ impl DagDriver { let signature_builder = SignatureBuilder::new(node.metadata().clone(), self.epoch_state.clone()); let cert_ack_set = CertificateAckState::new(self.epoch_state.verifier.len()); + let latest_ledger_info = self + .storage + .get_latest_ledger_info() + .expect("latest ledger info must exist"); let task = self .reliable_broadcast .broadcast(node.clone(), signature_builder) .then(move |certificate| { let certified_node = CertifiedNode::new(node, certificate.signatures().to_owned()); - rb.broadcast(certified_node, cert_ack_set) + let certified_node_msg = + CertifiedNodeMessage::new(certified_node, latest_ledger_info); + rb.broadcast(certified_node_msg, cert_ack_set) }); tokio::spawn(Abortable::new(task, abort_registration)); if let Some(prev_handle) = self.rb_abort_handle.replace(abort_handle) { @@ -165,11 +222,12 @@ impl DagDriver { } } +#[async_trait] impl RpcHandler for DagDriver { type Request = CertifiedNode; type Response = CertifiedAck; - fn process(&mut self, node: Self::Request) -> anyhow::Result { + async fn process(&mut self, node: Self::Request) -> anyhow::Result { let epoch = node.metadata().epoch(); { let dag_reader = self.dag.read(); @@ -180,6 +238,7 @@ impl RpcHandler for DagDriver { let node_metadata = node.metadata().clone(); self.add_node(node) + .await .map(|_| self.order_rule.process_new_node(&node_metadata))?; Ok(CertifiedAck::new(epoch)) diff --git a/consensus/src/dag/dag_fetcher.rs b/consensus/src/dag/dag_fetcher.rs index 2e496ff60703d..30c9bdff407ae 100644 --- a/consensus/src/dag/dag_fetcher.rs +++ b/consensus/src/dag/dag_fetcher.rs @@ -273,7 +273,7 @@ impl TDagFetcher for DagFetcher { } } - if dag.read().all_exists(remote_request.targets().iter()) { + if dag.read().all_exists(remote_request.targets()) { return Ok(()); } } @@ -303,11 +303,12 @@ impl FetchRequestHandler { } } +#[async_trait] impl RpcHandler for FetchRequestHandler { type Request = RemoteFetchRequest; type Response = FetchResponse; - fn process(&mut self, message: Self::Request) -> anyhow::Result { + async fn process(&mut self, message: Self::Request) 
-> anyhow::Result { let dag_reader = self.dag.read(); // `Certified Node`: In the good case, there should exist at least one honest validator that @@ -316,7 +317,7 @@ impl RpcHandler for FetchRequestHandler { // `Node`: In the good case, the sender of the Node should have the parents in its local DAG // to satisfy this request. ensure!( - dag_reader.all_exists(message.targets().iter()), + dag_reader.all_exists(message.targets()), FetchRequestHandleError::TargetsMissing ); diff --git a/consensus/src/dag/dag_handler.rs b/consensus/src/dag/dag_handler.rs index 62e83ea056e99..0120409412106 100644 --- a/consensus/src/dag/dag_handler.rs +++ b/consensus/src/dag/dag_handler.rs @@ -3,6 +3,7 @@ use super::{ dag_driver::DagDriver, dag_fetcher::{FetchRequestHandler, FetchWaiter}, + dag_state_sync::{StateSyncStatus, StateSyncTrigger}, types::TDAGMessage, CertifiedNode, Node, }; @@ -13,7 +14,7 @@ use crate::{ use anyhow::bail; use aptos_channels::aptos_channel; use aptos_consensus_types::common::Author; -use aptos_logger::{error, warn}; +use aptos_logger::{debug, warn}; use aptos_network::protocols::network::RpcError; use aptos_types::epoch_state::EpochState; use bytes::Bytes; @@ -23,59 +24,95 @@ use tokio::select; pub(crate) struct NetworkHandler { epoch_state: Arc, - dag_rpc_rx: aptos_channel::Receiver, node_receiver: NodeBroadcastHandler, dag_driver: DagDriver, fetch_receiver: FetchRequestHandler, node_fetch_waiter: FetchWaiter, certified_node_fetch_waiter: FetchWaiter, + state_sync_trigger: StateSyncTrigger, } impl NetworkHandler { - pub fn new( + pub(super) fn new( epoch_state: Arc, - dag_rpc_rx: aptos_channel::Receiver, node_receiver: NodeBroadcastHandler, dag_driver: DagDriver, fetch_receiver: FetchRequestHandler, node_fetch_waiter: FetchWaiter, certified_node_fetch_waiter: FetchWaiter, + state_sync_trigger: StateSyncTrigger, ) -> Self { Self { epoch_state, - dag_rpc_rx, node_receiver, dag_driver, fetch_receiver, node_fetch_waiter, certified_node_fetch_waiter, + state_sync_trigger, } } - pub async fn start(mut self) { + pub async fn run( + mut self, + dag_rpc_rx: &mut aptos_channel::Receiver, + ) -> StateSyncStatus { // TODO(ibalajiarun): clean up Reliable Broadcast storage periodically. loop { select! 
{ - Some(msg) = self.dag_rpc_rx.next() => { - if let Err(e) = self.process_rpc(msg).await { - warn!(error = ?e, "error processing rpc"); + Some(msg) = dag_rpc_rx.next() => { + match self.process_rpc(msg).await { + Ok(sync_status) => { + if matches!(sync_status, StateSyncStatus::NeedsSync(_) | StateSyncStatus::EpochEnds) { + return sync_status; + } + }, + Err(e) => { + warn!(error = ?e, "error processing rpc"); + } } }, Some(res) = self.node_fetch_waiter.next() => { - if let Err(e) = res.map_err(|e| anyhow::anyhow!("recv error: {}", e)).and_then(|node| self.node_receiver.process(node)) { - warn!(error = ?e, "error processing node fetch notification"); - } + match res { + Ok(node) => if let Err(e) = self.node_receiver.process(node).await { + warn!(error = ?e, "error processing node fetch notification"); + }, + Err(e) => { + debug!("sender dropped channel: {}", e); + }, + }; }, Some(res) = self.certified_node_fetch_waiter.next() => { - if let Err(e) = res.map_err(|e| anyhow::anyhow!("recv error: {}", e)).and_then(|certified_node| self.dag_driver.process(certified_node)) { - warn!(error = ?e, "error processing certified node fetch notification"); - } + match res { + Ok(certified_node) => if let Err(e) = self.dag_driver.process(certified_node).await { + warn!(error = ?e, "error processing certified node fetch notification"); }, + Err(e) => { + debug!("sender dropped channel: {}", e); + }, + }; } } } } - async fn process_rpc(&mut self, rpc_request: IncomingDAGRequest) -> anyhow::Result<()> { + fn verify_incoming_rpc(&self, dag_message: &DAGMessage) -> Result<(), anyhow::Error> { + match dag_message { + DAGMessage::NodeMsg(node) => node.verify(&self.epoch_state.verifier), + DAGMessage::CertifiedNodeMsg(certified_node) => { + certified_node.verify(&self.epoch_state.verifier) + }, + DAGMessage::FetchRequest(request) => request.verify(&self.epoch_state.verifier), + _ => Err(anyhow::anyhow!( + "unexpected rpc message{:?}", + std::mem::discriminant(dag_message) + )), + } + } + + async fn process_rpc( + &mut self, + rpc_request: IncomingDAGRequest, + ) -> anyhow::Result { let dag_message: DAGMessage = rpc_request.req.try_into()?; let author = dag_message @@ -85,23 +122,32 @@ impl NetworkHandler { bail!("message author and network author mismatch"); } - let response: anyhow::Result = match dag_message { - DAGMessage::NodeMsg(node) => node - .verify(&self.epoch_state.verifier) - .and_then(|_| self.node_receiver.process(node)) - .map(|r| r.into()), - DAGMessage::CertifiedNodeMsg(node) => node - .verify(&self.epoch_state.verifier) - .and_then(|_| self.dag_driver.process(node)) - .map(|r| r.into()), - DAGMessage::FetchRequest(request) => request - .verify(&self.epoch_state.verifier) - .and_then(|_| self.fetch_receiver.process(request)) - .map(|r| r.into()), - _ => { - error!("unknown rpc message {:?}", dag_message); - Err(anyhow::anyhow!("unknown rpc message")) - }, + let response: anyhow::Result = { + let verification_result = self.verify_incoming_rpc(&dag_message); + match verification_result { + Ok(_) => match dag_message { + DAGMessage::NodeMsg(node) => { + self.node_receiver.process(node).await.map(|r| r.into()) + }, + DAGMessage::CertifiedNodeMsg(certified_node_msg) => { + match self.state_sync_trigger.check(certified_node_msg).await? 
{ + StateSyncStatus::Synced(Some(certified_node_msg)) => self + .dag_driver + .process(certified_node_msg.certified_node()) + .await + .map(|r| r.into()), + status @ (StateSyncStatus::NeedsSync(_) + | StateSyncStatus::EpochEnds) => return Ok(status), + _ => unreachable!(), + } + }, + DAGMessage::FetchRequest(request) => { + self.fetch_receiver.process(request).await.map(|r| r.into()) + }, + _ => unreachable!("verification must catch this error"), + }, + Err(err) => Err(err), + } }; let response = response @@ -117,5 +163,6 @@ impl NetworkHandler { .response_sender .send(response) .map_err(|_| anyhow::anyhow!("unable to respond to rpc")) + .map(|_| StateSyncStatus::Synced(None)) } } diff --git a/consensus/src/dag/dag_network.rs b/consensus/src/dag/dag_network.rs index b56511d961d73..ddab80237f3d2 100644 --- a/consensus/src/dag/dag_network.rs +++ b/consensus/src/dag/dag_network.rs @@ -17,11 +17,12 @@ use std::{ time::Duration, }; +#[async_trait] pub trait RpcHandler { type Request; type Response; - fn process(&mut self, message: Self::Request) -> anyhow::Result; + async fn process(&mut self, message: Self::Request) -> anyhow::Result; } #[async_trait] @@ -36,7 +37,7 @@ pub trait TDAGNetworkSender: Send + Sync + RBNetworkSender { /// Given a list of potential responders, sending rpc to get response from any of them and could /// fallback to more in case of failures. async fn send_rpc_with_fallbacks( - &self, + self: Arc, responders: Vec, message: DAGMessage, retry_interval: Duration, diff --git a/consensus/src/dag/dag_state_sync.rs b/consensus/src/dag/dag_state_sync.rs index 82b270291ca60..04e1027549bde 100644 --- a/consensus/src/dag/dag_state_sync.rs +++ b/consensus/src/dag/dag_state_sync.rs @@ -1,14 +1,14 @@ // Copyright © Aptos Foundation use super::{ - adapter::Notifier, - dag_fetcher::{DagFetcher, TDagFetcher}, + dag_fetcher::TDagFetcher, dag_store::Dag, storage::DAGStorage, types::{CertifiedNodeMessage, RemoteFetchRequest}, - TDAGNetworkSender, + ProofNotifier, }; use crate::state_replication::StateComputer; +use anyhow::ensure; use aptos_consensus_types::common::Round; use aptos_infallible::RwLock; use aptos_logger::error; @@ -24,66 +24,99 @@ use std::sync::Arc; pub const DAG_WINDOW: usize = 1; pub const STATE_SYNC_WINDOW_MULTIPLIER: usize = 30; -pub(super) struct StateSyncManager { +pub enum StateSyncStatus { + NeedsSync(CertifiedNodeMessage), + Synced(Option), + EpochEnds, +} + +pub(super) struct StateSyncTrigger { epoch_state: Arc, - network: Arc, - notifier: Arc, - time_service: TimeService, - state_computer: Arc, - storage: Arc, dag_store: Arc>, + proof_notifier: Arc, } -impl StateSyncManager { - pub fn new( +impl StateSyncTrigger { + pub(super) fn new( epoch_state: Arc, - network: Arc, - notifier: Arc, - time_service: TimeService, - state_computer: Arc, - storage: Arc, dag_store: Arc>, + proof_notifier: Arc, ) -> Self { Self { epoch_state, - network, - notifier, - time_service, - state_computer, - storage, dag_store, + proof_notifier, + } + } + + fn verify_ledger_info(&self, ledger_info: &LedgerInfoWithSignatures) -> anyhow::Result<()> { + ensure!(ledger_info.commit_info().epoch() == self.epoch_state.epoch); + + if ledger_info.commit_info().round() > 0 { + ledger_info + .verify_signatures(&self.epoch_state.verifier) + .map_err(|e| anyhow::anyhow!("unable to verify ledger info: {}", e))?; } + + Ok(()) } - pub async fn sync_to( + /// This method checks if a state sync is required, and if so, + /// notifies the bootstraper, to let the bootstraper can abort this task. 
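// Illustrative sketch (editor addition, not taken from this diff): the decision
// `check` makes, reduced to its three outcomes. The enum and function below are
// stand-ins invented for the sketch; only the variant names mirror StateSyncStatus.
enum SketchStatus {
    Synced,
    NeedsSync,
    EpochEnds,
}

fn sketch_check(needs_sync: bool, ends_epoch: bool) -> SketchStatus {
    if !needs_sync {
        SketchStatus::Synced // close enough: keep handling the message locally
    } else if ends_epoch {
        SketchStatus::EpochEnds // the attached ledger info closes the epoch; defer to the epoch manager
    } else {
        SketchStatus::NeedsSync // far behind: return the message so the bootstrapper can abort this handler and sync
    }
}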
+ pub(super) async fn check( &self, - node: &CertifiedNodeMessage, - ) -> anyhow::Result>>> { - self.sync_to_highest_commit_cert(node.ledger_info()).await; - self.try_sync_to_highest_ordered_anchor(node).await + node: CertifiedNodeMessage, + ) -> anyhow::Result { + let ledger_info_with_sigs = node.ledger_info(); + + if !self.need_sync_for_ledger_info(ledger_info_with_sigs) { + return Ok(StateSyncStatus::Synced(Some(node))); + } + + // Only verify the certificate if we need to sync + self.verify_ledger_info(ledger_info_with_sigs)?; + + self.notify_commit_proof(ledger_info_with_sigs).await; + + if ledger_info_with_sigs.ledger_info().ends_epoch() { + self.proof_notifier + .send_epoch_change(EpochChangeProof::new( + vec![ledger_info_with_sigs.clone()], + /* more = */ false, + )) + .await; + return Ok(StateSyncStatus::EpochEnds); + } + + Ok(StateSyncStatus::NeedsSync(node)) } /// Fast forward in the decoupled-execution pipeline if the block exists there - pub async fn sync_to_highest_commit_cert(&self, ledger_info: &LedgerInfoWithSignatures) { - let send_commit_proof = { - let dag_reader = self.dag_store.read(); - dag_reader.highest_committed_anchor_round() < ledger_info.commit_info().round() - && dag_reader - .highest_ordered_anchor_round() - .unwrap_or_default() - >= ledger_info.commit_info().round() - }; - + async fn notify_commit_proof(&self, ledger_info: &LedgerInfoWithSignatures) { // if the anchor exists between ledger info round and highest ordered round // Note: ledger info round <= highest ordered round - if send_commit_proof { - self.notifier.send_commit_proof(ledger_info.clone()).await + if self.dag_store.read().highest_committed_anchor_round() + < ledger_info.commit_info().round() + && self + .dag_store + .read() + .highest_ordered_anchor_round() + .unwrap_or_default() + >= ledger_info.commit_info().round() + { + self.proof_notifier + .send_commit_proof(ledger_info.clone()) + .await } } /// Check if we're far away from this ledger info and need to sync. /// This ensures that the block referred by the ledger info is not in buffer manager. 
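// Illustrative sketch (editor addition, not taken from this diff): the rough size
// of the sync window checked below. With DAG_WINDOW = 1 and
// STATE_SYNC_WINDOW_MULTIPLIER = 30 (defined earlier in this file), the
// committed-anchor branch of the predicate fires only when the remote commit
// round is more than 30 rounds past our highest committed anchor. The constant
// and helper here are stand-ins invented for the sketch.
const SKETCH_SYNC_WINDOW: u64 = 30; // STATE_SYNC_WINDOW_MULTIPLIER * DAG_WINDOW

fn sketch_committed_branch(local_committed_round: u64, remote_commit_round: u64) -> bool {
    // Mirrors the early return above: a commit at or below our own committed round never syncs.
    remote_commit_round > local_committed_round
        && local_committed_round + SKETCH_SYNC_WINDOW < remote_commit_round
}

// e.g. a remote commit at round 100 does not trigger this branch while our
// committed anchor is at round 70 (70 + 30 == 100), but does once we are at 69 or lower.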
- pub fn need_sync_for_ledger_info(&self, li: &LedgerInfoWithSignatures) -> bool { + fn need_sync_for_ledger_info(&self, li: &LedgerInfoWithSignatures) -> bool { + if li.commit_info().round() <= self.dag_store.read().highest_committed_anchor_round() { + return false; + } + let dag_reader = self.dag_store.read(); // check whether if DAG order round is behind the given ledger info round // (meaning consensus is behind) or @@ -97,43 +130,50 @@ impl StateSyncManager { + ((STATE_SYNC_WINDOW_MULTIPLIER * DAG_WINDOW) as Round) < li.commit_info().round() } +} - pub async fn try_sync_to_highest_ordered_anchor( - &self, - node: &CertifiedNodeMessage, - ) -> anyhow::Result>>> { - // Check whether to actually sync - let commit_li = node.ledger_info(); - if !self.need_sync_for_ledger_info(commit_li) { - return Ok(None); - } - - let dag_fetcher = Arc::new(DagFetcher::new( - self.epoch_state.clone(), - self.network.clone(), - self.time_service.clone(), - )); +pub(super) struct DagStateSynchronizer { + epoch_state: Arc, + time_service: TimeService, + state_computer: Arc, + storage: Arc, +} - self.sync_to_highest_ordered_anchor(node, dag_fetcher).await +impl DagStateSynchronizer { + pub fn new( + epoch_state: Arc, + time_service: TimeService, + state_computer: Arc, + storage: Arc, + ) -> Self { + Self { + epoch_state, + time_service, + state_computer, + storage, + } } /// Note: Assumes that the sync checks have been done - pub async fn sync_to_highest_ordered_anchor( + pub async fn sync_dag_to( &self, node: &CertifiedNodeMessage, - dag_fetcher: Arc, - ) -> anyhow::Result>>> { + dag_fetcher: impl TDagFetcher, + current_dag_store: Arc>, + ) -> anyhow::Result> { let commit_li = node.ledger_info(); - if commit_li.ledger_info().ends_epoch() { - self.notifier - .send_epoch_change(EpochChangeProof::new( - vec![commit_li.clone()], - /* more = */ false, - )) - .await; - // TODO: make sure to terminate DAG and yield to epoch manager - return Ok(None); + { + let dag_reader = current_dag_store.read(); + assert!( + dag_reader + .highest_ordered_anchor_round() + .unwrap_or_default() + < commit_li.commit_info().round() + || dag_reader.highest_committed_anchor_round() + + ((STATE_SYNC_WINDOW_MULTIPLIER * DAG_WINDOW) as Round) + < commit_li.commit_info().round() + ); } // TODO: there is a case where DAG fetches missing nodes in window and a crash happens and when we restart, @@ -177,8 +217,6 @@ impl StateSyncManager { // State sync self.state_computer.sync_to(commit_li.clone()).await?; - // TODO: the caller should rebootstrap the order rule - - Ok(Some(sync_dag_store)) + Ok(Arc::into_inner(sync_dag_store).map(|r| r.into_inner())) } } diff --git a/consensus/src/dag/dag_store.rs b/consensus/src/dag/dag_store.rs index 0fef37ae9b738..985d0032d462c 100644 --- a/consensus/src/dag/dag_store.rs +++ b/consensus/src/dag/dag_store.rs @@ -266,16 +266,16 @@ impl Dag { }) } - pub fn reachable( + pub fn reachable<'a>( &self, - targets: &[NodeMetadata], + targets: impl Iterator + Clone, until: Option, // TODO: replace filter with bool to filter unordered filter: impl Fn(&NodeStatus) -> bool, ) -> impl Iterator { let until = until.unwrap_or(self.lowest_round()); - let initial = targets.iter().map(|t| *t.digest()).collect(); - let initial_round = targets[0].round(); + let initial_round = targets.clone().map(|t| t.round()).max().unwrap(); + let initial = targets.map(|t| *t.digest()).collect(); let mut reachable_filter = Self::reachable_filter(initial); self.nodes_by_round diff --git a/consensus/src/dag/mod.rs b/consensus/src/dag/mod.rs index 
b4de8c1ca539f..17b4d7c751142 100644 --- a/consensus/src/dag/mod.rs +++ b/consensus/src/dag/mod.rs @@ -5,6 +5,7 @@ mod adapter; mod anchor_election; mod bootstrap; +mod commit_signer; mod dag_driver; mod dag_fetcher; mod dag_handler; @@ -18,5 +19,6 @@ mod storage; mod tests; mod types; +pub use adapter::ProofNotifier; pub use dag_network::{RpcHandler, RpcWithFallback, TDAGNetworkSender}; pub use types::{CertifiedNode, DAGMessage, DAGNetworkMessage, Extensions, Node, NodeId, Vote}; diff --git a/consensus/src/dag/order_rule.rs b/consensus/src/dag/order_rule.rs index 52687efd80cde..f5822caa86c76 100644 --- a/consensus/src/dag/order_rule.rs +++ b/consensus/src/dag/order_rule.rs @@ -2,8 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 use crate::dag::{ - adapter::Notifier, + adapter::OrderedNotifier, anchor_election::AnchorElection, + dag_state_sync::DAG_WINDOW, dag_store::{Dag, NodeStatus}, storage::DAGStorage, types::NodeMetadata, @@ -20,7 +21,7 @@ pub struct OrderRule { lowest_unordered_anchor_round: Round, dag: Arc>, anchor_election: Box, - notifier: Box, + notifier: Arc, storage: Arc, } @@ -29,55 +30,51 @@ impl OrderRule { epoch_state: Arc, latest_ledger_info: LedgerInfo, dag: Arc>, - anchor_election: Box, - notifier: Box, + mut anchor_election: Box, + notifier: Arc, storage: Arc, ) -> Self { - // TODO: we need to initialize the anchor election based on the dag - let mut anchors = storage.get_ordered_anchor_ids().unwrap(); - let mut expired = anchors.clone(); - expired.retain(|(id, _)| id.epoch() < epoch_state.epoch); - if let Err(e) = - storage.delete_ordered_anchor_ids(expired.into_iter().map(|(id, _)| id).collect()) - { - error!("Failed to delete expired anchors: {:?}", e); - } - anchors.retain(|(id, _)| id.epoch() == epoch_state.epoch); let committed_round = if latest_ledger_info.ends_epoch() { 0 } else { latest_ledger_info.round() }; + let commit_events = storage + .get_latest_k_committed_events(DAG_WINDOW as u64) + .expect("Failed to read commit events from storage"); + // make sure it's sorted + assert!(commit_events + .windows(2) + .all(|w| (w[0].epoch(), w[0].round()) < (w[1].epoch(), w[1].round()))); + for event in commit_events { + if event.epoch() == epoch_state.epoch { + let maybe_anchor = dag + .read() + .get_node_by_round_author(event.round(), event.author()) + .cloned(); + if let Some(anchor) = maybe_anchor { + dag.write() + .reachable_mut(&anchor, None) + .for_each(|node_status| node_status.mark_as_ordered()); + } + } + anchor_election.update_reputation( + event.round(), + event.author(), + event.parents(), + event.failed_authors(), + ); + } let mut order_rule = Self { epoch_state, - lowest_unordered_anchor_round: latest_ledger_info.commit_info().round() + 1, + lowest_unordered_anchor_round: committed_round + 1, dag, anchor_election, notifier, storage, }; - // Sort by round first, TODO: make the schema encode support the ordering directly - anchors.sort_by(|(a, _), (b, _)| a.round().cmp(&b.round())); - for (id, _) in anchors { - let maybe_anchor = order_rule - .dag - .read() - .get_node_by_round_author(id.round(), id.author()) - .cloned(); - if id.round() <= committed_round { - // mark already committed node - if let Some(anchor) = maybe_anchor { - order_rule - .dag - .write() - .reachable_mut(&anchor, None) - .for_each(|node_status| node_status.mark_as_ordered()); - } - } else { - // re-process pending anchors - order_rule.finalize_order(maybe_anchor.expect("Uncommitted anchor should exist")); - } - } + // re-check if anything can be ordered to recover pending anchors + 
order_rule.process_all(); order_rule } @@ -141,7 +138,7 @@ impl OrderRule { }; while let Some(prev_anchor) = dag_reader .reachable( - &[current_anchor.metadata().clone()], + Some(current_anchor.metadata().clone()).iter(), Some(self.lowest_unordered_anchor_round), |node_status| matches!(node_status, NodeStatus::Unordered(_)), ) @@ -157,32 +154,51 @@ impl OrderRule { /// Finalize the ordering with the given anchor node, update anchor election and construct blocks for execution. fn finalize_order(&mut self, anchor: Arc) { - let failed_authors: Vec<_> = (self.lowest_unordered_anchor_round..anchor.round()) - .step_by(2) - .map(|failed_round| (failed_round, self.anchor_election.get_anchor(failed_round))) - .collect(); + // Check we're in the expected instance assert!(Self::check_parity( self.lowest_unordered_anchor_round, anchor.round(), )); - self.lowest_unordered_anchor_round = anchor.round() + 1; + let lowest_round_to_reach = anchor.round().saturating_sub(DAG_WINDOW as u64); + + // Ceil it to the closest unordered anchor round + let lowest_anchor_round = std::cmp::max( + self.lowest_unordered_anchor_round, + lowest_round_to_reach + + !Self::check_parity(lowest_round_to_reach, anchor.round()) as u64, + ); + assert!(Self::check_parity(lowest_anchor_round, anchor.round())); + + let failed_authors: Vec<_> = (lowest_anchor_round..anchor.round()) + .step_by(2) + .map(|failed_round| (failed_round, self.anchor_election.get_anchor(failed_round))) + .collect(); + let parents = anchor + .parents() + .iter() + .map(|cert| *cert.metadata().author()) + .collect(); + self.anchor_election.update_reputation( + anchor.round(), + anchor.author(), + parents, + failed_authors.iter().map(|(_, author)| *author).collect(), + ); let mut dag_writer = self.dag.write(); let mut ordered_nodes: Vec<_> = dag_writer - .reachable_mut(&anchor, None) + .reachable_mut(&anchor, Some(lowest_round_to_reach)) .map(|node_status| { node_status.mark_as_ordered(); node_status.as_node().clone() }) .collect(); ordered_nodes.reverse(); + + self.lowest_unordered_anchor_round = anchor.round() + 1; if let Err(e) = self - .storage - .save_ordered_anchor_id(&anchor.id()) - .and_then(|_| { - self.notifier - .send_ordered_nodes(ordered_nodes, failed_authors) - }) + .notifier + .send_ordered_nodes(ordered_nodes, failed_authors) { error!("Failed to send ordered nodes {:?}", e); } diff --git a/consensus/src/dag/rb_handler.rs b/consensus/src/dag/rb_handler.rs index cf740f9db56be..4c1199bcc9b64 100644 --- a/consensus/src/dag/rb_handler.rs +++ b/consensus/src/dag/rb_handler.rs @@ -12,6 +12,7 @@ use aptos_consensus_types::common::{Author, Round}; use aptos_infallible::RwLock; use aptos_logger::error; use aptos_types::{epoch_state::EpochState, validator_signer::ValidatorSigner}; +use async_trait::async_trait; use std::{collections::BTreeMap, mem, sync::Arc}; use thiserror::Error as ThisError; @@ -28,7 +29,7 @@ pub enum NodeBroadcastHandleError { pub(crate) struct NodeBroadcastHandler { dag: Arc>, votes_by_round_peer: BTreeMap>, - signer: ValidatorSigner, + signer: Arc, epoch_state: Arc, storage: Arc, fetch_requester: Arc, @@ -37,7 +38,7 @@ pub(crate) struct NodeBroadcastHandler { impl NodeBroadcastHandler { pub fn new( dag: Arc>, - signer: ValidatorSigner, + signer: Arc, epoch_state: Arc, storage: Arc, fetch_requester: Arc, @@ -140,11 +141,12 @@ fn read_votes_from_storage( votes_by_round_peer } +#[async_trait] impl RpcHandler for NodeBroadcastHandler { type Request = Node; type Response = Vote; - fn process(&mut self, node: Self::Request) -> 
anyhow::Result { + async fn process(&mut self, node: Self::Request) -> anyhow::Result { let node = self.validate(node)?; let votes_by_peer = self diff --git a/consensus/src/dag/storage.rs b/consensus/src/dag/storage.rs index 211225e1585ca..30d4649564e39 100644 --- a/consensus/src/dag/storage.rs +++ b/consensus/src/dag/storage.rs @@ -3,8 +3,9 @@ use super::{types::Vote, NodeId}; use crate::dag::{CertifiedNode, Node}; -use aptos_consensus_types::common::Author; +use aptos_consensus_types::common::{Author, Round}; use aptos_crypto::HashValue; +use aptos_types::ledger_info::LedgerInfoWithSignatures; pub struct CommitEvent { node_id: NodeId, @@ -20,6 +21,26 @@ impl CommitEvent { failed_authors, } } + + pub fn epoch(&self) -> u64 { + self.node_id.epoch() + } + + pub fn round(&self) -> Round { + self.node_id.round() + } + + pub fn author(&self) -> &Author { + self.node_id.author() + } + + pub fn parents(&self) -> Vec { + self.parents.clone() + } + + pub fn failed_authors(&self) -> Vec { + self.failed_authors.clone() + } } pub trait DAGStorage: Send + Sync { @@ -41,11 +62,7 @@ pub trait DAGStorage: Send + Sync { fn delete_certified_nodes(&self, digests: Vec) -> anyhow::Result<()>; - fn save_ordered_anchor_id(&self, node_id: &NodeId) -> anyhow::Result<()>; - - fn get_ordered_anchor_ids(&self) -> anyhow::Result>; - - fn delete_ordered_anchor_ids(&self, node_ids: Vec) -> anyhow::Result<()>; - fn get_latest_k_committed_events(&self, k: u64) -> anyhow::Result>; + + fn get_latest_ledger_info(&self) -> anyhow::Result; } diff --git a/consensus/src/dag/tests/dag_driver_tests.rs b/consensus/src/dag/tests/dag_driver_tests.rs index 2e78f49878747..3a1f55a55b6cf 100644 --- a/consensus/src/dag/tests/dag_driver_tests.rs +++ b/consensus/src/dag/tests/dag_driver_tests.rs @@ -22,7 +22,9 @@ use aptos_infallible::RwLock; use aptos_reliable_broadcast::{RBNetworkSender, ReliableBroadcast}; use aptos_time_service::TimeService; use aptos_types::{ - epoch_state::EpochState, ledger_info::LedgerInfo, validator_verifier::random_validator_verifier, + epoch_state::EpochState, + ledger_info::{generate_ledger_info_with_sig, LedgerInfo}, + validator_verifier::random_validator_verifier, }; use async_trait::async_trait; use claims::{assert_ok, assert_ok_eq}; @@ -58,7 +60,7 @@ impl TDAGNetworkSender for MockNetworkSender { /// Given a list of potential responders, sending rpc to get response from any of them and could /// fallback to more in case of failures. 
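// Illustrative sketch (editor addition, not taken from this diff): with the
// receiver changed to `self: Arc<Self>`, callers hold the sender behind an Arc
// and the method can hand cheap Arc clones to the retry/fallback machinery
// instead of re-wrapping `self` on every call (as in the dag_network_test.rs
// update further below, where the sender is wrapped with Arc::new before the
// call). All types here are invented for the sketch.
use std::sync::Arc;

struct SketchSender;

impl SketchSender {
    fn send_with_fallbacks(self: Arc<Self>) {
        let _shared_for_retries = self.clone(); // clones the Arc, not the sender itself
    }
}

fn sketch_usage() {
    let sender = Arc::new(SketchSender);
    sender.send_with_fallbacks();
}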
async fn send_rpc_with_fallbacks( - &self, + self: Arc, _responders: Vec, _message: DAGMessage, _retry_interval: Duration, @@ -75,7 +77,10 @@ async fn test_certified_node_handler() { epoch: 1, verifier: validator_verifier, }); - let storage = Arc::new(MockStorage::new()); + + let mock_ledger_info = LedgerInfo::mock_genesis(None); + let mock_ledger_info = generate_ledger_info_with_sig(&signers, mock_ledger_info); + let storage = Arc::new(MockStorage::new_with_ledger_info(mock_ledger_info)); let dag = Arc::new(RwLock::new(Dag::new( epoch_state.clone(), storage.clone(), @@ -99,7 +104,7 @@ async fn test_certified_node_handler() { LedgerInfo::mock_genesis(None), dag.clone(), Box::new(RoundRobinAnchorElection::new(validators)), - Box::new(TestNotifier { tx }), + Arc::new(TestNotifier { tx }), storage.clone(), ); @@ -125,14 +130,14 @@ async fn test_certified_node_handler() { let first_round_node = new_certified_node(1, signers[0].author(), vec![]); // expect an ack for a valid message - assert_ok!(driver.process(first_round_node.clone())); + assert_ok!(driver.process(first_round_node.clone()).await); // expect an ack if the same message is sent again - assert_ok_eq!(driver.process(first_round_node), CertifiedAck::new(1)); + assert_ok_eq!(driver.process(first_round_node).await, CertifiedAck::new(1)); let parent_node = new_certified_node(1, signers[1].author(), vec![]); let invalid_node = new_certified_node(2, signers[0].author(), vec![parent_node.certificate()]); assert_eq!( - driver.process(invalid_node).unwrap_err().to_string(), + driver.process(invalid_node).await.unwrap_err().to_string(), DagDriverError::MissingParents.to_string() ); } diff --git a/consensus/src/dag/tests/dag_network_test.rs b/consensus/src/dag/tests/dag_network_test.rs index 2bf07bd8c1db9..edfd2fc76afb7 100644 --- a/consensus/src/dag/tests/dag_network_test.rs +++ b/consensus/src/dag/tests/dag_network_test.rs @@ -70,7 +70,7 @@ impl TDAGNetworkSender for MockDAGNetworkSender { } async fn send_rpc_with_fallbacks( - &self, + self: Arc, responders: Vec, message: DAGMessage, retry_interval: Duration, @@ -81,7 +81,7 @@ impl TDAGNetworkSender for MockDAGNetworkSender { message, retry_interval, rpc_timeout, - Arc::new(self.clone()), + self.clone(), self.time_service.clone(), ) } @@ -111,7 +111,7 @@ async fn test_send_rpc_with_fallback() { }; let message = TestMessage(vec![42; validators.len() - 1]); - let mut rpc = sender + let mut rpc = Arc::new(sender) .send_rpc_with_fallbacks( validators, message.into(), diff --git a/consensus/src/dag/tests/dag_state_sync_tests.rs b/consensus/src/dag/tests/dag_state_sync_tests.rs index 4644590ef78ce..37f4dea722a98 100644 --- a/consensus/src/dag/tests/dag_state_sync_tests.rs +++ b/consensus/src/dag/tests/dag_state_sync_tests.rs @@ -2,9 +2,9 @@ use crate::{ dag::{ - adapter::Notifier, + adapter::OrderedNotifier, dag_fetcher::{FetchRequestHandler, TDagFetcher}, - dag_state_sync::{StateSyncManager, DAG_WINDOW}, + dag_state_sync::{DagStateSynchronizer, DAG_WINDOW}, dag_store::Dag, storage::DAGStorage, tests::{dag_test::MockStorage, helpers::generate_dag_nodes}, @@ -21,7 +21,6 @@ use aptos_time_service::TimeService; use aptos_types::{ aggregate_signature::AggregateSignature, block_info::BlockInfo, - epoch_change::EpochChangeProof, epoch_state::EpochState, ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, validator_verifier::random_validator_verifier, @@ -58,7 +57,7 @@ impl TDAGNetworkSender for MockDAGNetworkSender { /// Given a list of potential responders, sending rpc to get response from any of 
them and could /// fallback to more in case of failures. async fn send_rpc_with_fallbacks( - &self, + self: Arc, _responders: Vec, _message: DAGMessage, _retry_interval: Duration, @@ -83,6 +82,7 @@ impl TDagFetcher for MockDagFetcher { ) -> anyhow::Result<()> { let response = FetchRequestHandler::new(self.target_dag.clone(), self.epoch_state.clone()) .process(remote_request) + .await .unwrap(); let mut new_dag_writer = new_dag.write(); @@ -98,39 +98,21 @@ impl TDagFetcher for MockDagFetcher { struct MockNotifier {} #[async_trait] -impl Notifier for MockNotifier { +impl OrderedNotifier for MockNotifier { fn send_ordered_nodes( - &mut self, + &self, _ordered_nodes: Vec>, _failed_author: Vec<(Round, Author)>, ) -> anyhow::Result<()> { Ok(()) } - - async fn send_epoch_change(&self, _proof: EpochChangeProof) {} - - async fn send_commit_proof(&self, _ledger_info: LedgerInfoWithSignatures) {} } -fn setup( - epoch_state: Arc, - dag_store: Arc>, - storage: Arc, -) -> StateSyncManager { - let network = Arc::new(MockDAGNetworkSender {}); +fn setup(epoch_state: Arc, storage: Arc) -> DagStateSynchronizer { let time_service = TimeService::mock(); let state_computer = Arc::new(EmptyStateComputer {}); - let upstream_notifier = Arc::new(MockNotifier {}); - - StateSyncManager::new( - epoch_state, - network, - upstream_notifier, - time_service, - state_computer, - storage, - dag_store, - ) + + DagStateSynchronizer::new(epoch_state, time_service, state_computer, storage) } #[tokio::test] @@ -193,24 +175,19 @@ async fn test_dag_state_sync() { let sync_node_li = CertifiedNodeMessage::new(sync_to_node, sync_to_li); - let state_sync = setup(epoch_state.clone(), slow_dag.clone(), storage.clone()); - let dag_fetcher = Arc::new(MockDagFetcher { + let state_sync = setup(epoch_state.clone(), storage.clone()); + let dag_fetcher = MockDagFetcher { target_dag: fast_dag.clone(), epoch_state: epoch_state.clone(), - }); + }; let sync_result = state_sync - .sync_to_highest_ordered_anchor(&sync_node_li, dag_fetcher) + .sync_dag_to(&sync_node_li, dag_fetcher, slow_dag.clone()) .await; let new_dag = sync_result.unwrap().unwrap(); - let dag_reader = new_dag.read(); - - assert_eq!(dag_reader.lowest_round(), (LI_ROUNDS - DAG_WINDOW) as Round); - assert_eq!(dag_reader.highest_round(), (NUM_ROUNDS - 1) as Round); - assert_none!(dag_reader.highest_ordered_anchor_round(),); - assert_eq!( - dag_reader.highest_committed_anchor_round(), - LI_ROUNDS as Round - ); + assert_eq!(new_dag.lowest_round(), (LI_ROUNDS - DAG_WINDOW) as Round); + assert_eq!(new_dag.highest_round(), (NUM_ROUNDS - 1) as Round); + assert_none!(new_dag.highest_ordered_anchor_round(),); + assert_eq!(new_dag.highest_committed_anchor_round(), LI_ROUNDS as Round); } diff --git a/consensus/src/dag/tests/dag_test.rs b/consensus/src/dag/tests/dag_test.rs index a40cfba8de160..5cf2bd8382b03 100644 --- a/consensus/src/dag/tests/dag_test.rs +++ b/consensus/src/dag/tests/dag_test.rs @@ -12,8 +12,8 @@ use crate::dag::{ use aptos_crypto::HashValue; use aptos_infallible::Mutex; use aptos_types::{ - epoch_state::EpochState, validator_signer::ValidatorSigner, - validator_verifier::random_validator_verifier, + epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures, + validator_signer::ValidatorSigner, validator_verifier::random_validator_verifier, }; use std::{collections::HashMap, sync::Arc}; @@ -21,6 +21,7 @@ pub struct MockStorage { node_data: Mutex>, vote_data: Mutex>, certified_node_data: Mutex>, + latest_ledger_info: Option, } impl MockStorage { @@ -29,6 +30,16 @@ impl 
MockStorage { node_data: Mutex::new(None), vote_data: Mutex::new(HashMap::new()), certified_node_data: Mutex::new(HashMap::new()), + latest_ledger_info: None, + } + } + + pub fn new_with_ledger_info(ledger_info: LedgerInfoWithSignatures) -> Self { + Self { + node_data: Mutex::new(None), + vote_data: Mutex::new(HashMap::new()), + certified_node_data: Mutex::new(HashMap::new()), + latest_ledger_info: Some(ledger_info), } } } @@ -87,20 +98,14 @@ impl DAGStorage for MockStorage { Ok(()) } - fn save_ordered_anchor_id(&self, _node_id: &NodeId) -> anyhow::Result<()> { - Ok(()) - } - - fn get_ordered_anchor_ids(&self) -> anyhow::Result> { + fn get_latest_k_committed_events(&self, _k: u64) -> anyhow::Result> { Ok(vec![]) } - fn delete_ordered_anchor_ids(&self, _node_ids: Vec) -> anyhow::Result<()> { - Ok(()) - } - - fn get_latest_k_committed_events(&self, _k: u64) -> anyhow::Result> { - Ok(vec![]) + fn get_latest_ledger_info(&self) -> anyhow::Result { + self.latest_ledger_info + .clone() + .ok_or_else(|| anyhow::anyhow!("ledger info not set")) } } diff --git a/consensus/src/dag/tests/fetcher_test.rs b/consensus/src/dag/tests/fetcher_test.rs index 28c6b9a3e08cc..8306f3db0fca9 100644 --- a/consensus/src/dag/tests/fetcher_test.rs +++ b/consensus/src/dag/tests/fetcher_test.rs @@ -14,8 +14,8 @@ use aptos_types::{epoch_state::EpochState, validator_verifier::random_validator_ use claims::assert_ok_eq; use std::sync::Arc; -#[test] -fn test_dag_fetcher_receiver() { +#[tokio::test] +async fn test_dag_fetcher_receiver() { let (signers, validator_verifier) = random_validator_verifier(4, None, false); let epoch_state = Arc::new(EpochState { epoch: 1, @@ -56,7 +56,7 @@ fn test_dag_fetcher_receiver() { DagSnapshotBitmask::new(1, vec![vec![true, false]]), ); assert_ok_eq!( - fetcher.process(request), + fetcher.process(request).await, FetchResponse::new(1, vec![first_round_nodes[1].clone()]) ); } diff --git a/consensus/src/dag/tests/integration_tests.rs b/consensus/src/dag/tests/integration_tests.rs index 1288716fa8df7..54ad393de5c3f 100644 --- a/consensus/src/dag/tests/integration_tests.rs +++ b/consensus/src/dag/tests/integration_tests.rs @@ -2,12 +2,12 @@ use super::dag_test; use crate::{ - dag::bootstrap::bootstrap_dag, + dag::{bootstrap::bootstrap_dag_for_test, dag_state_sync::StateSyncStatus}, experimental::buffer_manager::OrderedBlocks, - network::{DAGNetworkSenderImpl, IncomingDAGRequest, NetworkSender}, + network::{IncomingDAGRequest, NetworkSender}, network_interface::{ConsensusMsg, ConsensusNetworkClient, DIRECT_SEND, RPC}, network_tests::{NetworkPlayground, TwinId}, - test_utils::{consensus_runtime, MockPayloadManager, MockStorage}, + test_utils::{consensus_runtime, EmptyStateComputer, MockPayloadManager, MockStorage}, }; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; use aptos_config::network_id::{NetworkId, PeerNetworkId}; @@ -26,21 +26,23 @@ use aptos_network::{ use aptos_time_service::TimeService; use aptos_types::{ epoch_state::EpochState, + ledger_info::generate_ledger_info_with_sig, validator_signer::ValidatorSigner, validator_verifier::{random_validator_verifier, ValidatorVerifier}, }; use claims::assert_gt; use futures::{ - stream::{select, AbortHandle, Select}, + stream::{select, Select}, StreamExt, }; use futures_channel::mpsc::UnboundedReceiver; use maplit::hashmap; use std::sync::Arc; +use tokio::task::JoinHandle; struct DagBootstrapUnit { - nh_abort_handle: AbortHandle, - df_abort_handle: AbortHandle, + nh_task_handle: JoinHandle, + df_task_handle: JoinHandle<()>, 
dag_rpc_tx: aptos_channel::Sender, network_events: Box, aptos_channels::Receiver>>>, @@ -57,33 +59,40 @@ impl DagBootstrapUnit { network_events: Box< Select, aptos_channels::Receiver>>, >, + all_signers: Vec, ) -> (Self, UnboundedReceiver) { let epoch_state = EpochState { epoch, verifier: storage.get_validator_set().into(), }; - let dag_storage = dag_test::MockStorage::new(); + let ledger_info = generate_ledger_info_with_sig(&all_signers, storage.get_ledger_info()); + let dag_storage = dag_test::MockStorage::new_with_ledger_info(ledger_info); - let network = Arc::new(DAGNetworkSenderImpl::new(Arc::new(network))); + let network = Arc::new(network); let payload_client = Arc::new(MockPayloadManager::new(None)); - let (nh_abort_handle, df_abort_handle, dag_rpc_tx, ordered_nodes_rx) = bootstrap_dag( - self_peer, - signer, - Arc::new(epoch_state), - storage.get_ledger_info(), - Arc::new(dag_storage), - network.clone(), - network.clone(), - time_service, - payload_client, - ); + let state_computer = Arc::new(EmptyStateComputer {}); + + let (nh_abort_handle, df_abort_handle, dag_rpc_tx, ordered_nodes_rx) = + bootstrap_dag_for_test( + self_peer, + signer, + Arc::new(epoch_state), + storage.get_ledger_info(), + Arc::new(dag_storage), + network.clone(), + network.clone(), + network.clone(), + time_service, + payload_client, + state_computer, + ); ( Self { - nh_abort_handle, - df_abort_handle, + nh_task_handle: nh_abort_handle, + df_task_handle: df_abort_handle, dag_rpc_tx, network_events, }, @@ -186,6 +195,7 @@ fn bootstrap_nodes( network, aptos_time_service::TimeService::real(), network_events, + signers.clone(), ) }) .unzip(); diff --git a/consensus/src/dag/tests/order_rule_tests.rs b/consensus/src/dag/tests/order_rule_tests.rs index c403820cd89ca..1ebee05057fd8 100644 --- a/consensus/src/dag/tests/order_rule_tests.rs +++ b/consensus/src/dag/tests/order_rule_tests.rs @@ -3,7 +3,7 @@ use crate::{ dag::{ - adapter::Notifier, + adapter::OrderedNotifier, anchor_election::RoundRobinAnchorElection, dag_state_sync::DAG_WINDOW, dag_store::Dag, @@ -16,10 +16,7 @@ use crate::{ }; use aptos_consensus_types::common::{Author, Round}; use aptos_infallible::{Mutex, RwLock}; -use aptos_types::{ - epoch_change::EpochChangeProof, epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures, - validator_verifier::random_validator_verifier, -}; +use aptos_types::{epoch_state::EpochState, validator_verifier::random_validator_verifier}; use async_trait::async_trait; use futures_channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; use proptest::prelude::*; @@ -85,22 +82,14 @@ pub struct TestNotifier { } #[async_trait] -impl Notifier for TestNotifier { +impl OrderedNotifier for TestNotifier { fn send_ordered_nodes( - &mut self, + &self, ordered_nodes: Vec>, _failed_authors: Vec<(Round, Author)>, ) -> anyhow::Result<()> { Ok(self.tx.unbounded_send(ordered_nodes)?) 
} - - async fn send_epoch_change(&self, _proof: EpochChangeProof) { - unimplemented!() - } - - async fn send_commit_proof(&self, _ledger_info: LedgerInfoWithSignatures) { - unimplemented!() - } } fn create_order_rule( @@ -118,7 +107,7 @@ fn create_order_rule( ledger_info, dag, anchor_election, - Box::new(TestNotifier { tx }), + Arc::new(TestNotifier { tx }), Arc::new(MockStorage::new()), ), rx, @@ -256,7 +245,7 @@ fn test_order_rule_basic() { // anchor (2, 1) has 3 votes vec![(1, 2), (1, 1), (2, 1)], // anchor (3, 1) has 2 votes - vec![(1, 3), (2, 2), (2, 0), (3, 1)], + vec![(2, 2), (2, 0), (3, 1)], // anchor (4, 2) has 3 votes vec![(3, 3), (3, 2), (3, 0), (4, 2)], // anchor (5, 2) has 3 votes diff --git a/consensus/src/dag/tests/rb_handler_tests.rs b/consensus/src/dag/tests/rb_handler_tests.rs index d49503c5d1f4f..eece0f78e1e7d 100644 --- a/consensus/src/dag/tests/rb_handler_tests.rs +++ b/consensus/src/dag/tests/rb_handler_tests.rs @@ -17,6 +17,7 @@ use aptos_types::{ validator_verifier::random_validator_verifier, }; use claims::{assert_ok, assert_ok_eq}; +use futures::executor::block_on; use std::{collections::BTreeMap, sync::Arc}; struct MockFetchRequester {} @@ -38,6 +39,7 @@ async fn test_node_broadcast_receiver_succeed() { epoch: 1, verifier: validator_verifier.clone(), }); + let signers: Vec<_> = signers.into_iter().map(Arc::new).collect(); // Scenario: Start DAG from beginning let storage = Arc::new(MockStorage::new()); @@ -66,9 +68,12 @@ async fn test_node_broadcast_receiver_succeed() { wellformed_node.sign_vote(&signers[3]).unwrap(), ); // expect an ack for a valid message - assert_ok_eq!(rb_receiver.process(wellformed_node), expected_result); + assert_ok_eq!(rb_receiver.process(wellformed_node).await, expected_result); // expect the original ack for any future message from same author - assert_ok_eq!(rb_receiver.process(equivocating_node), expected_result); + assert_ok_eq!( + rb_receiver.process(equivocating_node).await, + expected_result + ); } // TODO: Unit test node broad receiver with a pruned DAG store. Possibly need a validator verifier trait. 
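// Illustrative sketch (editor addition, not taken from this diff): the shape of
// an RpcHandler implementation after the trait's `process` became async. The
// trait, handler and message types below are invented for the sketch; only the
// #[async_trait] / `.await` pattern mirrors the change exercised by these tests.
use async_trait::async_trait;

#[async_trait]
trait SketchRpcHandler {
    type Request;
    type Response;
    async fn process(&mut self, message: Self::Request) -> anyhow::Result<Self::Response>;
}

struct EchoHandler;

#[async_trait]
impl SketchRpcHandler for EchoHandler {
    type Request = u64;
    type Response = u64;

    async fn process(&mut self, message: u64) -> anyhow::Result<u64> {
        // A real handler would validate, persist and vote here; the sketch just echoes.
        Ok(message)
    }
}

// In an async test this is awaited directly; from a synchronous closure the
// tests in this diff fall back to futures::executor::block_on.
async fn sketch_call(handler: &mut EchoHandler) -> anyhow::Result<u64> {
    handler.process(7).await
}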
@@ -80,6 +85,7 @@ async fn test_node_broadcast_receiver_failure() { epoch: 1, verifier: validator_verifier.clone(), }); + let signers: Vec<_> = signers.into_iter().map(Arc::new).collect(); let mut rb_receivers: Vec<_> = signers .iter() @@ -104,7 +110,7 @@ async fn test_node_broadcast_receiver_failure() { // Round 1 let node = new_node(1, 10, signers[0].author(), vec![]); - let vote = rb_receivers[1].process(node.clone()).unwrap(); + let vote = rb_receivers[1].process(node.clone()).await.unwrap(); // Round 2 with invalid parent let partial_sigs = PartialSignatures::new(BTreeMap::from([( @@ -119,7 +125,7 @@ async fn test_node_broadcast_receiver_failure() { ); let node = new_node(2, 20, signers[0].author(), vec![node_cert]); assert_eq!( - rb_receivers[1].process(node).unwrap_err().to_string(), + rb_receivers[1].process(node).await.unwrap_err().to_string(), NodeBroadcastHandleError::InvalidParent.to_string(), ); @@ -133,7 +139,7 @@ async fn test_node_broadcast_receiver_failure() { .iter_mut() .zip(&signers) .for_each(|(rb_receiver, signer)| { - let sig = rb_receiver.process(node.clone()).unwrap(); + let sig = block_on(rb_receiver.process(node.clone())).unwrap(); partial_sigs.add_signature(signer.author(), sig.signature().clone()) }); NodeCertificate::new( @@ -148,18 +154,20 @@ async fn test_node_broadcast_receiver_failure() { // Add Round 2 node with proper certificates let node = new_node(2, 20, signers[0].author(), node_certificates); assert_eq!( - rb_receivers[0].process(node).unwrap_err().to_string(), + rb_receivers[0].process(node).await.unwrap_err().to_string(), NodeBroadcastHandleError::MissingParents.to_string() ); } -#[test] -fn test_node_broadcast_receiver_storage() { +#[tokio::test] +async fn test_node_broadcast_receiver_storage() { let (signers, validator_verifier) = random_validator_verifier(4, None, false); + let signers: Vec<_> = signers.into_iter().map(Arc::new).collect(); let epoch_state = Arc::new(EpochState { epoch: 1, verifier: validator_verifier, }); + let storage = Arc::new(MockStorage::new()); let dag = Arc::new(RwLock::new(Dag::new( epoch_state.clone(), @@ -177,7 +185,7 @@ fn test_node_broadcast_receiver_storage() { storage.clone(), Arc::new(MockFetchRequester {}), ); - let sig = rb_receiver.process(node).expect("must succeed"); + let sig = rb_receiver.process(node).await.expect("must succeed"); assert_ok_eq!(storage.get_votes(), vec![( NodeId::new(0, 1, signers[0].author()), diff --git a/consensus/src/dag/types.rs b/consensus/src/dag/types.rs index 56c8d190bf32b..6fd4b41c96c1a 100644 --- a/consensus/src/dag/types.rs +++ b/consensus/src/dag/types.rs @@ -399,6 +399,14 @@ impl CertifiedNode { pub fn certificate(&self) -> NodeCertificate { NodeCertificate::new(self.node.metadata.clone(), self.signatures.clone()) } + + pub fn verify(&self, verifier: &ValidatorVerifier) -> anyhow::Result<()> { + ensure!(self.digest() == self.calculate_digest(), "invalid digest"); + + verifier + .verify_multi_signatures(self.metadata(), self.certificate().signatures()) + .map_err(|e| anyhow::anyhow!("unable to verify: {}", e)) + } } impl Deref for CertifiedNode { @@ -409,16 +417,6 @@ impl Deref for CertifiedNode { } } -impl TDAGMessage for CertifiedNode { - fn verify(&self, verifier: &ValidatorVerifier) -> anyhow::Result<()> { - ensure!(self.digest() == self.calculate_digest(), "invalid digest"); - - verifier - .verify_multi_signatures(self.metadata(), self.certificate().signatures()) - .map_err(|e| anyhow::anyhow!("unable to verify: {}", e)) - } -} - #[derive(Serialize, Deserialize, Clone, 
Debug, PartialEq)] pub struct CertifiedNodeMessage { inner: CertifiedNode, @@ -433,6 +431,10 @@ impl CertifiedNodeMessage { } } + pub fn certified_node(self) -> CertifiedNode { + self.inner + } + pub fn ledger_info(&self) -> &LedgerInfoWithSignatures { &self.ledger_info } @@ -446,6 +448,16 @@ impl Deref for CertifiedNodeMessage { } } +impl TDAGMessage for CertifiedNodeMessage { + fn verify(&self, verifier: &ValidatorVerifier) -> anyhow::Result<()> { + self.inner.verify(verifier)?; + + self.ledger_info + .verify_signatures(verifier) + .map_err(|e| anyhow::anyhow!("unable to verify ledger info: {}", e)) + } +} + #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct Vote { metadata: NodeMetadata, @@ -533,7 +545,7 @@ impl CertifiedAck { impl BroadcastStatus for CertificateAckState { type Ack = CertifiedAck; type Aggregated = (); - type Message = CertifiedNode; + type Message = CertifiedNodeMessage; fn add(&mut self, peer: Author, _ack: Self::Ack) -> anyhow::Result> { self.received.insert(peer); @@ -568,8 +580,8 @@ impl RemoteFetchRequest { self.epoch } - pub fn targets(&self) -> &[NodeMetadata] { - &self.targets + pub fn targets(&self) -> impl Iterator + Clone { + self.targets.iter() } pub fn exists_bitmask(&self) -> &DagSnapshotBitmask { @@ -647,7 +659,7 @@ impl core::fmt::Debug for DAGNetworkMessage { pub enum DAGMessage { NodeMsg(Node), VoteMsg(Vote), - CertifiedNodeMsg(CertifiedNode), + CertifiedNodeMsg(CertifiedNodeMessage), CertifiedAckMsg(CertifiedAck), FetchRequest(RemoteFetchRequest), FetchResponse(FetchResponse), diff --git a/consensus/src/epoch_manager.rs b/consensus/src/epoch_manager.rs index 691d5c17eb33c..87b14eed4ff46 100644 --- a/consensus/src/epoch_manager.rs +++ b/consensus/src/epoch_manager.rs @@ -13,6 +13,7 @@ use crate::{ buffer_manager::{OrderedBlocks, ResetRequest}, decoupled_execution_utils::prepare_phases_and_buffer_manager, ordering_state_computer::OrderingStateComputer, + signing_phase::CommitSignerProvider, }, liveness::{ cached_proposer_election::CachedProposerElection, @@ -37,6 +38,7 @@ use crate::{ }, network_interface::{ConsensusMsg, ConsensusNetworkClient}, payload_client::QuorumStoreClient, + payload_manager::PayloadManager, persistent_liveness_storage::{LedgerRecoveryData, PersistentLivenessStorage, RecoveryData}, quorum_store::{ quorum_store_builder::{DirectMempoolInnerBuilder, InnerBuilder, QuorumStoreBuilder}, @@ -53,17 +55,19 @@ use crate::{ use anyhow::{bail, ensure, Context}; use aptos_bounded_executor::BoundedExecutor; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; -use aptos_config::config::{ConsensusConfig, NodeConfig}; +use aptos_config::config::{ConsensusConfig, NodeConfig, SecureBackend}; use aptos_consensus_types::{ common::{Author, Round}, epoch_retrieval::EpochRetrievalRequest, }; use aptos_event_notifications::ReconfigNotificationListener; +use aptos_global_constants::CONSENSUS_KEY; use aptos_infallible::{duration_since_epoch, Mutex}; use aptos_logger::prelude::*; use aptos_mempool::QuorumStoreRequest; use aptos_network::{application::interface::NetworkClient, protocols::network::Event}; use aptos_safety_rules::SafetyRulesManager; +use aptos_secure_storage::{KVStorage, Storage}; use aptos_types::{ account_address::AccountAddress, epoch_change::EpochChangeProof, @@ -72,6 +76,7 @@ use aptos_types::{ LeaderReputationType, OnChainConfigPayload, OnChainConfigProvider, OnChainConsensusConfig, OnChainExecutionConfig, ProposerElectionType, ValidatorSet, }, + validator_signer::ValidatorSigner, 
validator_verifier::ValidatorVerifier, }; use fail::fail_point; @@ -483,7 +488,7 @@ impl EpochManager
<P>
{ /// it sets `self.commit_msg_tx` to a new aptos_channel::Sender and returns an OrderingStateComputer fn spawn_decoupled_execution( &mut self, - safety_rules_container: Arc>, + commit_signer_provider: Arc, verifier: ValidatorVerifier, ) -> OrderingStateComputer { let network_sender = NetworkSender::new( @@ -506,20 +511,26 @@ impl EpochManager
<P>
{ self.buffer_manager_msg_tx = Some(commit_msg_tx); self.buffer_manager_reset_tx = Some(reset_tx.clone()); - let (execution_phase, signing_phase, persisting_phase, buffer_manager) = - prepare_phases_and_buffer_manager( - self.author, - self.commit_state_computer.clone(), - safety_rules_container, - network_sender, - commit_msg_rx, - self.commit_state_computer.clone(), - block_rx, - reset_rx, - verifier, - ); + let ( + execution_schedule_phase, + execution_wait_phase, + signing_phase, + persisting_phase, + buffer_manager, + ) = prepare_phases_and_buffer_manager( + self.author, + self.commit_state_computer.clone(), + commit_signer_provider, + network_sender, + commit_msg_rx, + self.commit_state_computer.clone(), + block_rx, + reset_rx, + verifier, + ); - tokio::spawn(execution_phase.start()); + tokio::spawn(execution_schedule_phase.start()); + tokio::spawn(execution_wait_phase.start()); tokio::spawn(signing_phase.start()); tokio::spawn(persisting_phase.start()); tokio::spawn(buffer_manager.start()); @@ -600,56 +611,11 @@ impl EpochManager
<P>
{ tokio::spawn(recovery_manager.start(recovery_manager_rx, close_rx)); } - async fn start_round_manager( + async fn init_payload_provider( &mut self, - recovery_data: RecoveryData, - epoch_state: EpochState, - onchain_consensus_config: OnChainConsensusConfig, - onchain_execution_config: OnChainExecutionConfig, - ) { - let epoch = epoch_state.epoch; - counters::EPOCH.set(epoch_state.epoch as i64); - counters::CURRENT_EPOCH_VALIDATORS.set(epoch_state.verifier.len() as i64); - info!( - epoch = epoch_state.epoch, - validators = epoch_state.verifier.to_string(), - root_block = %recovery_data.root_block(), - "Starting new epoch", - ); - let last_vote = recovery_data.last_vote(); - - info!(epoch = epoch, "Update SafetyRules"); - - let mut safety_rules = - MetricsSafetyRules::new(self.safety_rules_manager.client(), self.storage.clone()); - if let Err(error) = safety_rules.perform_initialize() { - error!( - epoch = epoch, - error = error, - "Unable to initialize safety rules.", - ); - } - - info!(epoch = epoch, "Create RoundState"); - let round_state = - self.create_round_state(self.time_service.clone(), self.timeout_sender.clone()); - - info!(epoch = epoch, "Create ProposerElection"); - let proposer_election = - self.create_proposer_election(&epoch_state, &onchain_consensus_config); - let network_sender = NetworkSender::new( - self.author, - self.network_sender.clone(), - self.self_sender.clone(), - epoch_state.verifier.clone(), - ); - let chain_health_backoff_config = - ChainHealthBackoffConfig::new(self.config.chain_health_backoff.clone()); - let pipeline_backpressure_config = - PipelineBackpressureConfig::new(self.config.pipeline_backpressure.clone()); - - let safety_rules_container = Arc::new(Mutex::new(safety_rules)); - + epoch_state: &EpochState, + network_sender: NetworkSender, + ) -> (Arc, QuorumStoreClient, QuorumStoreBuilder) { // Start QuorumStore let (consensus_to_quorum_store_tx, consensus_to_quorum_store_rx) = mpsc::channel(self.config.intra_consensus_channel_buffer_size); @@ -680,11 +646,6 @@ impl EpochManager
<P>
{ }; let (payload_manager, quorum_store_msg_tx) = quorum_store_builder.init_payload_manager(); - let transaction_shuffler = - create_transaction_shuffler(onchain_execution_config.transaction_shuffler_type()); - let block_gas_limit = onchain_execution_config.block_gas_limit(); - let transaction_deduper = - create_transaction_deduper(onchain_execution_config.transaction_deduper_type()); self.quorum_store_msg_tx = quorum_store_msg_tx; let payload_client = QuorumStoreClient::new( @@ -693,23 +654,119 @@ impl EpochManager
<P>
{ self.config.wait_for_full_blocks_above_recent_fill_threshold, self.config.wait_for_full_blocks_above_pending_blocks, ); + (payload_manager, payload_client, quorum_store_builder) + } + + fn init_state_computer( + &mut self, + epoch_state: &EpochState, + payload_manager: Arc, + onchain_consensus_config: &OnChainConsensusConfig, + onchain_execution_config: &OnChainExecutionConfig, + commit_signer_provider: Arc, + ) -> Arc { + let transaction_shuffler = + create_transaction_shuffler(onchain_execution_config.transaction_shuffler_type()); + let block_gas_limit = onchain_execution_config.block_gas_limit(); + let transaction_deduper = + create_transaction_deduper(onchain_execution_config.transaction_deduper_type()); self.commit_state_computer.new_epoch( - &epoch_state, - payload_manager.clone(), + epoch_state, + payload_manager, transaction_shuffler, block_gas_limit, transaction_deduper, ); - let state_computer = if onchain_consensus_config.decoupled_execution() { - Arc::new(self.spawn_decoupled_execution( - safety_rules_container.clone(), - epoch_state.verifier.clone(), - )) + + if onchain_consensus_config.decoupled_execution() { + Arc::new( + self.spawn_decoupled_execution( + commit_signer_provider, + epoch_state.verifier.clone(), + ), + ) } else { self.commit_state_computer.clone() - }; + } + } + + fn set_epoch_start_metrics(&self, epoch_state: &EpochState) { + counters::EPOCH.set(epoch_state.epoch as i64); + counters::CURRENT_EPOCH_VALIDATORS.set(epoch_state.verifier.len() as i64); + + counters::TOTAL_VOTING_POWER.set(epoch_state.verifier.total_voting_power() as f64); + counters::VALIDATOR_VOTING_POWER.set( + epoch_state + .verifier + .get_voting_power(&self.author) + .unwrap_or(0) as f64, + ); + epoch_state + .verifier + .get_ordered_account_addresses_iter() + .for_each(|peer_id| { + counters::ALL_VALIDATORS_VOTING_POWER + .with_label_values(&[&peer_id.to_string()]) + .set(epoch_state.verifier.get_voting_power(&peer_id).unwrap_or(0) as i64) + }); + } + + async fn start_round_manager( + &mut self, + recovery_data: RecoveryData, + epoch_state: EpochState, + onchain_consensus_config: OnChainConsensusConfig, + onchain_execution_config: OnChainExecutionConfig, + ) { + let epoch = epoch_state.epoch; + info!( + epoch = epoch_state.epoch, + validators = epoch_state.verifier.to_string(), + root_block = %recovery_data.root_block(), + "Starting new epoch", + ); + + info!(epoch = epoch, "Update SafetyRules"); + + let mut safety_rules = + MetricsSafetyRules::new(self.safety_rules_manager.client(), self.storage.clone()); + if let Err(error) = safety_rules.perform_initialize() { + error!( + epoch = epoch, + error = error, + "Unable to initialize safety rules.", + ); + } + + info!(epoch = epoch, "Create RoundState"); + let round_state = + self.create_round_state(self.time_service.clone(), self.timeout_sender.clone()); + + info!(epoch = epoch, "Create ProposerElection"); + let proposer_election = + self.create_proposer_election(&epoch_state, &onchain_consensus_config); + let network_sender = self.init_network_sender(&epoch_state); + let chain_health_backoff_config = + ChainHealthBackoffConfig::new(self.config.chain_health_backoff.clone()); + let pipeline_backpressure_config = + PipelineBackpressureConfig::new(self.config.pipeline_backpressure.clone()); + + let safety_rules_container = Arc::new(Mutex::new(safety_rules)); + + let (payload_manager, payload_client, quorum_store_builder) = self + .init_payload_provider(&epoch_state, network_sender.clone()) + .await; + let state_computer = 
self.init_state_computer( + &epoch_state, + payload_manager.clone(), + &onchain_consensus_config, + &onchain_execution_config, + safety_rules_container.clone(), + ); info!(epoch = epoch, "Create BlockStore"); + // Read the last vote, before "moving" `recovery_data` + let last_vote = recovery_data.last_vote(); let block_store = Arc::new(BlockStore::new( Arc::clone(&self.storage), recovery_data, @@ -754,21 +811,7 @@ impl EpochManager
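The hunk above factors state-computer construction into its own helper, which returns either the decoupled-execution pipeline or the direct commit state computer behind one shared handle. A minimal, self-contained sketch of that shape; the trait and types below are stand-ins, not the real Aptos ones:

use std::sync::Arc;

trait StateComputer: Send + Sync {
    fn name(&self) -> &'static str;
}

struct DirectComputer;
struct DecoupledComputer;

impl StateComputer for DirectComputer {
    fn name(&self) -> &'static str {
        "direct"
    }
}

impl StateComputer for DecoupledComputer {
    fn name(&self) -> &'static str {
        "decoupled"
    }
}

// Mirrors the if/else in init_state_computer: one on-chain flag decides which
// implementation ends up behind the shared Arc<dyn StateComputer>.
fn init_state_computer(decoupled_execution: bool) -> Arc<dyn StateComputer> {
    if decoupled_execution {
        Arc::new(DecoupledComputer)
    } else {
        Arc::new(DirectComputer)
    }
}

fn main() {
    assert_eq!(init_state_computer(true).name(), "decoupled");
    assert_eq!(init_state_computer(false).name(), "direct");
}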

{ self.round_manager_tx = Some(round_manager_tx.clone()); - counters::TOTAL_VOTING_POWER.set(epoch_state.verifier.total_voting_power() as f64); - counters::VALIDATOR_VOTING_POWER.set( - epoch_state - .verifier - .get_voting_power(&self.author) - .unwrap_or(0) as f64, - ); - epoch_state - .verifier - .get_ordered_account_addresses_iter() - .for_each(|peer_id| { - counters::ALL_VALIDATORS_VOTING_POWER - .with_label_values(&[&peer_id.to_string()]) - .set(epoch_state.verifier.get_voting_power(&peer_id).unwrap_or(0) as i64) - }); + self.set_epoch_start_metrics(&epoch_state); let mut round_manager = RoundManager::new( epoch_state, @@ -793,6 +836,15 @@ impl EpochManager

{ self.spawn_block_retrieval_task(epoch, block_store); } + fn init_network_sender(&self, epoch_state: &EpochState) -> NetworkSender { + NetworkSender::new( + self.author, + self.network_sender.clone(), + self.self_sender.clone(), + epoch_state.verifier.clone(), + ) + } + async fn start_new_epoch(&mut self, payload: OnChainConfigPayload

) { let validator_set: ValidatorSet = payload .get() @@ -814,11 +866,21 @@ impl EpochManager

{ self.epoch_state = Some(Arc::new(epoch_state.clone())); + let consensus_config = onchain_consensus_config.unwrap_or_default(); + let execution_config = onchain_execution_config + .unwrap_or_else(|_| OnChainExecutionConfig::default_if_missing()); + self.start_new_epoch_with_joltean(epoch_state, consensus_config, execution_config) + .await + } + + async fn start_new_epoch_with_joltean( + &mut self, + epoch_state: EpochState, + consensus_config: OnChainConsensusConfig, + execution_config: OnChainExecutionConfig, + ) { match self.storage.start() { LivenessStorageData::FullRecoveryData(initial_data) => { - let consensus_config = onchain_consensus_config.unwrap_or_default(); - let execution_config = onchain_execution_config - .unwrap_or_else(|_| OnChainExecutionConfig::default_if_missing()); self.quorum_store_enabled = self.enable_quorum_store(&consensus_config); self.recovery_mode = false; self.start_round_manager( @@ -1144,3 +1206,16 @@ impl EpochManager

{ } } } + +#[allow(dead_code)] +fn new_signer_from_storage(author: Author, backend: &SecureBackend) -> Arc { + let storage: Storage = backend.try_into().expect("Unable to initialize storage"); + if let Err(error) = storage.available() { + panic!("Storage is not available: {:?}", error); + } + let private_key = storage + .get(CONSENSUS_KEY) + .map(|v| v.value) + .expect("Unable to get private key"); + Arc::new(ValidatorSigner::new(author, private_key)) +} diff --git a/consensus/src/error.rs b/consensus/src/error.rs index b21b4177da707..dcb21ce30216c 100644 --- a/consensus/src/error.rs +++ b/consensus/src/error.rs @@ -25,8 +25,8 @@ impl From for StateSyncError { } } -impl From for StateSyncError { - fn from(e: aptos_executor_types::Error) -> Self { +impl From for StateSyncError { + fn from(e: aptos_executor_types::ExecutorError) -> Self { StateSyncError { inner: e.into() } } } @@ -53,12 +53,14 @@ pub struct VerifyError { } pub fn error_kind(e: &anyhow::Error) -> &'static str { - if e.downcast_ref::().is_some() { + if e.downcast_ref::() + .is_some() + { return "Execution"; } if let Some(e) = e.downcast_ref::() { if e.inner - .downcast_ref::() + .downcast_ref::() .is_some() { return "Execution"; @@ -90,7 +92,7 @@ mod tests { #[test] fn conversion_and_downcast() { - let error = aptos_executor_types::Error::InternalError { + let error = aptos_executor_types::ExecutorError::InternalError { error: "lalala".to_string(), }; let typed_error: StateSyncError = error.into(); diff --git a/consensus/src/execution_pipeline.rs b/consensus/src/execution_pipeline.rs new file mode 100644 index 0000000000000..09a2f59911a2d --- /dev/null +++ b/consensus/src/execution_pipeline.rs @@ -0,0 +1,156 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +#![forbid(unsafe_code)] + +use crate::{monitor, state_computer::StateComputeResultFut}; +use aptos_crypto::HashValue; +use aptos_executor_types::{ + state_checkpoint_output::StateCheckpointOutput, BlockExecutorTrait, ExecutorError, + ExecutorResult, StateComputeResult, +}; +use aptos_logger::{debug, error}; +use aptos_types::block_executor::partitioner::ExecutableBlock; +use fail::fail_point; +use std::sync::Arc; +use tokio::sync::{mpsc, oneshot}; + +pub struct ExecutionPipeline { + block_tx: mpsc::UnboundedSender, +} + +impl ExecutionPipeline { + pub fn spawn(executor: Arc, runtime: &tokio::runtime::Handle) -> Self { + let (block_tx, block_rx) = mpsc::unbounded_channel(); + let (ledger_apply_tx, ledger_apply_rx) = mpsc::unbounded_channel(); + runtime.spawn(Self::execute_stage( + block_rx, + ledger_apply_tx, + executor.clone(), + )); + runtime.spawn(Self::ledger_apply_stage(ledger_apply_rx, executor)); + Self { block_tx } + } + + pub async fn queue( + &self, + block: ExecutableBlock, + parent_block_id: HashValue, + maybe_block_gas_limit: Option, + ) -> StateComputeResultFut { + let (result_tx, result_rx) = oneshot::channel(); + let block_id = block.block_id; + self.block_tx + .send(ExecuteBlockCommand { + block, + parent_block_id, + maybe_block_gas_limit, + result_tx, + }) + .expect("Failed to send block to execution pipeline."); + + Box::pin(async move { + result_rx + .await + .map_err(|err| ExecutorError::InternalError { + error: format!( + "Failed to receive execution result for block {}: {:?}.", + block_id, err + ), + })? 
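The Error-to-ExecutorError rename above threads a single error enum (and an ExecutorResult alias) through the consensus crates. A tiny stand-in for that alias pattern, with variant names taken from the diff but otherwise simplified:

#[allow(dead_code)]
#[derive(Debug, PartialEq)]
enum ExecutorError {
    EmptyBlocks,
    CouldNotGetData,
    InternalError { error: String },
}

// The alias keeps signatures short: fn f() -> ExecutorResult<T>.
type ExecutorResult<T> = Result<T, ExecutorError>;

fn first_block(blocks: &[u64]) -> ExecutorResult<u64> {
    blocks.first().copied().ok_or(ExecutorError::EmptyBlocks)
}

fn main() {
    assert_eq!(first_block(&[]), Err(ExecutorError::EmptyBlocks));
    assert_eq!(first_block(&[7]), Ok(7));
}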
+ }) + } + + async fn execute_stage( + mut block_rx: mpsc::UnboundedReceiver, + ledger_apply_tx: mpsc::UnboundedSender, + executor: Arc, + ) { + while let Some(ExecuteBlockCommand { + block, + parent_block_id, + maybe_block_gas_limit, + result_tx, + }) = block_rx.recv().await + { + let block_id = block.block_id; + debug!("execute_stage received block {}.", block_id); + let executor = executor.clone(); + let state_checkpoint_output = monitor!( + "execute_block", + tokio::task::spawn_blocking(move || { + fail_point!("consensus::compute", |_| { + Err(ExecutorError::InternalError { + error: "Injected error in compute".into(), + }) + }); + executor.execute_and_state_checkpoint( + block, + parent_block_id, + maybe_block_gas_limit, + ) + }) + .await + ) + .expect("Failed to spawn_blocking."); + + ledger_apply_tx + .send(LedgerApplyCommand { + block_id, + parent_block_id, + state_checkpoint_output, + result_tx, + }) + .expect("Failed to send block to ledger_apply stage."); + } + debug!("execute_stage quitting."); + } + + async fn ledger_apply_stage( + mut block_rx: mpsc::UnboundedReceiver, + executor: Arc, + ) { + while let Some(LedgerApplyCommand { + block_id, + parent_block_id, + state_checkpoint_output, + result_tx, + }) = block_rx.recv().await + { + debug!("ledger_apply stage received block {}.", block_id); + let res = async { + let executor = executor.clone(); + monitor!( + "ledger_apply", + tokio::task::spawn_blocking(move || { + executor.ledger_update(block_id, parent_block_id, state_checkpoint_output?) + }) + ) + .await + .expect("Failed to spawn_blocking().") + } + .await; + result_tx.send(res).unwrap_or_else(|err| { + error!( + block_id = block_id, + "Failed to send back execution result for block {}: {:?}", block_id, err, + ); + }); + } + debug!("ledger_apply stage quitting."); + } +} + +struct ExecuteBlockCommand { + block: ExecutableBlock, + parent_block_id: HashValue, + maybe_block_gas_limit: Option, + result_tx: oneshot::Sender>, +} + +struct LedgerApplyCommand { + block_id: HashValue, + parent_block_id: HashValue, + state_checkpoint_output: ExecutorResult, + result_tx: oneshot::Sender>, +} diff --git a/consensus/src/experimental/buffer_item.rs b/consensus/src/experimental/buffer_item.rs index 8b559c122f27e..cf9ec3b397a15 100644 --- a/consensus/src/experimental/buffer_item.rs +++ b/consensus/src/experimental/buffer_item.rs @@ -11,6 +11,7 @@ use aptos_consensus_types::{ common::Author, executed_block::ExecutedBlock, experimental::commit_vote::CommitVote, }; use aptos_crypto::{bls12381, HashValue}; +use aptos_executor_types::ExecutorResult; use aptos_logger::prelude::*; use aptos_types::{ aggregate_signature::PartialSignatures, @@ -18,6 +19,7 @@ use aptos_types::{ ledger_info::{LedgerInfo, LedgerInfoWithPartialSignatures, LedgerInfoWithSignatures}, validator_verifier::ValidatorVerifier, }; +use futures::future::BoxFuture; use itertools::zip_eq; use tokio::time::Instant; @@ -131,6 +133,8 @@ impl Hashable for BufferItem { } } +pub type ExecutionFut = BoxFuture<'static, ExecutorResult>>; + impl BufferItem { pub fn new_ordered( ordered_blocks: Vec, diff --git a/consensus/src/experimental/buffer_manager.rs b/consensus/src/experimental/buffer_manager.rs index b4935b44d42bc..4420df3530621 100644 --- a/consensus/src/experimental/buffer_manager.rs +++ b/consensus/src/experimental/buffer_manager.rs @@ -9,7 +9,8 @@ use crate::{ buffer::{Buffer, Cursor}, buffer_item::BufferItem, commit_reliable_broadcast::{AckState, CommitMessage, DropGuard}, - execution_phase::{ExecutionRequest, 
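A self-contained toy version of the two-stage pipeline introduced in execution_pipeline.rs, using only tokio and invented names: stage one does the heavy work on a blocking thread, stage two finishes it, and the caller awaits the final value through a oneshot it received when queueing.

use tokio::sync::{mpsc, oneshot};

struct Job {
    input: u64,
    result_tx: oneshot::Sender<u64>,
}

// Stage one: pull jobs, do the expensive part off the async runtime, hand off.
async fn execute_stage(
    mut job_rx: mpsc::UnboundedReceiver<Job>,
    apply_tx: mpsc::UnboundedSender<(u64, oneshot::Sender<u64>)>,
) {
    while let Some(Job { input, result_tx }) = job_rx.recv().await {
        let partial = tokio::task::spawn_blocking(move || input * 2)
            .await
            .expect("spawn_blocking failed");
        apply_tx
            .send((partial, result_tx))
            .expect("apply stage gone");
    }
}

// Stage two: finish the work and fulfil the caller's oneshot.
async fn apply_stage(mut apply_rx: mpsc::UnboundedReceiver<(u64, oneshot::Sender<u64>)>) {
    while let Some((partial, result_tx)) = apply_rx.recv().await {
        let _ = result_tx.send(partial + 1);
    }
}

#[tokio::main]
async fn main() {
    let (job_tx, job_rx) = mpsc::unbounded_channel();
    let (apply_tx, apply_rx) = mpsc::unbounded_channel();
    tokio::spawn(execute_stage(job_rx, apply_tx));
    tokio::spawn(apply_stage(apply_rx));

    // Like ExecutionPipeline::queue: send the job, keep a future for the result.
    let (result_tx, result_rx) = oneshot::channel();
    job_tx.send(Job { input: 20, result_tx }).unwrap();
    assert_eq!(result_rx.await.unwrap(), 41);
}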
ExecutionResponse}, + execution_schedule_phase::ExecutionRequest, + execution_wait_phase::{ExecutionResponse, ExecutionWaitRequest}, persisting_phase::PersistingRequest, pipeline_phase::CountedRequest, signing_phase::{SigningRequest, SigningResponse}, @@ -83,8 +84,10 @@ pub struct BufferManager { // the roots point to the first *unprocessed* item. // None means no items ready to be processed (either all processed or no item finishes previous stage) execution_root: BufferItemRootType, - execution_phase_tx: Sender>, - execution_phase_rx: Receiver, + execution_schedule_phase_tx: Sender>, + execution_schedule_phase_rx: Receiver, + execution_wait_phase_tx: Sender>, + execution_wait_phase_rx: Receiver, signing_root: BufferItemRootType, signing_phase_tx: Sender>, @@ -120,8 +123,10 @@ pub struct BufferManager { impl BufferManager { pub fn new( author: Author, - execution_phase_tx: Sender>, - execution_phase_rx: Receiver, + execution_schedule_phase_tx: Sender>, + execution_schedule_phase_rx: Receiver, + execution_wait_phase_tx: Sender>, + execution_wait_phase_rx: Receiver, signing_phase_tx: Sender>, signing_phase_rx: Receiver, commit_msg_tx: Arc, @@ -146,8 +151,10 @@ impl BufferManager { buffer, execution_root: None, - execution_phase_tx, - execution_phase_rx, + execution_schedule_phase_tx, + execution_schedule_phase_rx, + execution_wait_phase_tx, + execution_wait_phase_rx, signing_root: None, signing_phase_tx, @@ -208,7 +215,7 @@ impl BufferManager { /// process incoming ordered blocks /// push them into the buffer and update the roots if they are none. - fn process_ordered_blocks(&mut self, ordered_blocks: OrderedBlocks) { + async fn process_ordered_blocks(&mut self, ordered_blocks: OrderedBlocks) { let OrderedBlocks { ordered_blocks, ordered_proof, @@ -220,6 +227,15 @@ impl BufferManager { ordered_proof.commit_info(), self.buffer.len() + 1, ); + + let request = self.create_new_request(ExecutionRequest { + ordered_blocks: ordered_blocks.clone(), + }); + self.execution_schedule_phase_tx + .send(request) + .await + .expect("Failed to send execution schedule request"); + let item = BufferItem::new_ordered(ordered_blocks, ordered_proof, callback); self.buffer.push_back(item); } @@ -237,19 +253,19 @@ impl BufferManager { "Advance execution root from {:?} to {:?}", cursor, self.execution_root ); - if self.execution_root.is_some() { + if self.execution_root.is_some() && cursor == self.execution_root { + // Schedule retry. + // NOTE: probably should schedule retry for all ordered blocks, but since execution error + // is not expected nor retryable in reality, I'd rather remove retrying or do it more + // properly than complicating it here. let ordered_blocks = self.buffer.get(&self.execution_root).get_blocks().clone(); let request = self.create_new_request(ExecutionRequest { ordered_blocks }); - if cursor == self.execution_root { - let sender = self.execution_phase_tx.clone(); - Self::spawn_retry_request(sender, request, Duration::from_millis(100)); - } else { - self.execution_phase_tx - .send(request) - .await - .expect("Failed to send execution request") - } + let sender = self.execution_schedule_phase_tx.clone(); + Self::spawn_retry_request(sender, request, Duration::from_millis(100)); } + // Otherwise do nothing, because the execution wait phase is driven by the response of + // the execution schedule phase, which is in turn fed as soon as the ordered blocks + // come in. 
} /// Set the signing root to the first not signed item (Executed) and send execution request @@ -374,6 +390,15 @@ impl BufferManager { info!("Reset finishes"); } + async fn process_execution_schedule_response(&mut self, response: ExecutionWaitRequest) { + // pass through to the execution wait phase + let request = self.create_new_request(response); + self.execution_wait_phase_tx + .send(request) + .await + .expect("Failed to send execution wait request."); + } + /// If the response is successful, advance the item to Executed, otherwise panic (TODO fix). async fn process_execution_response(&mut self, response: ExecutionResponse) { let ExecutionResponse { block_id, inner } = response; @@ -394,6 +419,16 @@ impl BufferManager { "Receive executed response {}", executed_blocks.last().unwrap().block_info() ); + let current_item = self.buffer.get(¤t_cursor); + + if current_item.block_id() != block_id { + error!( + block_id = block_id, + expected_block_id = current_item.block_id(), + "Received result for unexpected block id. Ignoring." + ); + return; + } // Handle reconfiguration timestamp reconciliation. // end epoch timestamp is set to the first block that causes the reconfiguration. @@ -654,7 +689,7 @@ impl BufferManager { ::futures::select! { blocks = self.block_rx.select_next_some() => { monitor!("buffer_manager_process_ordered", { - self.process_ordered_blocks(blocks); + self.process_ordered_blocks(blocks).await; if self.execution_root.is_none() { self.advance_execution_root().await; }}); @@ -663,8 +698,12 @@ impl BufferManager { monitor!("buffer_manager_process_reset", self.process_reset_request(reset_event).await); }, - response = self.execution_phase_rx.select_next_some() => { - monitor!("buffer_manager_process_execution_response", { + response = self.execution_schedule_phase_rx.select_next_some() => { + monitor!("buffer_manager_process_execution_schedule_response", { + self.process_execution_schedule_response(response).await; + })}, + response = self.execution_wait_phase_rx.select_next_some() => { + monitor!("buffer_manager_process_execution_wait_response", { self.process_execution_response(response).await; self.advance_execution_root().await; if self.signing_root.is_none() { diff --git a/consensus/src/experimental/decoupled_execution_utils.rs b/consensus/src/experimental/decoupled_execution_utils.rs index 506e23b2793ca..fc548d92d32fe 100644 --- a/consensus/src/experimental/decoupled_execution_utils.rs +++ b/consensus/src/experimental/decoupled_execution_utils.rs @@ -5,18 +5,17 @@ use crate::{ experimental::{ buffer_manager::{create_channel, BufferManager, OrderedBlocks, ResetRequest}, - execution_phase::{ExecutionPhase, ExecutionRequest, ExecutionResponse}, + execution_schedule_phase::{ExecutionRequest, ExecutionSchedulePhase}, + execution_wait_phase::{ExecutionResponse, ExecutionWaitPhase, ExecutionWaitRequest}, persisting_phase::{PersistingPhase, PersistingRequest}, pipeline_phase::{CountedRequest, PipelinePhase}, - signing_phase::{SigningPhase, SigningRequest, SigningResponse}, + signing_phase::{CommitSignerProvider, SigningPhase, SigningRequest, SigningResponse}, }, - metrics_safety_rules::MetricsSafetyRules, network::{IncomingCommitRequest, NetworkSender}, state_replication::StateComputer, }; use aptos_channels::aptos_channel::Receiver; use aptos_consensus_types::common::Author; -use aptos_infallible::Mutex; use aptos_types::{account_address::AccountAddress, validator_verifier::ValidatorVerifier}; use futures::channel::mpsc::UnboundedReceiver; use std::sync::{atomic::AtomicU64, 
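The buffer manager's main loop above gains a second pair of channels: a schedule response is passed straight through to the wait phase, and wait responses are handled when they arrive. A toy select loop with two futures-mpsc channels (all names invented) showing that shape:

use futures::{channel::mpsc, StreamExt};

#[tokio::main]
async fn main() {
    let (schedule_tx, mut schedule_rx) = mpsc::unbounded::<u32>();
    let (wait_tx, mut wait_rx) = mpsc::unbounded::<u32>();

    // Feed one "schedule response" into the loop.
    schedule_tx.unbounded_send(7).unwrap();

    for _ in 0..2 {
        futures::select! {
            scheduled = schedule_rx.select_next_some() => {
                // Pass-through, like process_execution_schedule_response.
                wait_tx.unbounded_send(scheduled).unwrap();
            },
            finished = wait_rx.select_next_some() => {
                assert_eq!(finished, 7);
            },
        }
    }
}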
Arc}; @@ -25,7 +24,7 @@ use std::sync::{atomic::AtomicU64, Arc}; pub fn prepare_phases_and_buffer_manager( author: Author, execution_proxy: Arc, - safety_rules: Arc>, + safety_rules: Arc, commit_msg_tx: NetworkSender, commit_msg_rx: Receiver, persisting_proxy: Arc, @@ -33,24 +32,35 @@ pub fn prepare_phases_and_buffer_manager( sync_rx: UnboundedReceiver, verifier: ValidatorVerifier, ) -> ( - PipelinePhase, + PipelinePhase, + PipelinePhase, PipelinePhase, PipelinePhase, BufferManager, ) { + let ongoing_tasks = Arc::new(AtomicU64::new(0)); + // Execution Phase - let (execution_phase_request_tx, execution_phase_request_rx) = + let (execution_schedule_phase_request_tx, execution_schedule_phase_request_rx) = create_channel::>(); - let (execution_phase_response_tx, execution_phase_response_rx) = - create_channel::(); - - let ongoing_tasks = Arc::new(AtomicU64::new(0)); + let (execution_schedule_phase_response_tx, execution_schedule_phase_response_rx) = + create_channel::(); + let execution_schedule_phase_processor = ExecutionSchedulePhase::new(execution_proxy); + let execution_schedule_phase = PipelinePhase::new( + execution_schedule_phase_request_rx, + Some(execution_schedule_phase_response_tx), + Box::new(execution_schedule_phase_processor), + ); - let execution_phase_processor = ExecutionPhase::new(execution_proxy); - let execution_phase = PipelinePhase::new( - execution_phase_request_rx, - Some(execution_phase_response_tx), - Box::new(execution_phase_processor), + let (execution_wait_phase_request_tx, execution_wait_phase_request_rx) = + create_channel::>(); + let (execution_wait_phase_response_tx, execution_wait_phase_response_rx) = + create_channel::(); + let execution_wait_phase_processor = ExecutionWaitPhase; + let execution_wait_phase = PipelinePhase::new( + execution_wait_phase_request_rx, + Some(execution_wait_phase_response_tx), + Box::new(execution_wait_phase_processor), ); // Signing Phase @@ -78,13 +88,16 @@ pub fn prepare_phases_and_buffer_manager( ); ( - execution_phase, + execution_schedule_phase, + execution_wait_phase, signing_phase, persisting_phase, BufferManager::new( author, - execution_phase_request_tx, - execution_phase_response_rx, + execution_schedule_phase_request_tx, + execution_schedule_phase_response_rx, + execution_wait_phase_request_tx, + execution_wait_phase_response_rx, signing_phase_request_tx, signing_phase_response_rx, Arc::new(commit_msg_tx), diff --git a/consensus/src/experimental/execution_phase.rs b/consensus/src/experimental/execution_phase.rs index c27fbcf3e5be5..3f52473d51af8 100644 --- a/consensus/src/experimental/execution_phase.rs +++ b/consensus/src/experimental/execution_phase.rs @@ -3,10 +3,9 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{experimental::pipeline_phase::StatelessPipeline, state_replication::StateComputer}; -use anyhow::Result; use aptos_consensus_types::executed_block::ExecutedBlock; use aptos_crypto::HashValue; -use aptos_executor_types::Error as ExecutionError; +use aptos_executor_types::{ExecutorError, ExecutorResult}; use async_trait::async_trait; use std::{ fmt::{Debug, Display, Formatter}, @@ -37,7 +36,7 @@ impl Display for ExecutionRequest { pub struct ExecutionResponse { pub block_id: HashValue, - pub inner: Result, ExecutionError>, + pub inner: ExecutorResult>, } pub struct ExecutionPhase { @@ -64,7 +63,7 @@ impl StatelessPipeline for ExecutionPhase { // return err when the blocks are empty return ExecutionResponse { block_id: HashValue::zero(), - inner: Err(ExecutionError::EmptyBlocks), + inner: 
Err(ExecutorError::EmptyBlocks), }; } diff --git a/consensus/src/experimental/execution_schedule_phase.rs b/consensus/src/experimental/execution_schedule_phase.rs new file mode 100644 index 0000000000000..eb214f364626b --- /dev/null +++ b/consensus/src/experimental/execution_schedule_phase.rs @@ -0,0 +1,89 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + experimental::{execution_wait_phase::ExecutionWaitRequest, pipeline_phase::StatelessPipeline}, + state_replication::StateComputer, +}; +use aptos_consensus_types::executed_block::ExecutedBlock; +use aptos_crypto::HashValue; +use aptos_logger::debug; +use async_trait::async_trait; +use std::{ + fmt::{Debug, Display, Formatter}, + sync::Arc, +}; + +/// [ This class is used when consensus.decoupled = true ] +/// ExecutionSchedulePhase is a singleton that receives ordered blocks from +/// the buffer manager and send them to the ExecutionPipeline. + +pub struct ExecutionRequest { + pub ordered_blocks: Vec, +} + +impl Debug for ExecutionRequest { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "{}", self) + } +} + +impl Display for ExecutionRequest { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "ExecutionScheduleRequest({:?})", self.ordered_blocks) + } +} + +pub struct ExecutionSchedulePhase { + execution_proxy: Arc, +} + +impl ExecutionSchedulePhase { + pub fn new(execution_proxy: Arc) -> Self { + Self { execution_proxy } + } +} + +#[async_trait] +impl StatelessPipeline for ExecutionSchedulePhase { + type Request = ExecutionRequest; + type Response = ExecutionWaitRequest; + + const NAME: &'static str = "execution_schedule"; + + async fn process(&self, req: ExecutionRequest) -> ExecutionWaitRequest { + let ExecutionRequest { ordered_blocks } = req; + + if ordered_blocks.is_empty() { + return ExecutionWaitRequest { + block_id: HashValue::zero(), + fut: Box::pin(async { Err(aptos_executor_types::ExecutorError::EmptyBlocks) }), + }; + } + + let block_id = ordered_blocks.last().unwrap().id(); + + // Call schedule_compute() for each block here (not in the fut being returned) to + // make sure they are scheduled in order. + let mut futs = vec![]; + for b in &ordered_blocks { + let fut = self + .execution_proxy + .schedule_compute(b.block(), b.parent_id()) + .await; + futs.push(fut) + } + + // In the future being returned, wait for the compute results in order. + let fut = Box::pin(async move { + let mut results = vec![]; + for (block, fut) in itertools::zip_eq(ordered_blocks, futs) { + debug!("try to receive compute result for block {}", block.id()); + results.push(block.replace_result(fut.await?)); + } + Ok(results) + }); + + ExecutionWaitRequest { block_id, fut } + } +} diff --git a/consensus/src/experimental/execution_wait_phase.rs b/consensus/src/experimental/execution_wait_phase.rs new file mode 100644 index 0000000000000..78d118b6f479e --- /dev/null +++ b/consensus/src/experimental/execution_wait_phase.rs @@ -0,0 +1,55 @@ +// Copyright © Aptos Foundation +// Parts of the project are originally copyright © Meta Platforms, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::experimental::{buffer_item::ExecutionFut, pipeline_phase::StatelessPipeline}; +use aptos_consensus_types::executed_block::ExecutedBlock; +use aptos_crypto::HashValue; +use aptos_executor_types::ExecutorResult; +use async_trait::async_trait; +use std::fmt::{Debug, Display, Formatter}; + +/// [ This class is used when consensus.decoupled = true ] +/// ExecutionWaitPhase is a singleton that receives scheduled execution futures +/// from ExecutionSchedulePhase and waits for the results from the ExecutionPipeline. + +pub struct ExecutionWaitRequest { + pub block_id: HashValue, + pub fut: ExecutionFut, +} + +impl Debug for ExecutionWaitRequest { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "{}", self) + } +} + +impl Display for ExecutionWaitRequest { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "ExecutionRequest({:?})", self.block_id) + } +} + +pub struct ExecutionResponse { + pub block_id: HashValue, + pub inner: ExecutorResult>, +} + +pub struct ExecutionWaitPhase; + +#[async_trait] +impl StatelessPipeline for ExecutionWaitPhase { + type Request = ExecutionWaitRequest; + type Response = ExecutionResponse; + + const NAME: &'static str = "execution"; + + async fn process(&self, req: ExecutionWaitRequest) -> ExecutionResponse { + let ExecutionWaitRequest { block_id, fut } = req; + + ExecutionResponse { + block_id, + inner: fut.await, + } + } +} diff --git a/consensus/src/experimental/mod.rs b/consensus/src/experimental/mod.rs index 4eebdb3d5c39b..b8aad5120a036 100644 --- a/consensus/src/experimental/mod.rs +++ b/consensus/src/experimental/mod.rs @@ -28,7 +28,8 @@ pub mod buffer_manager; pub mod commit_reliable_broadcast; pub mod decoupled_execution_utils; pub mod errors; -pub mod execution_phase; +pub mod execution_schedule_phase; +pub mod execution_wait_phase; pub mod hashable; pub mod ordering_state_computer; pub mod persisting_phase; diff --git a/consensus/src/experimental/ordering_state_computer.rs b/consensus/src/experimental/ordering_state_computer.rs index a71dbcf5917c8..117cc03317a6f 100644 --- a/consensus/src/experimental/ordering_state_computer.rs +++ b/consensus/src/experimental/ordering_state_computer.rs @@ -16,14 +16,16 @@ use crate::{ use anyhow::Result; use aptos_consensus_types::{block::Block, executed_block::ExecutedBlock}; use aptos_crypto::HashValue; -use aptos_executor_types::{Error as ExecutionError, StateComputeResult}; +use aptos_executor_types::{ExecutorResult, StateComputeResult}; use aptos_logger::prelude::*; use aptos_types::{epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures}; +use async_trait::async_trait; use fail::fail_point; use futures::{ channel::{mpsc::UnboundedSender, oneshot}, SinkExt, }; +use futures_channel::mpsc::unbounded; use std::sync::Arc; /// Ordering-only execution proxy @@ -59,7 +61,7 @@ impl StateComputer for OrderingStateComputer { _block: &Block, // The parent block id. _parent_block_id: HashValue, - ) -> Result { + ) -> ExecutorResult { // Return dummy block and bypass the execution phase. 
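ExecutionSchedulePhase hands back a future instead of a result, and ExecutionWaitPhase simply awaits it, so scheduling stays in block order while waiting is deferred. A minimal stand-alone sketch of that hand-off; every name here is invented for illustration:

use std::future::Future;
use std::pin::Pin;

type BoxFut<T> = Pin<Box<dyn Future<Output = T> + Send>>;

struct WaitRequest {
    block_id: u64,
    fut: BoxFut<u64>,
}

// "Schedule" is cheap and happens in arrival order; the actual work is carried
// by the returned future.
fn schedule(block_id: u64) -> WaitRequest {
    WaitRequest {
        block_id,
        fut: Box::pin(async move { block_id * 10 }),
    }
}

// "Wait" runs later and simply awaits whatever was scheduled.
async fn wait(req: WaitRequest) -> (u64, u64) {
    let WaitRequest { block_id, fut } = req;
    (block_id, fut.await)
}

#[tokio::main]
async fn main() {
    let scheduled: Vec<WaitRequest> = (1..=3).map(schedule).collect();
    for req in scheduled {
        let (id, value) = wait(req).await;
        assert_eq!(value, id * 10);
    }
}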
// This will break the e2e smoke test (for now because // no one is actually handling the next phase) if the @@ -74,7 +76,7 @@ impl StateComputer for OrderingStateComputer { blocks: &[Arc], finality_proof: LedgerInfoWithSignatures, callback: StateComputerCommitCallBackType, - ) -> Result<(), ExecutionError> { + ) -> ExecutorResult<()> { assert!(!blocks.is_empty()); if self @@ -133,3 +135,74 @@ impl StateComputer for OrderingStateComputer { fn end_epoch(&self) {} } + +// TODO: stop using state computer for DAG state sync +pub struct DagStateSyncComputer { + ordering_state_computer: OrderingStateComputer, +} + +impl DagStateSyncComputer { + #[allow(dead_code)] + pub fn new( + state_computer_for_sync: Arc, + reset_event_channel_tx: UnboundedSender, + ) -> Self { + // note: this channel is unused + let (sender_tx, _) = unbounded(); + Self { + ordering_state_computer: OrderingStateComputer { + executor_channel: sender_tx, + state_computer_for_sync, + reset_event_channel_tx, + }, + } + } +} + +#[async_trait] +impl StateComputer for DagStateSyncComputer { + async fn compute( + &self, + // The block that will be computed. + _block: &Block, + // The parent block root hash. + _parent_block_id: HashValue, + ) -> ExecutorResult { + unimplemented!("method not supported") + } + + /// Send a successful commit. A future is fulfilled when the state is finalized. + async fn commit( + &self, + _blocks: &[Arc], + _finality_proof: LedgerInfoWithSignatures, + _callback: StateComputerCommitCallBackType, + ) -> ExecutorResult<()> { + unimplemented!("method not supported") + } + + /// Best effort state synchronization to the given target LedgerInfo. + /// In case of success (`Result::Ok`) the LI of storage is at the given target. + /// In case of failure (`Result::Error`) the LI of storage remains unchanged, and the validator + /// can assume there were no modifications to the storage made. + async fn sync_to(&self, target: LedgerInfoWithSignatures) -> Result<(), StateSyncError> { + self.ordering_state_computer.sync_to(target).await + } + + // Reconfigure to execute transactions for a new epoch. + fn new_epoch( + &self, + _epoch_state: &EpochState, + _payload_manager: Arc, + _transaction_shuffler: Arc, + _block_gas_limit: Option, + _transaction_deduper: Arc, + ) { + unimplemented!("method not supported"); + } + + // Reconfigure to clear epoch state at end of epoch. 
+ fn end_epoch(&self) { + unimplemented!("method not supported") + } +} diff --git a/consensus/src/experimental/persisting_phase.rs b/consensus/src/experimental/persisting_phase.rs index 20c69f41301d3..4fa67a2dbb028 100644 --- a/consensus/src/experimental/persisting_phase.rs +++ b/consensus/src/experimental/persisting_phase.rs @@ -7,7 +7,7 @@ use crate::{ state_replication::{StateComputer, StateComputerCommitCallBackType}, }; use aptos_consensus_types::executed_block::ExecutedBlock; -use aptos_executor_types::Error; +use aptos_executor_types::ExecutorResult; use aptos_types::ledger_info::LedgerInfoWithSignatures; use async_trait::async_trait; use std::{ @@ -42,7 +42,7 @@ impl Display for PersistingRequest { } } -pub type PersistingResponse = Result<(), Error>; +pub type PersistingResponse = ExecutorResult<()>; pub struct PersistingPhase { persisting_handle: Arc, diff --git a/consensus/src/experimental/signing_phase.rs b/consensus/src/experimental/signing_phase.rs index 55411b0d2685b..b6d1b407a13bd 100644 --- a/consensus/src/experimental/signing_phase.rs +++ b/consensus/src/experimental/signing_phase.rs @@ -2,12 +2,9 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{ - experimental::pipeline_phase::StatelessPipeline, metrics_safety_rules::MetricsSafetyRules, -}; +use crate::experimental::pipeline_phase::StatelessPipeline; use aptos_crypto::bls12381; -use aptos_infallible::Mutex; -use aptos_safety_rules::{Error, TSafetyRules}; +use aptos_safety_rules::Error; use aptos_types::ledger_info::{LedgerInfo, LedgerInfoWithSignatures}; use async_trait::async_trait; use std::{ @@ -41,17 +38,25 @@ impl Display for SigningRequest { } } +pub trait CommitSignerProvider: Send + Sync { + fn sign_commit_vote( + &self, + ledger_info: LedgerInfoWithSignatures, + new_ledger_info: LedgerInfo, + ) -> Result; +} + pub struct SigningResponse { pub signature_result: Result, pub commit_ledger_info: LedgerInfo, } pub struct SigningPhase { - safety_rule_handle: Arc>, + safety_rule_handle: Arc, } impl SigningPhase { - pub fn new(safety_rule_handle: Arc>) -> Self { + pub fn new(safety_rule_handle: Arc) -> Self { Self { safety_rule_handle } } } @@ -72,7 +77,6 @@ impl StatelessPipeline for SigningPhase { SigningResponse { signature_result: self .safety_rule_handle - .lock() .sign_commit_vote(ordered_ledger_info, commit_ledger_info.clone()), commit_ledger_info, } diff --git a/consensus/src/experimental/tests/buffer_manager_tests.rs b/consensus/src/experimental/tests/buffer_manager_tests.rs index 80448560f5aca..1dc438b72f133 100644 --- a/consensus/src/experimental/tests/buffer_manager_tests.rs +++ b/consensus/src/experimental/tests/buffer_manager_tests.rs @@ -8,7 +8,8 @@ use crate::{ create_channel, BufferManager, OrderedBlocks, Receiver, ResetAck, ResetRequest, Sender, }, decoupled_execution_utils::prepare_phases_and_buffer_manager, - execution_phase::ExecutionPhase, + execution_schedule_phase::ExecutionSchedulePhase, + execution_wait_phase::ExecutionWaitPhase, ordering_state_computer::OrderingStateComputer, persisting_phase::PersistingPhase, pipeline_phase::PipelinePhase, @@ -60,7 +61,8 @@ pub fn prepare_buffer_manager() -> ( Sender, aptos_channel::Sender, aptos_channels::Receiver>, - PipelinePhase, + PipelinePhase, + PipelinePhase, PipelinePhase, PipelinePhase, HashValue, @@ -137,7 +139,8 @@ pub fn prepare_buffer_manager() -> ( let hash_val = mocked_execution_proxy.get_root_hash(); let ( - execution_phase_pipeline, + 
execution_schedule_phase_pipeline, + execution_wait_phase_pipeline, signing_phase_pipeline, persisting_phase_pipeline, buffer_manager, @@ -159,7 +162,8 @@ pub fn prepare_buffer_manager() -> ( buffer_reset_tx, msg_tx, // channel to pass commit messages into the buffer manager self_loop_rx, // channel to receive message from the buffer manager itself - execution_phase_pipeline, + execution_schedule_phase_pipeline, + execution_wait_phase_pipeline, signing_phase_pipeline, persisting_phase_pipeline, hash_val, @@ -188,7 +192,8 @@ pub fn launch_buffer_manager() -> ( reset_tx, msg_tx, // channel to pass commit messages into the buffer manager self_loop_rx, // channel to receive message from the buffer manager itself - execution_phase_pipeline, + execution_schedule_phase_pipeline, + execution_wait_phase_pipeline, signing_phase_pipeline, persisting_phase_pipeline, hash_val, @@ -197,7 +202,8 @@ pub fn launch_buffer_manager() -> ( validators, ) = prepare_buffer_manager(); - runtime.spawn(execution_phase_pipeline.start()); + runtime.spawn(execution_schedule_phase_pipeline.start()); + runtime.spawn(execution_wait_phase_pipeline.start()); runtime.spawn(signing_phase_pipeline.start()); runtime.spawn(persisting_phase_pipeline.start()); runtime.spawn(buffer_manager.start()); diff --git a/consensus/src/experimental/tests/execution_phase_tests.rs b/consensus/src/experimental/tests/execution_phase_tests.rs index 83ca21dd72e0d..fc51867853538 100644 --- a/consensus/src/experimental/tests/execution_phase_tests.rs +++ b/consensus/src/experimental/tests/execution_phase_tests.rs @@ -5,10 +5,12 @@ use crate::{ experimental::{ buffer_manager::create_channel, - execution_phase::{ExecutionPhase, ExecutionRequest, ExecutionResponse}, - pipeline_phase::{CountedRequest, PipelinePhase}, + execution_schedule_phase::{ExecutionRequest, ExecutionSchedulePhase}, + execution_wait_phase::{ExecutionResponse, ExecutionWaitPhase}, + pipeline_phase::{CountedRequest, PipelinePhase, StatelessPipeline}, tests::phase_tester::PhaseTester, }, + state_replication::StateComputer, test_utils::{consensus_runtime, RandomComputeResultStateComputer}, }; use aptos_consensus_types::{ @@ -18,19 +20,52 @@ use aptos_consensus_types::{ quorum_cert::QuorumCert, }; use aptos_crypto::HashValue; -use aptos_executor_types::{Error, StateComputeResult}; +use aptos_executor_types::{ExecutorError, StateComputeResult}; use aptos_types::{ledger_info::LedgerInfo, validator_verifier::random_validator_verifier}; +use async_trait::async_trait; use std::sync::Arc; -pub fn prepare_execution_phase() -> (HashValue, ExecutionPhase) { +// ExecutionSchedulePhase and ExecutionWaitPhase chained together. +// In BufferManager they are chained through the main loop. 
+pub struct ExecutionPhaseForTest { + schedule_phase: ExecutionSchedulePhase, + wait_phase: ExecutionWaitPhase, +} + +impl ExecutionPhaseForTest { + pub fn new(execution_proxy: Arc) -> Self { + let schedule_phase = ExecutionSchedulePhase::new(execution_proxy); + let wait_phase = ExecutionWaitPhase; + Self { + schedule_phase, + wait_phase, + } + } +} + +#[async_trait] +impl StatelessPipeline for ExecutionPhaseForTest { + type Request = ExecutionRequest; + type Response = ExecutionResponse; + + const NAME: &'static str = "execution"; + + async fn process(&self, req: ExecutionRequest) -> ExecutionResponse { + let wait_req = self.schedule_phase.process(req).await; + self.wait_phase.process(wait_req).await + } +} + +pub fn prepare_execution_phase() -> (HashValue, ExecutionPhaseForTest) { let execution_proxy = Arc::new(RandomComputeResultStateComputer::new()); let random_hash_value = execution_proxy.get_root_hash(); - let execution_phase = ExecutionPhase::new(execution_proxy); + let execution_phase = ExecutionPhaseForTest::new(execution_proxy); + (random_hash_value, execution_phase) } fn add_execution_phase_test_cases( - phase_tester: &mut PhaseTester, + phase_tester: &mut PhaseTester, random_hash_value: HashValue, ) { let genesis_qc = certificate_for_genesis(); @@ -63,7 +98,7 @@ fn add_execution_phase_test_cases( ExecutionRequest { ordered_blocks: vec![], }, - Box::new(move |resp| assert!(matches!(resp.inner, Err(Error::EmptyBlocks)))), + Box::new(move |resp| assert!(matches!(resp.inner, Err(ExecutorError::EmptyBlocks)))), ); // bad parent id @@ -80,7 +115,7 @@ fn add_execution_phase_test_cases( StateComputeResult::new_dummy(), )], }, - Box::new(move |resp| assert!(matches!(resp.inner, Err(Error::BlockNotFound(_))))), + Box::new(move |resp| assert!(matches!(resp.inner, Err(ExecutorError::BlockNotFound(_))))), ); } @@ -90,7 +125,7 @@ fn execution_phase_tests() { // unit tests let (random_hash_value, execution_phase) = prepare_execution_phase(); - let mut unit_phase_tester = PhaseTester::::new(); + let mut unit_phase_tester = PhaseTester::::new(); add_execution_phase_test_cases(&mut unit_phase_tester, random_hash_value); unit_phase_tester.unit_test(&execution_phase); @@ -106,7 +141,7 @@ fn execution_phase_tests() { runtime.spawn(execution_phase_pipeline.start()); - let mut e2e_phase_tester = PhaseTester::::new(); + let mut e2e_phase_tester = PhaseTester::::new(); add_execution_phase_test_cases(&mut e2e_phase_tester, random_hash_value); e2e_phase_tester.e2e_test(in_channel_tx, out_channel_rx); } diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 51c0ccc45610d..6ae35b7c7e486 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -46,6 +46,7 @@ mod util; pub mod consensus_provider; /// Required by the telemetry service pub mod counters; +mod execution_pipeline; /// AptosNet interface. pub mod network_interface; mod payload_manager; diff --git a/consensus/src/metrics_safety_rules.rs b/consensus/src/metrics_safety_rules.rs index 8692822c17a0e..6f000c97f60e4 100644 --- a/consensus/src/metrics_safety_rules.rs +++ b/consensus/src/metrics_safety_rules.rs @@ -2,7 +2,10 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::{monitor, persistent_liveness_storage::PersistentLivenessStorage}; +use crate::{ + experimental::signing_phase::CommitSignerProvider, monitor, + persistent_liveness_storage::PersistentLivenessStorage, +}; use aptos_consensus_types::{ block_data::BlockData, timeout_2chain::{TwoChainTimeout, TwoChainTimeoutCertificate}, @@ -10,6 +13,7 @@ use aptos_consensus_types::{ vote_proposal::VoteProposal, }; use aptos_crypto::bls12381; +use aptos_infallible::Mutex; use aptos_logger::prelude::info; use aptos_safety_rules::{ConsensusState, Error, TSafetyRules}; use aptos_types::{ @@ -133,6 +137,16 @@ impl TSafetyRules for MetricsSafetyRules { } } +impl CommitSignerProvider for Mutex { + fn sign_commit_vote( + &self, + ledger_info: LedgerInfoWithSignatures, + new_ledger_info: LedgerInfo, + ) -> Result { + self.lock().sign_commit_vote(ledger_info, new_ledger_info) + } +} + #[cfg(test)] mod tests { use crate::{metrics_safety_rules::MetricsSafetyRules, test_utils::EmptyStorage}; diff --git a/consensus/src/network.rs b/consensus/src/network.rs index 249829b0c312f..c9bc7a2dba888 100644 --- a/consensus/src/network.rs +++ b/consensus/src/network.rs @@ -5,7 +5,7 @@ use crate::{ block_storage::tracing::{observe_block, BlockStage}, counters, - dag::{DAGMessage, DAGNetworkMessage, RpcWithFallback, TDAGNetworkSender}, + dag::{DAGMessage, DAGNetworkMessage, ProofNotifier, RpcWithFallback, TDAGNetworkSender}, experimental::commit_reliable_broadcast::CommitMessage, logging::LogEvent, monitor, @@ -30,7 +30,7 @@ use aptos_network::{ protocols::{network::Event, rpc::error::RpcError}, ProtocolId, }; -use aptos_reliable_broadcast::{RBMessage, RBNetworkSender}; +use aptos_reliable_broadcast::RBNetworkSender; use aptos_types::{ account_address::AccountAddress, epoch_change::EpochChangeProof, ledger_info::LedgerInfoWithSignatures, validator_verifier::ValidatorVerifier, @@ -154,6 +154,7 @@ pub struct NetworkSender { // (self sending is not supported by the networking API). self_sender: aptos_channels::Sender>, validators: ValidatorVerifier, + time_service: aptos_time_service::TimeService, } impl NetworkSender { @@ -168,6 +169,7 @@ impl NetworkSender { consensus_network_client, self_sender, validators, + time_service: aptos_time_service::TimeService::real(), } } @@ -441,34 +443,15 @@ impl QuorumStoreSender for NetworkSender { } } -// TODO: this can be improved -#[derive(Clone)] -pub struct DAGNetworkSenderImpl { - sender: Arc, - time_service: aptos_time_service::TimeService, -} - -impl DAGNetworkSenderImpl { - #[allow(unused)] - pub fn new(sender: Arc) -> Self { - Self { - sender, - time_service: aptos_time_service::TimeService::real(), - } - } -} - #[async_trait] -impl TDAGNetworkSender for DAGNetworkSenderImpl { +impl TDAGNetworkSender for NetworkSender { async fn send_rpc( &self, receiver: Author, message: DAGMessage, timeout: Duration, ) -> anyhow::Result { - self.sender - .consensus_network_client - .send_rpc(receiver, message.into_network_message(), timeout) + self.send_rpc(receiver, message.into_network_message(), timeout) .await .map_err(|e| anyhow!("invalid rpc response: {}", e)) .and_then(TConsensusMsg::from_network_message) @@ -477,41 +460,46 @@ impl TDAGNetworkSender for DAGNetworkSenderImpl { /// Given a list of potential responders, sending rpc to get response from any of them and could /// fallback to more in case of failures. 
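With the change above, the signing phase depends on the narrow CommitSignerProvider trait rather than on Mutex<MetricsSafetyRules> directly, and the blanket impl hides the locking. A simplified, self-contained version of that pattern with toy types (not the real signatures):

use std::sync::{Arc, Mutex};

trait CommitSigner: Send + Sync {
    fn sign_commit_vote(&self, msg: &str) -> String;
}

struct SafetyRules;

impl SafetyRules {
    // The underlying signer needs &mut self, which is why the Mutex exists.
    fn sign(&mut self, msg: &str) -> String {
        format!("signed({msg})")
    }
}

// Implementing the trait on Mutex<SafetyRules> keeps locking out of callers,
// mirroring `impl CommitSignerProvider for Mutex<MetricsSafetyRules>`.
impl CommitSigner for Mutex<SafetyRules> {
    fn sign_commit_vote(&self, msg: &str) -> String {
        self.lock().unwrap().sign(msg)
    }
}

fn main() {
    let signer: Arc<dyn CommitSigner> = Arc::new(Mutex::new(SafetyRules));
    assert_eq!(signer.sign_commit_vote("commit-vote"), "signed(commit-vote)");
}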
async fn send_rpc_with_fallbacks( - &self, + self: Arc, responders: Vec, message: DAGMessage, retry_interval: Duration, rpc_timeout: Duration, ) -> RpcWithFallback { - let sender = Arc::new(self.clone()); RpcWithFallback::new( responders, message, retry_interval, rpc_timeout, - sender, + self.clone(), self.time_service.clone(), ) } } #[async_trait] -impl RBNetworkSender for DAGNetworkSenderImpl -where - M: RBMessage + TConsensusMsg + 'static, -{ +impl RBNetworkSender for NetworkSender { async fn send_rb_rpc( &self, receiver: Author, - message: M, + message: DAGMessage, timeout: Duration, - ) -> anyhow::Result { - self.sender - .consensus_network_client - .send_rpc(receiver, message.into_network_message(), timeout) + ) -> anyhow::Result { + self.send_rpc(receiver, message.into_network_message(), timeout) .await .map_err(|e| anyhow!("invalid rpc response: {}", e)) - .and_then(|msg| TConsensusMsg::from_network_message(msg)) + .and_then(TConsensusMsg::from_network_message) + } +} + +#[async_trait] +impl ProofNotifier for NetworkSender { + async fn send_epoch_change(&self, proof: EpochChangeProof) { + self.send_epoch_change(proof).await + } + + async fn send_commit_proof(&self, ledger_info: LedgerInfoWithSignatures) { + self.send_commit_proof(ledger_info).await } } diff --git a/consensus/src/payload_manager.rs b/consensus/src/payload_manager.rs index 52eae77b2fe74..935bc55af3e8e 100644 --- a/consensus/src/payload_manager.rs +++ b/consensus/src/payload_manager.rs @@ -15,7 +15,7 @@ use aptos_consensus_types::{ proof_of_store::ProofOfStore, }; use aptos_crypto::HashValue; -use aptos_executor_types::{Error::DataNotFound, *}; +use aptos_executor_types::{ExecutorError::DataNotFound, *}; use aptos_logger::prelude::*; use aptos_types::transaction::SignedTransaction; use futures::{channel::mpsc::Sender, SinkExt}; @@ -36,7 +36,7 @@ impl PayloadManager { batch_store: &BatchStore, ) -> Vec<( HashValue, - oneshot::Receiver, aptos_executor_types::Error>>, + oneshot::Receiver>>, )> { let mut receivers = Vec::new(); for pos in proofs { @@ -126,7 +126,7 @@ impl PayloadManager { /// Extract transaction from a given block /// Assumes it is never called for the same block concurrently. Otherwise status can be None. - pub async fn get_transactions(&self, block: &Block) -> Result, Error> { + pub async fn get_transactions(&self, block: &Block) -> ExecutorResult> { let payload = match block.payload() { Some(p) => p, None => return Ok(Vec::new()), diff --git a/consensus/src/persistent_liveness_storage.rs b/consensus/src/persistent_liveness_storage.rs index 84bfeb0e4455f..d00f86852d473 100644 --- a/consensus/src/persistent_liveness_storage.rs +++ b/consensus/src/persistent_liveness_storage.rs @@ -51,6 +51,9 @@ pub trait PersistentLivenessStorage: Send + Sync { /// Returns a handle of the aptosdb. 
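send_rpc_with_fallbacks now takes self: Arc<Self>, letting the sender hand clones of itself to the fallback machinery without a wrapper type. A tiny illustration of an Arc<Self> receiver, using made-up types:

use std::sync::Arc;

struct Sender {
    name: &'static str,
}

impl Sender {
    // Arc<Self> receiver: the method can hand clones of the same shared sender
    // to each fallback attempt, which a plain &self could not do without an
    // extra wrapping Arc.
    fn send_with_fallbacks(self: Arc<Self>, peers: Vec<&'static str>) -> Vec<String> {
        peers
            .into_iter()
            .map(|peer| {
                let me = Arc::clone(&self);
                format!("{} -> {}", me.name, peer)
            })
            .collect()
    }
}

fn main() {
    let sender = Arc::new(Sender { name: "alice" });
    let sent = sender.send_with_fallbacks(vec!["bob", "carol"]);
    assert_eq!(sent, vec!["alice -> bob", "alice -> carol"]);
}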
fn aptos_db(&self) -> Arc; + + // Returns a handle of the consensus db + fn consensus_db(&self) -> Arc; } #[derive(Clone)] @@ -444,4 +447,8 @@ impl PersistentLivenessStorage for StorageWriteProxy { fn aptos_db(&self) -> Arc { self.aptos_db.clone() } + + fn consensus_db(&self) -> Arc { + self.db.clone() + } } diff --git a/consensus/src/quorum_store/batch_requester.rs b/consensus/src/quorum_store/batch_requester.rs index d83ea7eab6e13..11b36a7f71cd4 100644 --- a/consensus/src/quorum_store/batch_requester.rs +++ b/consensus/src/quorum_store/batch_requester.rs @@ -18,7 +18,7 @@ use tokio::{sync::oneshot, time}; struct BatchRequesterState { signers: Vec, next_index: usize, - ret_tx: oneshot::Sender, aptos_executor_types::Error>>, + ret_tx: oneshot::Sender>>, num_retries: usize, retry_limit: usize, } @@ -26,7 +26,7 @@ struct BatchRequesterState { impl BatchRequesterState { fn new( signers: Vec, - ret_tx: oneshot::Sender, aptos_executor_types::Error>>, + ret_tx: oneshot::Sender>>, retry_limit: usize, ) -> Self { Self { @@ -81,7 +81,11 @@ impl BatchRequesterState { } else { counters::RECEIVED_BATCH_REQUEST_TIMEOUT_COUNT.inc(); debug!("QS: batch timed out, digest {}", digest); - if self.ret_tx.send(Err(Error::CouldNotGetData)).is_err() { + if self + .ret_tx + .send(Err(ExecutorError::CouldNotGetData)) + .is_err() + { debug!( "Receiver of requested batch not available for timed out digest {}", digest @@ -126,7 +130,7 @@ impl BatchRequester { &self, digest: HashValue, signers: Vec, - ret_tx: oneshot::Sender, Error>>, + ret_tx: oneshot::Sender>>, ) { let mut request_state = BatchRequesterState::new(signers, ret_tx, self.retry_limit); let network_sender = self.network_sender.clone(); diff --git a/consensus/src/quorum_store/batch_store.rs b/consensus/src/quorum_store/batch_store.rs index 90383181e717e..ad841076a7b81 100644 --- a/consensus/src/quorum_store/batch_store.rs +++ b/consensus/src/quorum_store/batch_store.rs @@ -14,7 +14,7 @@ use crate::{ use anyhow::bail; use aptos_consensus_types::proof_of_store::{ProofOfStore, SignedBatchInfo}; use aptos_crypto::HashValue; -use aptos_executor_types::Error; +use aptos_executor_types::{ExecutorError, ExecutorResult}; use aptos_logger::prelude::*; use aptos_types::{ transaction::SignedTransaction, validator_signer::ValidatorSigner, @@ -362,19 +362,22 @@ impl BatchStore { self.last_certified_time.load(Ordering::Relaxed) } - fn get_batch_from_db(&self, digest: &HashValue) -> Result { + fn get_batch_from_db(&self, digest: &HashValue) -> ExecutorResult { counters::GET_BATCH_FROM_DB_COUNT.inc(); match self.db.get_batch(digest) { Ok(Some(value)) => Ok(value), Ok(None) | Err(_) => { error!("Could not get batch from db"); - Err(Error::CouldNotGetData) + Err(ExecutorError::CouldNotGetData) }, } } - pub(crate) fn get_batch_from_local(&self, digest: &HashValue) -> Result { + pub(crate) fn get_batch_from_local( + &self, + digest: &HashValue, + ) -> ExecutorResult { if let Some(value) = self.db_cache.get(digest) { if value.payload_storage_mode() == StorageMode::PersistedOnly { self.get_batch_from_db(digest) @@ -383,7 +386,7 @@ impl BatchStore { Ok(value.clone()) } } else { - Err(Error::CouldNotGetData) + Err(ExecutorError::CouldNotGetData) } } } @@ -395,7 +398,7 @@ pub trait BatchReader: Send + Sync { fn get_batch( &self, proof: ProofOfStore, - ) -> oneshot::Receiver, Error>>; + ) -> oneshot::Receiver>>; } impl BatchReader for BatchStore { @@ -406,7 +409,7 @@ impl BatchReader for Batch fn get_batch( &self, proof: ProofOfStore, - ) -> oneshot::Receiver, Error>> { + ) -> 
oneshot::Receiver>> { let (tx, rx) = oneshot::channel(); if let Ok(mut value) = self.get_batch_from_local(proof.digest()) { diff --git a/consensus/src/quorum_store/tests/proof_coordinator_test.rs b/consensus/src/quorum_store/tests/proof_coordinator_test.rs index acfb1d035a999..fb9445416327e 100644 --- a/consensus/src/quorum_store/tests/proof_coordinator_test.rs +++ b/consensus/src/quorum_store/tests/proof_coordinator_test.rs @@ -15,7 +15,7 @@ use aptos_consensus_types::proof_of_store::{ BatchId, ProofOfStore, SignedBatchInfo, SignedBatchInfoMsg, }; use aptos_crypto::HashValue; -use aptos_executor_types::Error; +use aptos_executor_types::ExecutorResult; use aptos_types::{ transaction::SignedTransaction, validator_verifier::random_validator_verifier, PeerId, }; @@ -31,7 +31,7 @@ impl BatchReader for MockBatchReader { Some(self.peer) } - fn get_batch(&self, _proof: ProofOfStore) -> Receiver, Error>> { + fn get_batch(&self, _proof: ProofOfStore) -> Receiver>> { unimplemented!(); } } diff --git a/consensus/src/state_computer.rs b/consensus/src/state_computer.rs index 4653c319adb9c..fa9deb07b0252 100644 --- a/consensus/src/state_computer.rs +++ b/consensus/src/state_computer.rs @@ -6,6 +6,7 @@ use crate::{ block_storage::tracing::{observe_block, BlockStage}, counters, error::StateSyncError, + execution_pipeline::ExecutionPipeline, monitor, payload_manager::PayloadManager, state_replication::{StateComputer, StateComputerCommitCallBackType}, @@ -17,7 +18,7 @@ use anyhow::Result; use aptos_consensus_notifications::ConsensusNotificationSender; use aptos_consensus_types::{block::Block, common::Round, executed_block::ExecutedBlock}; use aptos_crypto::HashValue; -use aptos_executor_types::{BlockExecutorTrait, Error as ExecutionError, StateComputeResult}; +use aptos_executor_types::{BlockExecutorTrait, ExecutorResult, StateComputeResult}; use aptos_infallible::Mutex; use aptos_logger::prelude::*; use aptos_types::{ @@ -25,10 +26,12 @@ use aptos_types::{ ledger_info::LedgerInfoWithSignatures, transaction::Transaction, }; use fail::fail_point; -use futures::{SinkExt, StreamExt}; +use futures::{future::BoxFuture, SinkExt, StreamExt}; use std::{boxed::Box, sync::Arc}; use tokio::sync::Mutex as AsyncMutex; +pub type StateComputeResultFut = BoxFuture<'static, ExecutorResult>; + type NotificationType = ( Box, Vec, @@ -60,6 +63,7 @@ pub struct ExecutionProxy { transaction_shuffler: Mutex>>, maybe_block_gas_limit: Mutex>, transaction_deduper: Mutex>>, + execution_pipeline: ExecutionPipeline, } impl ExecutionProxy { @@ -84,6 +88,7 @@ impl ExecutionProxy { callback(); } }); + let execution_pipeline = ExecutionPipeline::spawn(executor.clone(), handle); Self { executor, txn_notifier, @@ -95,6 +100,7 @@ impl ExecutionProxy { transaction_shuffler: Mutex::new(None), maybe_block_gas_limit: Mutex::new(None), transaction_deduper: Mutex::new(None), + execution_pipeline, } } } @@ -102,18 +108,13 @@ impl ExecutionProxy { // TODO: filter duplicated transaction before executing #[async_trait::async_trait] impl StateComputer for ExecutionProxy { - async fn compute( + async fn schedule_compute( &self, // The block to be executed. block: &Block, // The parent block id. 
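BatchReader::get_batch keeps its "return a receiver now, fulfil it later" shape: the caller gets a oneshot receiver immediately, whether the batch is already local or has to be requested. A small sketch of that pattern with invented names and toy data:

use tokio::sync::oneshot;

fn get_value(local_hit: bool) -> oneshot::Receiver<Result<u64, String>> {
    let (tx, rx) = oneshot::channel();
    if local_hit {
        // Local cache hit: fulfil the receiver immediately.
        let _ = tx.send(Ok(7));
    } else {
        // Miss: fall back to an async fetch; the receiver resolves later.
        tokio::spawn(async move {
            let _ = tx.send(Err("could not get data".to_string()));
        });
    }
    rx
}

#[tokio::main]
async fn main() {
    assert_eq!(get_value(true).await.unwrap(), Ok(7));
    assert!(get_value(false).await.unwrap().is_err());
}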
parent_block_id: HashValue, - ) -> Result { - fail_point!("consensus::compute", |_| { - Err(ExecutionError::InternalError { - error: "Injected error in compute".into(), - }) - }); + ) -> StateComputeResultFut { let block_id = block.id(); debug!( block = %block, @@ -124,48 +125,54 @@ impl StateComputer for ExecutionProxy { let payload_manager = self.payload_manager.lock().as_ref().unwrap().clone(); let txn_deduper = self.transaction_deduper.lock().as_ref().unwrap().clone(); let txn_shuffler = self.transaction_shuffler.lock().as_ref().unwrap().clone(); - let txns = payload_manager.get_transactions(block).await?; + let txn_notifier = self.txn_notifier.clone(); + let txns = match payload_manager.get_transactions(block).await { + Ok(txns) => txns, + Err(err) => return Box::pin(async move { Err(err) }), + }; let deduped_txns = txn_deduper.dedup(txns); let shuffled_txns = txn_shuffler.shuffle(deduped_txns); - let block_gas_limit = *self.maybe_block_gas_limit.lock(); + let maybe_block_gas_limit = *self.maybe_block_gas_limit.lock(); // TODO: figure out error handling for the prologue txn - let executor = self.executor.clone(); - + let timestamp = block.timestamp_usecs(); let transactions_to_execute = block.transactions_to_execute( &self.validators.lock(), shuffled_txns.clone(), - block_gas_limit, + maybe_block_gas_limit, ); - let compute_result = monitor!( - "execute_block", - tokio::task::spawn_blocking(move || { - executor.execute_block( - (block_id, transactions_to_execute).into(), - parent_block_id, - block_gas_limit, - ) - }) - .await - ) - .expect("spawn_blocking failed")?; - - observe_block(block.timestamp_usecs(), BlockStage::EXECUTED); + let fut = self + .execution_pipeline + .queue( + (block_id, transactions_to_execute).into(), + parent_block_id, + maybe_block_gas_limit, + ) + .await; - // notify mempool about failed transaction - if let Err(e) = self - .txn_notifier - .notify_failed_txn(shuffled_txns, &compute_result) - .await - { - error!( - error = ?e, "Failed to notify mempool of rejected txns", + Box::pin(async move { + debug!( + block_id = block_id, + "Got state compute result, post processing." ); - } - Ok(compute_result) + let compute_result = fut.await?; + observe_block(timestamp, BlockStage::EXECUTED); + + // notify mempool about failed transaction + if let Err(e) = txn_notifier + .notify_failed_txn(shuffled_txns, &compute_result) + .await + { + error!( + error = ?e, "Failed to notify mempool of rejected txns", + ); + } + + Ok(compute_result) + }) } /// Send a successful commit. A future is fulfilled when the state is finalized. 
@@ -174,7 +181,7 @@ impl StateComputer for ExecutionProxy { blocks: &[Arc], finality_proof: LedgerInfoWithSignatures, callback: StateComputerCommitCallBackType, - ) -> Result<(), ExecutionError> { + ) -> ExecutorResult<()> { let mut latest_logical_time = self.write_mutex.lock().await; let mut block_ids = Vec::new(); @@ -359,7 +366,7 @@ async fn test_commit_sync_race() { _block: ExecutableBlock, _parent_block_id: HashValue, _maybe_block_gas_limit: Option, - ) -> Result { + ) -> ExecutorResult { Ok(StateComputeResult::new_dummy()) } @@ -368,7 +375,7 @@ async fn test_commit_sync_race() { _block: ExecutableBlock, _parent_block_id: HashValue, _maybe_block_gas_limit: Option, - ) -> Result { + ) -> ExecutorResult { todo!() } @@ -377,7 +384,7 @@ async fn test_commit_sync_race() { _block_id: HashValue, _parent_block_id: HashValue, _state_checkpoint_output: StateCheckpointOutput, - ) -> Result { + ) -> ExecutorResult { todo!() } @@ -386,7 +393,7 @@ async fn test_commit_sync_race() { _block_ids: Vec, ledger_info_with_sigs: LedgerInfoWithSignatures, _save_state_snapshots: bool, - ) -> Result<(), ExecutionError> { + ) -> ExecutorResult<()> { *self.time.lock() = LogicalTime::new( ledger_info_with_sigs.ledger_info().epoch(), ledger_info_with_sigs.ledger_info().round(), diff --git a/consensus/src/state_replication.rs b/consensus/src/state_replication.rs index 55ea3400f8576..08f20d7fbfa10 100644 --- a/consensus/src/state_replication.rs +++ b/consensus/src/state_replication.rs @@ -5,6 +5,7 @@ use crate::{ error::{QuorumStoreError, StateSyncError}, payload_manager::PayloadManager, + state_computer::StateComputeResultFut, transaction_deduper::TransactionDeduper, transaction_shuffler::TransactionShuffler, }; @@ -15,7 +16,7 @@ use aptos_consensus_types::{ executed_block::ExecutedBlock, }; use aptos_crypto::HashValue; -use aptos_executor_types::{Error as ExecutionError, StateComputeResult}; +use aptos_executor_types::{ExecutorResult, StateComputeResult}; use aptos_types::{epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures}; use futures::future::BoxFuture; use std::{sync::Arc, time::Duration}; @@ -56,7 +57,19 @@ pub trait StateComputer: Send + Sync { block: &Block, // The parent block root hash. parent_block_id: HashValue, - ) -> Result; + ) -> ExecutorResult { + self.schedule_compute(block, parent_block_id).await.await + } + + async fn schedule_compute( + &self, + // The block that will be computed. + _block: &Block, + // The parent block root hash. + _parent_block_id: HashValue, + ) -> StateComputeResultFut { + unimplemented!("This state computer does not support scheduling"); + } /// Send a successful commit. A future is fulfilled when the state is finalized. async fn commit( @@ -64,7 +77,7 @@ pub trait StateComputer: Send + Sync { blocks: &[Arc], finality_proof: LedgerInfoWithSignatures, callback: StateComputerCommitCallBackType, - ) -> Result<(), ExecutionError>; + ) -> ExecutorResult<()>; /// Best effort state synchronization to the given target LedgerInfo. /// In case of success (`Result::Ok`) the LI of storage is at the given target. 
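The StateComputer trait change above gives compute() a default body that schedules first and then awaits the returned future, so existing implementors keep working while ExecutionProxy overrides schedule_compute. A minimal sketch of that default-method pattern with made-up names, using async-trait as the diff does:

use std::future::Future;
use std::pin::Pin;

type ResultFut = Pin<Box<dyn Future<Output = u64> + Send>>;

#[async_trait::async_trait]
trait Computer: Send + Sync {
    async fn schedule_compute(&self, input: u64) -> ResultFut;

    // Default implementation: schedule, then immediately wait, preserving the
    // old compute() behavior for callers that never schedule explicitly.
    async fn compute(&self, input: u64) -> u64 {
        self.schedule_compute(input).await.await
    }
}

struct Doubler;

#[async_trait::async_trait]
impl Computer for Doubler {
    async fn schedule_compute(&self, input: u64) -> ResultFut {
        Box::pin(async move { input * 2 })
    }
}

#[tokio::main]
async fn main() {
    assert_eq!(Doubler.compute(21).await, 42);
}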
diff --git a/consensus/src/test_utils/mock_state_computer.rs b/consensus/src/test_utils/mock_state_computer.rs index b03e90f60d7b7..d95f374513a06 100644 --- a/consensus/src/test_utils/mock_state_computer.rs +++ b/consensus/src/test_utils/mock_state_computer.rs @@ -6,6 +6,7 @@ use crate::{ error::StateSyncError, experimental::buffer_manager::OrderedBlocks, payload_manager::PayloadManager, + state_computer::StateComputeResultFut, state_replication::{StateComputer, StateComputerCommitCallBackType}, test_utils::mock_storage::MockStorage, transaction_deduper::TransactionDeduper, @@ -14,7 +15,7 @@ use crate::{ use anyhow::{format_err, Result}; use aptos_consensus_types::{block::Block, common::Payload, executed_block::ExecutedBlock}; use aptos_crypto::HashValue; -use aptos_executor_types::{Error, StateComputeResult}; +use aptos_executor_types::{ExecutorError, ExecutorResult, StateComputeResult}; use aptos_infallible::Mutex; use aptos_logger::prelude::*; use aptos_types::{ @@ -47,7 +48,7 @@ impl MockStateComputer { } } - pub async fn commit_to_storage(&self, blocks: OrderedBlocks) -> Result<(), Error> { + pub async fn commit_to_storage(&self, blocks: OrderedBlocks) -> ExecutorResult<()> { let OrderedBlocks { ordered_blocks, ordered_proof, @@ -84,7 +85,7 @@ impl StateComputer for MockStateComputer { &self, block: &Block, _parent_block_id: HashValue, - ) -> Result { + ) -> ExecutorResult { self.block_cache.lock().insert( block.id(), block.payload().unwrap_or(&Payload::empty(false)).clone(), @@ -98,7 +99,7 @@ impl StateComputer for MockStateComputer { blocks: &[Arc], finality_proof: LedgerInfoWithSignatures, callback: StateComputerCommitCallBackType, - ) -> Result<(), Error> { + ) -> ExecutorResult<()> { assert!(!blocks.is_empty()); info!( "MockStateComputer commit put on queue {:?}", @@ -155,7 +156,7 @@ impl StateComputer for EmptyStateComputer { &self, _block: &Block, _parent_block_id: HashValue, - ) -> Result { + ) -> ExecutorResult { Ok(StateComputeResult::new_dummy()) } @@ -164,7 +165,7 @@ impl StateComputer for EmptyStateComputer { _blocks: &[Arc], _commit: LedgerInfoWithSignatures, _call_back: StateComputerCommitCallBackType, - ) -> Result<(), Error> { + ) -> ExecutorResult<()> { Ok(()) } @@ -206,19 +207,20 @@ impl RandomComputeResultStateComputer { #[async_trait::async_trait] impl StateComputer for RandomComputeResultStateComputer { - async fn compute( + async fn schedule_compute( &self, _block: &Block, parent_block_id: HashValue, - ) -> Result { + ) -> StateComputeResultFut { // trapdoor for Execution Error - if parent_block_id == self.random_compute_result_root_hash { - Err(Error::BlockNotFound(parent_block_id)) + let res = if parent_block_id == self.random_compute_result_root_hash { + Err(ExecutorError::BlockNotFound(parent_block_id)) } else { Ok(StateComputeResult::new_dummy_with_root_hash( self.random_compute_result_root_hash, )) - } + }; + Box::pin(async move { res }) } async fn commit( @@ -226,7 +228,7 @@ impl StateComputer for RandomComputeResultStateComputer { _blocks: &[Arc], _commit: LedgerInfoWithSignatures, _call_back: StateComputerCommitCallBackType, - ) -> Result<(), Error> { + ) -> ExecutorResult<()> { Ok(()) } diff --git a/consensus/src/test_utils/mock_storage.rs b/consensus/src/test_utils/mock_storage.rs index 871aab4bf388c..8f3e67c3ccffb 100644 --- a/consensus/src/test_utils/mock_storage.rs +++ b/consensus/src/test_utils/mock_storage.rs @@ -233,6 +233,10 @@ impl PersistentLivenessStorage for MockStorage { fn aptos_db(&self) -> Arc { unimplemented!() } + + fn 
consensus_db(&self) -> Arc { + unimplemented!() + } } /// A storage that ignores any requests, used in the tests that don't care about the storage. @@ -301,4 +305,8 @@ impl PersistentLivenessStorage for EmptyStorage { fn aptos_db(&self) -> Arc { unimplemented!() } + + fn consensus_db(&self) -> Arc { + unimplemented!() + } } diff --git a/crates/aptos-faucet/core/src/server/run.rs b/crates/aptos-faucet/core/src/server/run.rs index 6e1256ba75e10..54df2c8def77d 100644 --- a/crates/aptos-faucet/core/src/server/run.rs +++ b/crates/aptos-faucet/core/src/server/run.rs @@ -825,7 +825,7 @@ mod test { // Assert that the account exists now with the expected balance. let response = aptos_node_api_client - .get_account_balance(AccountAddress::from_hex(fund_request.address.unwrap()).unwrap()) + .get_account_balance(AccountAddress::from_str(&fund_request.address.unwrap()).unwrap()) .await?; assert_eq!(response.into_inner().get(), 10); @@ -883,7 +883,7 @@ mod test { // Assert that the account exists now with the expected balance. let response = aptos_node_api_client - .get_account_balance(AccountAddress::from_hex(fund_request.address.unwrap()).unwrap()) + .get_account_balance(AccountAddress::from_str(&fund_request.address.unwrap()).unwrap()) .await?; assert_eq!(response.into_inner().get(), 10); @@ -931,7 +931,7 @@ mod test { // Confirm that the account was given the full 1000 OCTA as requested. let response = aptos_node_api_client - .get_account_balance(AccountAddress::from_hex(fund_request.address.unwrap()).unwrap()) + .get_account_balance(AccountAddress::from_str(&fund_request.address.unwrap()).unwrap()) .await?; assert_eq!(response.into_inner().get(), 1000); @@ -949,7 +949,7 @@ mod test { // Confirm that the account was only given 100 OCTA (maximum_amount), not 1000. 
let response = aptos_node_api_client - .get_account_balance(AccountAddress::from_hex(fund_request.address.unwrap()).unwrap()) + .get_account_balance(AccountAddress::from_str(&fund_request.address.unwrap()).unwrap()) .await?; assert_eq!(response.into_inner().get(), 100); diff --git a/crates/aptos-faucet/integration-tests/common.py b/crates/aptos-faucet/integration-tests/common.py index 1102a6f9ab7dd..a0c2b61f0e266 100644 --- a/crates/aptos-faucet/integration-tests/common.py +++ b/crates/aptos-faucet/integration-tests/common.py @@ -1,18 +1,63 @@ # Copyright © Aptos Foundation # SPDX-License-Identifier: Apache-2.0 -from enum import Enum +from dataclasses import dataclass +from typing import Optional NODE_PORT = 8080 -class Network(Enum): - DEVNET = "devnet" - TESTNET = "testnet" +DEVNET = "devnet" +TESTNET = "testnet" +CUSTOM = "custom" + +@dataclass +class Network: + def __str__(self) -> str: + raise NotImplementedError() + + +class DevnetNetwork(Network): + def __str__(self) -> str: + return DEVNET + + def tag(self) -> str: + return str(self) + + +class TestnetNetwork(Network): def __str__(self): - return self.value + return TESTNET + + def tag(self) -> str: + return str(self) + + +class CustomNetwork(Network): + def __init__(self, tag: str): + self._tag = tag + + def __str__(self) -> str: + return self._tag + + def tag(self) -> str: + return self._tag + + +VALID_NETWORK_OPTIONS = [DEVNET, TESTNET, CUSTOM] + + +def network_from_str(str: str, tag: Optional[str]) -> Network: + if str == DEVNET: + return DevnetNetwork() + elif str == TESTNET: + return TestnetNetwork() + else: + if not tag: + raise ValueError("--tag must be provided for custom network") + return CustomNetwork(tag) -def build_image_name(image_repo_with_project: str, tag: str): - return f"{image_repo_with_project}/tools:{tag}" +def build_image_name(image_repo_with_project: str, network: Network): + return f"{image_repo_with_project}/tools:{network.tag()}" diff --git a/crates/aptos-faucet/integration-tests/local_testnet.py b/crates/aptos-faucet/integration-tests/local_testnet.py index d030444970b1b..7076f8f4f5407 100644 --- a/crates/aptos-faucet/integration-tests/local_testnet.py +++ b/crates/aptos-faucet/integration-tests/local_testnet.py @@ -16,7 +16,7 @@ # stop running it later using the container name. For an explanation of these # arguments, see the argument parser in main.py. 
def run_node(network: Network, image_repo_with_project: str, external_test_dir: str): - image_name = build_image_name(image_repo_with_project, str(network)) + image_name = build_image_name(image_repo_with_project, network) container_name = f"local-testnet-{network}" internal_mount_path = "/mymount" LOG.info(f"Trying to run local testnet from image: {image_name}") diff --git a/crates/aptos-faucet/integration-tests/main.py b/crates/aptos-faucet/integration-tests/main.py index 9bd41ecf69f07..c9ced3fa12b88 100644 --- a/crates/aptos-faucet/integration-tests/main.py +++ b/crates/aptos-faucet/integration-tests/main.py @@ -20,10 +20,11 @@ import argparse import logging import os +import platform import shutil import sys -from common import Network +from common import VALID_NETWORK_OPTIONS, Network, network_from_str from local_testnet import run_node, stop_node, wait_for_startup from prechecks import check_redis_is_running from tests import run_faucet_integration_tests @@ -59,9 +60,18 @@ def parse_args(): parser.add_argument( "--base-network", required=True, - type=Network, - choices=list(Network), - help="What branch the Aptos CLI used for the local testnet should be built from", + choices=VALID_NETWORK_OPTIONS, + help=( + "What branch the Aptos CLI used for the local testnet should be built " + 'from. If "custom", --tag must be set.' + ), + ) + parser.add_argument( + "--tag", + help=( + 'If --base-network is set to "custom", this must be set to the image tag' + "to use. Otherwise this has no effect." + ), ) parser.add_argument( "--base-startup-timeout", @@ -87,13 +97,24 @@ def main(): else: logging.getLogger().setLevel(logging.INFO) + if platform.system() == "Darwin" and platform.processor().startswith("arm"): + if not os.environ.get("DOCKER_DEFAULT_PLATFORM"): + os.environ["DOCKER_DEFAULT_PLATFORM"] = "linux/amd64" + LOG.info( + "Detected ARM Mac and DOCKER_DEFAULT_PLATFORM was not set, setting it " + "to linux/amd64" + ) + + # Build the Network. + network = network_from_str(args.base_network, args.tag) + # Verify that a local Redis instance is running. This is just a basic check that # something is listening at the expected port. check_redis_is_running() # Run a node and wait for it to start up. 
container_name = run_node( - args.base_network, args.image_repo_with_project, args.external_test_dir + network, args.image_repo_with_project, args.external_test_dir ) wait_for_startup(container_name, args.base_startup_timeout) diff --git a/crates/aptos-genesis/src/builder.rs b/crates/aptos-genesis/src/builder.rs index 107ce045af474..3a55895e6e0c3 100644 --- a/crates/aptos-genesis/src/builder.rs +++ b/crates/aptos-genesis/src/builder.rs @@ -10,8 +10,8 @@ use anyhow::ensure; use aptos_config::{ config::{ DiscoveryMethod, Identity, IdentityBlob, InitialSafetyRulesConfig, NetworkConfig, - NodeConfig, OnDiskStorageConfig, PeerRole, RoleType, SafetyRulesService, SecureBackend, - WaypointConfig, + NodeConfig, OnDiskStorageConfig, OverrideNodeConfig, PeerRole, PersistableConfig, RoleType, + SafetyRulesService, SecureBackend, WaypointConfig, }, generator::build_seed_for_network, keys::ConfigKey, @@ -55,7 +55,7 @@ const GENESIS_BLOB: &str = "genesis.blob"; pub struct ValidatorNodeConfig { pub name: String, pub index: usize, - pub config: NodeConfig, + pub config: OverrideNodeConfig, pub dir: PathBuf, pub account_private_key: Option>, pub genesis_stake_amount: u64, @@ -68,14 +68,14 @@ impl ValidatorNodeConfig { name: String, index: usize, base_dir: &Path, - mut config: NodeConfig, + mut config: OverrideNodeConfig, genesis_stake_amount: u64, commission_percentage: u64, ) -> anyhow::Result { // Create the data dir and set it appropriately let dir = base_dir.join(&name); std::fs::create_dir_all(dir.as_path())?; - config.set_data_dir(dir.clone()); + config.override_config_mut().set_data_dir(dir.clone()); Ok(ValidatorNodeConfig { name, @@ -95,7 +95,8 @@ impl ValidatorNodeConfig { self.account_private_key = validator_identity.account_private_key.map(ConfigKey::new); // Init network identity - let validator_network = self.config.validator_network.as_mut().unwrap(); + let config = self.config.override_config_mut(); + let validator_network = config.validator_network.as_mut().unwrap(); let validator_identity_file = self.dir.join(VALIDATOR_IDENTITY); validator_network.identity = Identity::from_file(validator_identity_file); @@ -150,25 +151,32 @@ impl ValidatorNodeConfig { } fn insert_genesis(&mut self, genesis: &Transaction) { - self.config.execution.genesis = Some(genesis.clone()); - self.config.execution.genesis_file_location = self.dir.join(GENESIS_BLOB) + let config = self.config.override_config_mut(); + config.execution.genesis = Some(genesis.clone()); + config.execution.genesis_file_location = self.dir.join(GENESIS_BLOB) } fn insert_waypoint(&mut self, waypoint: &Waypoint) { + let config = self.config.override_config_mut(); let waypoint_config = WaypointConfig::FromConfig(*waypoint); // Init safety rules let validator_identity_file = self.dir.join(VALIDATOR_IDENTITY); - self.config - .consensus - .safety_rules - .initial_safety_rules_config = + config.consensus.safety_rules.initial_safety_rules_config = InitialSafetyRulesConfig::from_file(validator_identity_file, waypoint_config.clone()); - self.config.base.waypoint = waypoint_config; + config.base.waypoint = waypoint_config; } fn save_config(&mut self) -> anyhow::Result<()> { - Ok(self.config.save_to_path(self.dir.join(CONFIG_FILE))?) + // Save the execution config to disk along with the full config. 
+ self.config + .override_config_mut() + .save_to_path(self.dir.join(CONFIG_FILE))?; + + // Overwrite the full config with the override config + self.config + .save_config(self.dir.join(CONFIG_FILE)) + .map_err(Into::into) } } @@ -179,6 +187,7 @@ impl TryFrom<&ValidatorNodeConfig> for ValidatorConfiguration { let (_, _, private_identity, _) = config.get_key_objects(None)?; let validator_host = (&config .config + .override_config() .validator_network .as_ref() .unwrap() @@ -187,6 +196,7 @@ impl TryFrom<&ValidatorNodeConfig> for ValidatorConfiguration { let full_node_host = Some( (&config .config + .override_config() .full_node_networks .iter() .find(|network| network.network_id == NetworkId::Public) @@ -223,7 +233,7 @@ impl TryFrom<&ValidatorNodeConfig> for ValidatorConfiguration { pub struct FullnodeNodeConfig { pub name: String, - pub config: NodeConfig, + pub config: OverrideNodeConfig, pub dir: PathBuf, } @@ -231,7 +241,7 @@ impl FullnodeNodeConfig { pub fn public_fullnode( name: String, config_dir: &Path, - config: NodeConfig, + config: OverrideNodeConfig, waypoint: &Waypoint, genesis: &Transaction, ) -> anyhow::Result { @@ -240,7 +250,7 @@ impl FullnodeNodeConfig { fullnode_config.insert_waypoint(waypoint); fullnode_config.insert_genesis(genesis)?; fullnode_config.set_identity()?; - fullnode_config.config.randomize_ports(); + fullnode_config.randomize_ports(); fullnode_config.save_config()?; Ok(fullnode_config) @@ -249,7 +259,7 @@ impl FullnodeNodeConfig { pub fn validator_fullnode( name: String, config_dir: &Path, - fullnode_config: NodeConfig, + fullnode_config: OverrideNodeConfig, validator_config: &NodeConfig, waypoint: &Waypoint, genesis: &Transaction, @@ -259,29 +269,36 @@ impl FullnodeNodeConfig { fullnode_config.insert_waypoint(waypoint); fullnode_config.insert_genesis(genesis)?; - fullnode_config.config.randomize_ports(); + fullnode_config.randomize_ports(); fullnode_config.attach_to_validator(public_network, validator_config)?; fullnode_config.save_config()?; Ok(fullnode_config) } - fn new(name: String, config_dir: &Path, mut config: NodeConfig) -> anyhow::Result { + fn new( + name: String, + config_dir: &Path, + mut config: OverrideNodeConfig, + ) -> anyhow::Result { + let inner = config.override_config_mut(); + ensure!( - matches!(config.base.role, RoleType::FullNode), + matches!(inner.base.role, RoleType::FullNode), "config must be a FullNode config" ); let dir = config_dir.join(&name); std::fs::create_dir_all(&dir)?; - config.set_data_dir(dir.clone()); + inner.set_data_dir(dir.clone()); Ok(Self { name, config, dir }) } fn insert_waypoint(&mut self, waypoint: &Waypoint) { - self.config.base.waypoint = WaypointConfig::FromConfig(*waypoint); + let config = self.config.override_config_mut(); + config.base.waypoint = WaypointConfig::FromConfig(*waypoint); } fn insert_genesis(&mut self, genesis: &Transaction) -> anyhow::Result<()> { @@ -289,16 +306,23 @@ impl FullnodeNodeConfig { let genesis_file_location = self.dir.join("genesis.blob"); File::create(&genesis_file_location)?.write_all(&bcs::to_bytes(&genesis)?)?; - self.config.execution.genesis = Some(genesis.clone()); - self.config.execution.genesis_file_location = genesis_file_location; + let config = self.config.override_config_mut(); + config.execution.genesis = Some(genesis.clone()); + config.execution.genesis_file_location = genesis_file_location; Ok(()) } + fn randomize_ports(&mut self) { + let config = self.config.override_config_mut(); + config.randomize_ports(); + } + /// Sets identity for a public full node. 
Should only be run on a public full node fn set_identity(&mut self) -> anyhow::Result<()> { - if self - .config + let config = self.config.override_config_mut(); + + if config .full_node_networks .iter() .any(|config| config.network_id == NetworkId::Vfn) @@ -306,8 +330,7 @@ impl FullnodeNodeConfig { panic!("Shouldn't call set_identity on a Validator full node"); } - let public_network = self - .config + let public_network = config .full_node_networks .iter_mut() .find(|config| config.network_id == NetworkId::Public) @@ -328,8 +351,9 @@ impl FullnodeNodeConfig { "Validator config must be a Validator config" ); - let fullnode_public_network = self - .config + let config = self.config.override_config_mut(); + + let fullnode_public_network = config .full_node_networks .iter_mut() .find(|config| config.network_id == NetworkId::Public) @@ -345,8 +369,7 @@ impl FullnodeNodeConfig { .find(|config| config.network_id.is_vfn_network()) .expect("Validator should have vfn network"); - let fullnode_vfn_network = self - .config + let fullnode_vfn_network = config .full_node_networks .iter_mut() .find(|config| config.network_id.is_vfn_network()) @@ -360,13 +383,9 @@ impl FullnodeNodeConfig { Ok(()) } - pub fn config_path(&self) -> PathBuf { - self.dir.join("node.yaml") - } - fn save_config(&mut self) -> anyhow::Result<()> { self.config - .save_to_path(self.config_path()) + .save_config(self.dir.join(CONFIG_FILE)) .map_err(Into::into) } } @@ -414,7 +433,8 @@ pub struct GenesisConfiguration { pub gas_schedule: GasScheduleV2, } -pub type InitConfigFn = Arc; +pub type InitConfigFn = Arc; +pub type InitGenesisStakeFn = Arc; pub type InitGenesisConfigFn = Arc; /// Builder that builds a network of validator nodes that can run locally @@ -425,6 +445,7 @@ pub struct Builder { num_validators: NonZeroUsize, randomize_first_validator_ports: bool, init_config: Option, + init_genesis_stake: Option, init_genesis_config: Option, } @@ -439,6 +460,7 @@ impl Builder { num_validators: NonZeroUsize::new(1).unwrap(), randomize_first_validator_ports: true, init_config: None, + init_genesis_stake: None, init_genesis_config: None, }) } @@ -458,6 +480,14 @@ impl Builder { self } + pub fn with_init_genesis_stake( + mut self, + init_genesis_stake: Option, + ) -> Self { + self.init_genesis_stake = init_genesis_stake; + self + } + pub fn with_init_genesis_config( mut self, init_genesis_config: Option, @@ -519,18 +549,19 @@ impl Builder { { let name = index.to_string(); - let mut config = template.clone(); - let mut genesis_stake_amount = 10; + let mut override_config = template.clone(); + let mut base_config = NodeConfig::default(); if let Some(init_config) = &self.init_config { - (init_config)(index, &mut config, &mut genesis_stake_amount); + (init_config)(index, &mut override_config, &mut base_config); } let mut validator = ValidatorNodeConfig::new( name, index, self.config_dir.as_path(), - config, - genesis_stake_amount, + OverrideNodeConfig::new(override_config, base_config), + // Default value. Can be overriden by init_genesis_stake + 10, // Default to 0% commission for local node building. 
0, )?; @@ -540,7 +571,7 @@ impl Builder { // By default, we don't start with VFNs, so ensure that the REST port is open let vfn_identity_path = validator.dir.join(VFN_IDENTITY); - let config = &mut validator.config; + let config = &mut validator.config.override_config_mut(); let fullnode_network_listen_address = if let Some(template_fullnode_config) = config.full_node_networks.first() { template_fullnode_config.listen_address.clone() @@ -591,6 +622,11 @@ impl Builder { ) -> anyhow::Result<(Transaction, Waypoint)> { let mut configs: Vec = Vec::new(); + if let Some(init_genesis_stake) = &self.init_genesis_stake { + for validator in validators.iter_mut() { + (init_genesis_stake)(validator.index, &mut validator.genesis_stake_amount); + } + } for validator in validators.iter() { configs.push(validator.try_into()?); } diff --git a/crates/aptos-genesis/src/test_utils.rs b/crates/aptos-genesis/src/test_utils.rs index fcfcf99c09c56..acf86fcb78aed 100644 --- a/crates/aptos-genesis/src/test_utils.rs +++ b/crates/aptos-genesis/src/test_utils.rs @@ -30,11 +30,12 @@ pub fn test_config() -> (NodeConfig, Ed25519PrivateKey) { let mut configs = validators.into_iter().map(|v| v.config).collect::>(); let mut config = configs.swap_remove(0); + let config = config.override_config_mut(); config.set_data_dir(path.path().to_path_buf()); let mut sr_test = aptos_config::config::SafetyRulesTestConfig::new(account_address.unwrap()); sr_test.consensus_key(consensus_private_key.unwrap()); config.consensus.safety_rules.test = Some(sr_test); - (config, root_key) + (config.clone(), root_key) } diff --git a/crates/aptos-protos/Cargo.lock b/crates/aptos-protos/Cargo.lock index f6d04b5ebb9fd..241ef5c768683 100644 --- a/crates/aptos-protos/Cargo.lock +++ b/crates/aptos-protos/Cargo.lock @@ -1036,9 +1036,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" dependencies = [ "ring", "untrusted", diff --git a/crates/aptos-protos/Cargo.toml b/crates/aptos-protos/Cargo.toml index b8123b2dbc01a..e0a8c0876f904 100644 --- a/crates/aptos-protos/Cargo.toml +++ b/crates/aptos-protos/Cargo.toml @@ -1,16 +1,19 @@ [package] name = "aptos-protos" -version = "1.0.0" -authors = ["Aptos Labs "] -description = "Aptos Protobuf Definitions" -repository = "https://github.com/aptos-labs/aptos-core" -homepage = "https://aptoslabs.com" -license = "Apache-2.0" -publish = false -edition = "2021" +version = "1.0.1" + +# Workspace inherited keys +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } [dependencies] -pbjson = "0.4.0" -prost = "0.11.3" -serde = { version = "1.0.137", features = ["derive"], default-features = false } -tonic = { version = "0.8.3", features = ["tls-roots", "transport", "prost", "gzip", "codegen"] } \ No newline at end of file +futures-core = { workspace = true } +pbjson = { workspace = true } +prost = { workspace = true } +serde = { workspace = true } +tonic = { workspace = true } diff --git a/crates/aptos-protos/src/lib.rs b/crates/aptos-protos/src/lib.rs index 28911035e6675..783e15c0b544e 100644 --- a/crates/aptos-protos/src/lib.rs +++ b/crates/aptos-protos/src/lib.rs @@ -1,5 +1,6 @@ // Copyright 
© Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::needless_borrow)] mod pb; pub use pb::aptos::*; diff --git a/crates/aptos-protos/src/pb/aptos.indexer.v1.tonic.rs b/crates/aptos-protos/src/pb/aptos.indexer.v1.tonic.rs index 47aa76ee5d227..41ad03c2a7a84 100644 --- a/crates/aptos-protos/src/pb/aptos.indexer.v1.tonic.rs +++ b/crates/aptos-protos/src/pb/aptos.indexer.v1.tonic.rs @@ -15,7 +15,7 @@ pub mod raw_data_client { /// Attempt to create a new client by connecting to a given endpoint. pub async fn connect(dst: D) -> Result where - D: std::convert::TryInto, + D: TryInto, D::Error: Into, { let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; @@ -71,12 +71,28 @@ pub mod raw_data_client { self.inner = self.inner.accept_compressed(encoding); self } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } /** Get transactions batch without any filtering from starting version and end if transaction count is present. */ pub async fn get_transactions( &mut self, request: impl tonic::IntoRequest, - ) -> Result< + ) -> std::result::Result< tonic::Response>, tonic::Status, > { @@ -93,7 +109,10 @@ pub mod raw_data_client { let path = http::uri::PathAndQuery::from_static( "/aptos.indexer.v1.RawData/GetTransactions", ); - self.inner.server_streaming(request.into_request(), path, codec).await + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("aptos.indexer.v1.RawData", "GetTransactions")); + self.inner.server_streaming(req, path, codec).await } } } @@ -106,7 +125,7 @@ pub mod raw_data_server { pub trait RawData: Send + Sync + 'static { /// Server streaming response type for the GetTransactions method. type GetTransactionsStream: futures_core::Stream< - Item = Result, + Item = std::result::Result, > + Send + 'static; @@ -115,7 +134,10 @@ pub mod raw_data_server { async fn get_transactions( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } /// #[derive(Debug)] @@ -123,6 +145,8 @@ pub mod raw_data_server { inner: _Inner, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, } struct _Inner(Arc); impl RawDataServer { @@ -135,6 +159,8 @@ pub mod raw_data_server { inner, accept_compression_encodings: Default::default(), send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, } } pub fn with_interceptor( @@ -158,6 +184,22 @@ pub mod raw_data_server { self.send_compression_encodings.enable(encoding); self } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } } impl tonic::codegen::Service> for RawDataServer where @@ -171,7 +213,7 @@ pub mod raw_data_server { fn poll_ready( &mut self, _cx: &mut Context<'_>, - ) -> Poll> { + ) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { @@ -195,7 +237,7 @@ pub mod raw_data_server { &mut self, request: tonic::Request, ) -> Self::Future { - let inner = self.0.clone(); + let inner = Arc::clone(&self.0); let fut = async move { (*inner).get_transactions(request).await }; @@ -204,6 +246,8 @@ pub mod raw_data_server { } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -213,6 +257,10 @@ pub mod raw_data_server { .apply_compression_config( accept_compression_encodings, send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.server_streaming(method, req).await; Ok(res) @@ -241,12 +289,14 @@ pub mod raw_data_server { inner, accept_compression_encodings: self.accept_compression_encodings, send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, } } } impl Clone for _Inner { fn clone(&self) -> Self { - Self(self.0.clone()) + Self(Arc::clone(&self.0)) } } impl std::fmt::Debug for _Inner { diff --git a/crates/aptos-protos/src/pb/aptos.internal.fullnode.v1.tonic.rs b/crates/aptos-protos/src/pb/aptos.internal.fullnode.v1.tonic.rs index 5a17208684ae7..2e03da7f88bcc 100644 --- a/crates/aptos-protos/src/pb/aptos.internal.fullnode.v1.tonic.rs +++ b/crates/aptos-protos/src/pb/aptos.internal.fullnode.v1.tonic.rs @@ -15,7 +15,7 @@ pub mod fullnode_data_client { /// Attempt to create a new client by connecting to a given endpoint. pub async fn connect(dst: D) -> Result where - D: std::convert::TryInto, + D: TryInto, D::Error: Into, { let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; @@ -71,11 +71,27 @@ pub mod fullnode_data_client { self.inner = self.inner.accept_compressed(encoding); self } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } /// pub async fn get_transactions_from_node( &mut self, request: impl tonic::IntoRequest, - ) -> Result< + ) -> std::result::Result< tonic::Response< tonic::codec::Streaming, >, @@ -94,7 +110,15 @@ pub mod fullnode_data_client { let path = http::uri::PathAndQuery::from_static( "/aptos.internal.fullnode.v1.FullnodeData/GetTransactionsFromNode", ); - self.inner.server_streaming(request.into_request(), path, codec).await + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "aptos.internal.fullnode.v1.FullnodeData", + "GetTransactionsFromNode", + ), + ); + self.inner.server_streaming(req, path, codec).await } } } @@ -107,7 +131,10 @@ pub mod fullnode_data_server { pub trait FullnodeData: Send + Sync + 'static { /// Server streaming response type for the GetTransactionsFromNode method. type GetTransactionsFromNodeStream: futures_core::Stream< - Item = Result, + Item = std::result::Result< + super::TransactionsFromNodeResponse, + tonic::Status, + >, > + Send + 'static; @@ -115,7 +142,10 @@ pub mod fullnode_data_server { async fn get_transactions_from_node( &self, request: tonic::Request, - ) -> Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } /// #[derive(Debug)] @@ -123,6 +153,8 @@ pub mod fullnode_data_server { inner: _Inner, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, } struct _Inner(Arc); impl FullnodeDataServer { @@ -135,6 +167,8 @@ pub mod fullnode_data_server { inner, accept_compression_encodings: Default::default(), send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, } } pub fn with_interceptor( @@ -158,6 +192,22 @@ pub mod fullnode_data_server { self.send_compression_encodings.enable(encoding); self } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } } impl tonic::codegen::Service> for FullnodeDataServer where @@ -171,7 +221,7 @@ pub mod fullnode_data_server { fn poll_ready( &mut self, _cx: &mut Context<'_>, - ) -> Poll> { + ) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { @@ -197,7 +247,7 @@ pub mod fullnode_data_server { super::GetTransactionsFromNodeRequest, >, ) -> Self::Future { - let inner = self.0.clone(); + let inner = Arc::clone(&self.0); let fut = async move { (*inner).get_transactions_from_node(request).await }; @@ -206,6 +256,8 @@ pub mod fullnode_data_server { } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; @@ -215,6 +267,10 @@ pub mod fullnode_data_server { .apply_compression_config( accept_compression_encodings, send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, ); let res = grpc.server_streaming(method, req).await; Ok(res) @@ -243,12 +299,14 @@ pub mod fullnode_data_server { inner, accept_compression_encodings: self.accept_compression_encodings, send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, } } } impl Clone for _Inner { fn clone(&self) -> Self { - Self(self.0.clone()) + Self(Arc::clone(&self.0)) } } impl std::fmt::Debug for _Inner { diff --git a/crates/aptos-rosetta/src/types/objects.rs b/crates/aptos-rosetta/src/types/objects.rs index 6320d8eb8b6aa..84edf54c240cb 100644 --- a/crates/aptos-rosetta/src/types/objects.rs +++ b/crates/aptos-rosetta/src/types/objects.rs @@ -1193,10 +1193,11 @@ async fn parse_operations_from_write_set( }, }; - let data = match write_op.bytes() { + let bytes = match write_op.bytes() { Some(bytes) => bytes, None => return Ok(vec![]), }; + let data = &bytes; // Determine operation match ( diff --git a/crates/aptos/CHANGELOG.md b/crates/aptos/CHANGELOG.md index 7c55766ace337..c4e24f62871fa 100644 --- a/crates/aptos/CHANGELOG.md +++ b/crates/aptos/CHANGELOG.md @@ -3,6 +3,9 @@ All notable changes to the Aptos CLI will be captured in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) and the format set out by [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ## Unreleased +### Added +- Added an option `--print-metadata` to the command `aptos move download` to print out the metadata of the package to be downloaded. 
+ - Example: `aptos move download --account 0x1 --package AptosFramework --url https://mainnet.aptoslabs.com/v1 --print-metadata` ## [2.1.0] - 2023/08/24 ### Updated diff --git a/crates/aptos/Cargo.toml b/crates/aptos/Cargo.toml index f96a474bf544c..de4f0ed7f0f1f 100644 --- a/crates/aptos/Cargo.toml +++ b/crates/aptos/Cargo.toml @@ -40,7 +40,6 @@ aptos-sdk = { workspace = true } aptos-storage-interface = { workspace = true } aptos-telemetry = { workspace = true } aptos-temppath = { workspace = true } -aptos-transactional-test-harness = { workspace = true } aptos-types = { workspace = true } aptos-vm = { workspace = true, features = ["testing"] } aptos-vm-genesis = { workspace = true } @@ -72,7 +71,7 @@ once_cell = { workspace = true } rand = { workspace = true } regex = { workspace = true } reqwest = { workspace = true } -self_update = { version = "0.34.0", features = ["archive-zip", "compression-zip-deflate"] } +self_update = { version = "0.38.0", features = ["archive-zip", "compression-zip-deflate"] } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } diff --git a/crates/aptos/e2e/cases/account.py b/crates/aptos/e2e/cases/account.py index b6db5262ae40b..7fddef738e33a 100644 --- a/crates/aptos/e2e/cases/account.py +++ b/crates/aptos/e2e/cases/account.py @@ -40,7 +40,7 @@ def test_account_fund_with_faucet(run_helper: RunHelper, test_name=None): @test_case -def test_account_create(run_helper: RunHelper, test_name=None): +def test_account_create_and_transfer(run_helper: RunHelper, test_name=None): # Create the new account. run_helper.run_command( test_name, @@ -62,7 +62,59 @@ def test_account_create(run_helper: RunHelper, test_name=None): raise TestError( f"Account {OTHER_ACCOUNT_ONE.account_address} has balance {balance}, expected 0" ) + + transfer_amount = 1000 + + run_helper.run_command( + test_name, + [ + "aptos", + "account", + "transfer", + "--account", + OTHER_ACCOUNT_ONE.account_address, + "--amount", + str(transfer_amount), + "--assume-yes", + ], + ) + + balance = int( + run_helper.api_client.account_balance(OTHER_ACCOUNT_ONE.account_address) + ) + if balance != transfer_amount: + raise TestError( + f"Account {OTHER_ACCOUNT_ONE.account_address} has balance {balance}, expected {transfer_amount}" + ) + + +@test_case +def test_account_list(run_helper: RunHelper, test_name=None): + # List the created account + result = run_helper.run_command( + test_name, + [ + "aptos", + "account", + "list", + "--account", + OTHER_ACCOUNT_ONE.account_address, + ], + ) + + json_result = json.loads(result.stdout) + found_account = False + + # Check if the resource account is in the list + for module in json_result["Result"]: + if module.get("0x1::account::Account") != None: + found_account = True + + if not found_account: + raise TestError( + "Cannot find the account in the account list after account creation" + ) @test_case def test_account_lookup_address(run_helper: RunHelper, test_name=None): diff --git a/crates/aptos/e2e/cases/stake.py b/crates/aptos/e2e/cases/stake.py index a2c70fce86e9e..2de1febab1088 100644 --- a/crates/aptos/e2e/cases/stake.py +++ b/crates/aptos/e2e/cases/stake.py @@ -356,3 +356,63 @@ def test_stake_withdraw_stake_after_unlock(run_helper: RunHelper, test_name=None raise TestError( f"The stake should be decreased by {amount_to_withdraw}. 
Expected {current_stake - amount_to_withdraw}, got {result[0].get('total_stake')}" ) + + +@test_case +def test_stake_request_commission(run_helper: RunHelper, test_name=None): + # create a new account + run_helper.run_command( + test_name, + [ + "aptos", + "init", + "--profile", + "request_commission", + "--assume-yes", + "--network", + "local", + ], + input="\n", + ) + + # create staking contract + run_helper.run_command( + test_name, + [ + "aptos", + "stake", + "create-staking-contract", + "--profile", + "request_commission", + "--operator", + "request_commission", + "--voter", + "request_commission", + "--amount", + "3", + "--commission-percentage", + "1", + "--assume-yes", + ], + ) + + # run the request-commission command + response = run_helper.run_command( + test_name, + [ + "aptos", + "stake", + "request-commission", + "--profile", + "request_commission", + "--owner-address", + "request_commission", + "--operator-address", + "request_commission", + "--assume-yes", + ], + ) + + result = json.loads(response.stdout)["Result"] + if result.get("success") != True: + raise TestError("Did not execute [request-commission] successfully") diff --git a/crates/aptos/e2e/main.py b/crates/aptos/e2e/main.py index 4d7323fea7c9a..57890ade723ac 100644 --- a/crates/aptos/e2e/main.py +++ b/crates/aptos/e2e/main.py @@ -30,7 +30,8 @@ import sys from cases.account import ( - test_account_create, + test_account_create_and_transfer, + test_account_list, test_account_fund_with_faucet, test_account_lookup_address, test_account_resource_account, @@ -55,6 +56,7 @@ test_stake_create_staking_contract, test_stake_increase_lockup, test_stake_initialize_stake_owner, + test_stake_request_commission, test_stake_set_operator, test_stake_set_voter, test_stake_unlock_stake, @@ -137,7 +139,8 @@ def run_tests(run_helper): # Run account tests. test_account_fund_with_faucet(run_helper) - test_account_create(run_helper) + test_account_create_and_transfer(run_helper) + test_account_list(run_helper) test_account_lookup_address(run_helper) test_account_resource_account(run_helper) @@ -161,6 +164,7 @@ def run_tests(run_helper): test_stake_set_operator(run_helper) test_stake_set_voter(run_helper) test_stake_create_staking_contract(run_helper) + test_stake_request_commission(run_helper) # Run node subcommand group tests. 
test_node_show_validator_set(run_helper) diff --git a/crates/aptos/src/move_tool/mod.rs b/crates/aptos/src/move_tool/mod.rs index 395ad5a5ac871..e2c62fc4b12e6 100644 --- a/crates/aptos/src/move_tool/mod.rs +++ b/crates/aptos/src/move_tool/mod.rs @@ -8,7 +8,6 @@ mod manifest; pub mod package_hooks; mod show; pub mod stored_package; -mod transactional_tests_runner; use crate::{ account::derive_resource_account::ResourceAccountSeed, @@ -41,7 +40,6 @@ use aptos_gas_schedule::{MiscGasParameters, NativeGasParameters}; use aptos_rest_client::aptos_api_types::{ EntryFunctionId, HexEncodedBytes, IdentifierWrapper, MoveModuleId, }; -use aptos_transactional_test_harness::run_aptos_test; use aptos_types::{ account_address::{create_resource_address, AccountAddress}, transaction::{TransactionArgument, TransactionPayload}, @@ -71,7 +69,6 @@ use std::{ }; pub use stored_package::*; use tokio::task; -use transactional_tests_runner::TransactionalTestOpts; /// Tool for Move related operations /// @@ -99,7 +96,6 @@ pub enum MoveTool { #[clap(subcommand, hide = true)] Show(show::ShowTool), Test(TestPackage), - TransactionalTest(TransactionalTestOpts), VerifyPackage(VerifyPackage), View(ViewFunction), } @@ -126,7 +122,6 @@ impl MoveTool { MoveTool::RunScript(tool) => tool.execute_serialized().await, MoveTool::Show(tool) => tool.execute_serialized().await, MoveTool::Test(tool) => tool.execute_serialized().await, - MoveTool::TransactionalTest(tool) => tool.execute_serialized_success().await, MoveTool::VerifyPackage(tool) => tool.execute_serialized().await, MoveTool::View(tool) => tool.execute_serialized().await, } @@ -535,26 +530,6 @@ impl CliCommand<&'static str> for TestPackage { } } -#[async_trait] -impl CliCommand<()> for TransactionalTestOpts { - fn command_name(&self) -> &'static str { - "TransactionalTest" - } - - async fn execute(self) -> CliTypedResult<()> { - let root_path = self.root_path.display().to_string(); - - let requirements = vec![transactional_tests_runner::Requirements::new( - run_aptos_test, - "tests".to_string(), - root_path, - self.pattern.clone(), - )]; - - transactional_tests_runner::runner(&self, &requirements) - } -} - /// Proves a Move package /// /// This is a tool for formal verification of a Move package using @@ -1017,6 +992,9 @@ pub struct DownloadPackage { pub(crate) rest_options: RestOptions, #[clap(flatten)] pub(crate) profile_options: ProfileOptions, + /// Print metadata of the package + #[clap(long)] + pub print_metadata: bool, } #[async_trait] @@ -1041,6 +1019,9 @@ impl CliCommand<&'static str> for DownloadPackage { .to_owned(), )); } + if self.print_metadata { + println!("{}", package); + } let package_path = output_dir.join(package.name()); package .save_package_to_disk(package_path.as_path()) diff --git a/crates/aptos/src/move_tool/stored_package.rs b/crates/aptos/src/move_tool/stored_package.rs index 2f4dfa4961a4c..5f931d5ada0eb 100644 --- a/crates/aptos/src/move_tool/stored_package.rs +++ b/crates/aptos/src/move_tool/stored_package.rs @@ -10,7 +10,7 @@ use aptos_rest_client::Client; use aptos_types::account_address::AccountAddress; use move_package::compilation::package_layout::CompiledPackageLayout; use reqwest::Url; -use std::{fs, path::Path}; +use std::{fmt, fs, path::Path}; // TODO: this is a first naive implementation of the package registry. Before mainnet // we need to use tables for the package registry. 
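The hunk that follows adds a `Display` implementation for `CachedPackageMetadata` that simply forwards to the metadata it borrows, which is what lets the new `--print-metadata` path in the CLI change above do `println!("{}", package)`. A minimal sketch of that borrowing-wrapper delegation pattern is shown here; `PackageMeta` and `MetaView` are hypothetical stand-ins for the real metadata types.

use std::fmt;

struct PackageMeta {
    name: String,
    upgrade_number: u64,
}

impl fmt::Display for PackageMeta {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "package: {} (upgrade {})", self.name, self.upgrade_number)
    }
}

// Analogue of the borrowing wrapper: holds a reference and delegates Display to it.
struct MetaView<'a> {
    metadata: &'a PackageMeta,
}

impl fmt::Display for MetaView<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "{}", self.metadata)?;
        Ok(())
    }
}

fn main() {
    let meta = PackageMeta {
        name: "AptosFramework".to_string(),
        upgrade_number: 3,
    };
    let view = MetaView { metadata: &meta };
    // Mirrors the CLI printing the package when the print-metadata flag is set.
    print!("{}", view);
}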
@@ -30,6 +30,13 @@ pub struct CachedModuleMetadata<'a> { metadata: &'a ModuleMetadata, } +impl fmt::Display for CachedPackageMetadata<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "{}", self.metadata)?; + Ok(()) + } +} + impl CachedPackageRegistry { /// Creates a new registry. pub async fn create(url: Url, addr: AccountAddress) -> anyhow::Result { diff --git a/crates/aptos/src/move_tool/transactional_tests_runner.rs b/crates/aptos/src/move_tool/transactional_tests_runner.rs deleted file mode 100644 index 2725c3fb6525b..0000000000000 --- a/crates/aptos/src/move_tool/transactional_tests_runner.rs +++ /dev/null @@ -1,345 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::common::types::{CliError, CliTypedResult}; -/// Most of the code below comes from the crate `datatest-stable`. Because the limitation of `datatest-stable`, -/// we are not able to run transactional tests as a subcommand of the Aptos CLI. Therefore, we need to duplicate code -/// here and make minor modifications. -/// -use clap::Parser; -use std::{ - io::{self, Write}, - num::NonZeroUsize, - panic::{catch_unwind, AssertUnwindSafe}, - path::{Path, PathBuf}, - process, - sync::mpsc::{channel, Sender}, - thread, -}; -use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}; - -type Result = std::result::Result>; - -/// Run Move transactional tests -#[derive(Parser, Clone)] -pub struct TransactionalTestOpts { - /// The filter string is tested against the name of all tests, and only those tests whose names - /// contain the filter are run. - #[clap(long)] - pub filter: Option, - - /// Exactly match filters rather than match by substring - #[clap(long = "exact")] - pub filter_exact: bool, - - /// Number of threads used for running tests in parallel - #[clap(long, default_value = "32")] - pub test_threads: NonZeroUsize, - - /// Output minimal information - #[clap(long)] - pub quiet: bool, - - /// List all tests - #[clap(long)] - pub list: bool, - - /// Path to contain the tests - #[clap(long, value_parser)] - pub root_path: PathBuf, - - /// Pattern to match the test files - #[clap(long, default_value = r".*\.(mvir|move)$")] - pub pattern: String, -} - -/// Helper function to iterate through all the files in the given directory, skipping hidden files, -/// and return an iterator of their paths. 
-pub fn iterate_directory(path: &Path) -> impl Iterator { - walkdir::WalkDir::new(path) - .into_iter() - .map(::std::result::Result::unwrap) - .filter(|entry| { - entry.file_type().is_file() - && entry - .file_name() - .to_str() - .map_or(false, |s| !s.starts_with('.')) // Skip hidden files - }) - .map(|entry| entry.path().to_path_buf()) -} - -pub fn derive_test_name(root: &Path, path: &Path, test_name: &str) -> String { - let relative = path.strip_prefix(root).unwrap_or_else(|_| { - panic!( - "failed to strip prefix '{}' from path '{}'", - root.display(), - path.display() - ) - }); - let mut test_name = test_name.to_string(); - test_name = format!("{}::{}", test_name, relative.display()); - test_name -} - -struct Test { - testfn: Box Result<()> + Send>, - name: String, -} - -enum TestResult { - Ok, - Failed, - FailedWithMsg(String), -} - -pub(crate) fn runner(options: &TransactionalTestOpts, reqs: &[Requirements]) -> CliTypedResult<()> { - let mut tests: Vec = reqs.iter().flat_map(|req| req.expand()).collect(); - tests.sort_by(|a, b| a.name.cmp(&b.name)); - - if options.list { - for test in &tests { - println!("{}: test", test.name); - } - - return Ok(()); - } - - match run_tests(options, tests) { - Ok(true) => Ok(()), - Ok(false) => process::exit(101), - Err(e) => Err(CliError::UnexpectedError(format!( - "error: io error when running tests: {:?}", - e - ))), - } -} - -fn run_tests(options: &TransactionalTestOpts, tests: Vec) -> io::Result { - let total = tests.len(); - - // Filter out tests - let mut remaining = match &options.filter { - None => tests, - Some(filter) => tests - .into_iter() - .filter(|test| { - if options.filter_exact { - test.name == filter[..] - } else { - test.name.contains(&filter[..]) - } - }) - .rev() - .collect(), - }; - - let filtered_out = total - remaining.len(); - let mut summary = TestSummary::new(total, filtered_out); - - if !options.quiet { - summary.write_starting_msg()?; - } - - let (tx, rx) = channel(); - - let mut pending = 0; - while pending > 0 || !remaining.is_empty() { - while pending < options.test_threads.get() && !remaining.is_empty() { - let test = remaining.pop().unwrap(); - run_test(test, tx.clone()); - pending += 1; - } - - let (name, result) = rx.recv().unwrap(); - summary.handle_result(name, result)?; - - pending -= 1; - } - - // Write Test Summary - if !options.quiet { - summary.write_summary()?; - } - - Ok(summary.success()) -} - -fn run_test(test: Test, channel: Sender<(String, TestResult)>) { - let Test { name, testfn } = test; - - let cfg = thread::Builder::new().name(name.clone()); - cfg.spawn(move || { - let result = match catch_unwind(AssertUnwindSafe(testfn)) { - Ok(Ok(())) => TestResult::Ok, - Ok(Err(e)) => TestResult::FailedWithMsg(format!("{:?}", e)), - Err(_) => TestResult::Failed, - }; - - channel.send((name, result)).unwrap(); - }) - .unwrap(); -} - -struct TestSummary { - stdout: StandardStream, - total: usize, - filtered_out: usize, - passed: usize, - failed: Vec, -} - -impl TestSummary { - fn new(total: usize, filtered_out: usize) -> Self { - Self { - stdout: StandardStream::stdout(ColorChoice::Auto), - total, - filtered_out, - passed: 0, - failed: Vec::new(), - } - } - - fn handle_result(&mut self, name: String, result: TestResult) -> io::Result<()> { - write!(self.stdout, "test {} ... 
", name)?; - match result { - TestResult::Ok => { - self.passed += 1; - self.write_ok()?; - }, - TestResult::Failed => { - self.failed.push(name); - self.write_failed()?; - }, - TestResult::FailedWithMsg(msg) => { - self.failed.push(name); - self.write_failed()?; - writeln!(self.stdout)?; - - write!(self.stdout, "Error: {}", msg)?; - }, - } - writeln!(self.stdout)?; - Ok(()) - } - - fn write_ok(&mut self) -> io::Result<()> { - self.stdout - .set_color(ColorSpec::new().set_fg(Some(Color::Green)))?; - write!(self.stdout, "ok")?; - self.stdout.reset()?; - Ok(()) - } - - fn write_failed(&mut self) -> io::Result<()> { - self.stdout - .set_color(ColorSpec::new().set_fg(Some(Color::Red)))?; - write!(self.stdout, "FAILED")?; - self.stdout.reset()?; - Ok(()) - } - - fn write_starting_msg(&mut self) -> io::Result<()> { - writeln!(self.stdout)?; - writeln!( - self.stdout, - "running {} tests", - self.total - self.filtered_out - )?; - Ok(()) - } - - fn write_summary(&mut self) -> io::Result<()> { - // Print out the failing tests - if !self.failed.is_empty() { - writeln!(self.stdout)?; - writeln!(self.stdout, "failures:")?; - for name in &self.failed { - writeln!(self.stdout, " {}", name)?; - } - } - - writeln!(self.stdout)?; - write!(self.stdout, "test result: ")?; - if self.failed.is_empty() { - self.write_ok()?; - } else { - self.write_failed()?; - } - writeln!( - self.stdout, - ". {} passed; {} failed; {} filtered out", - self.passed, - self.failed.len(), - self.filtered_out - )?; - writeln!(self.stdout)?; - Ok(()) - } - - fn success(&self) -> bool { - self.failed.is_empty() - } -} - -#[doc(hidden)] -pub struct Requirements { - test: fn(&Path) -> Result<()>, - test_name: String, - root: String, - pattern: String, -} - -impl Requirements { - #[doc(hidden)] - pub fn new( - test: fn(&Path) -> Result<()>, - test_name: String, - root: String, - pattern: String, - ) -> Self { - Self { - test, - test_name, - root, - pattern, - } - } - - /// Generate standard test descriptors ([`test::TestDescAndFn`]) from the descriptor of - /// `#[datatest::files(..)]`. - /// - /// Scans all files in a given directory, finds matching ones and generates a test descriptor - /// for each of them. - fn expand(&self) -> Vec { - let root = Path::new(&self.root).to_path_buf(); - - let re = regex::Regex::new(&self.pattern) - .unwrap_or_else(|_| panic!("invalid regular expression: '{}'", self.pattern)); - - let tests: Vec<_> = iterate_directory(&root) - .filter_map(|path| { - let input_path = path.to_string_lossy(); - if re.is_match(&input_path) { - let testfn = self.test; - let name = derive_test_name(&root, &path, &self.test_name); - let testfn = Box::new(move || (testfn)(&path)); - - Some(Test { testfn, name }) - } else { - None - } - }) - .collect(); - - // We want to avoid silent fails due to typos in regexp! - if tests.is_empty() { - panic!( - "no test cases found for test '{}'. 
Scanned directory: '{}' with pattern '{}'", - self.test_name, self.root, self.pattern, - ); - } - - tests - } -} diff --git a/crates/aptos/src/node/mod.rs b/crates/aptos/src/node/mod.rs index 22d6ff10a17fb..8a86c60000668 100644 --- a/crates/aptos/src/node/mod.rs +++ b/crates/aptos/src/node/mod.rs @@ -1150,6 +1150,7 @@ impl CliCommand<()> for RunLocalTestnet { let result = aptos_node::setup_test_environment_and_start_node( config_path, self.test_config_override, + None, Some(test_dir_copy), false, false, diff --git a/crates/aptos/src/test/mod.rs b/crates/aptos/src/test/mod.rs index bdd1836d06d10..fd6cf57110773 100644 --- a/crates/aptos/src/test/mod.rs +++ b/crates/aptos/src/test/mod.rs @@ -889,6 +889,7 @@ impl CliTestFramework { account: self.account_id(index), package, output_dir: Some(output_dir), + print_metadata: false, } .execute() .await diff --git a/dashboards/consensus.json b/dashboards/consensus.json index 4a1306f348ffd..14a63fce9c2fc 100644 --- a/dashboards/consensus.json +++ b/dashboards/consensus.json @@ -75,6 +75,7 @@ "axisColorMode": "text", "axisLabel": "blocks / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -144,6 +145,7 @@ "axisColorMode": "text", "axisLabel": "transactions / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -205,6 +207,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "axisSoftMax": 100, "axisSoftMin": 66, "barAlignment": 0, @@ -267,6 +270,7 @@ "axisColorMode": "text", "axisLabel": "transactions / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -327,6 +331,7 @@ "axisColorMode": "text", "axisLabel": "transactions / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -388,6 +393,7 @@ "axisColorMode": "text", "axisLabel": "transactions / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -448,6 +454,7 @@ "axisColorMode": "text", "axisLabel": "transactions / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -521,6 +528,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -581,6 +589,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -641,6 +650,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -701,6 +711,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -761,6 +772,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -821,6 +833,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -881,6 +894,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -941,6 +955,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, 
"barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -979,7 +994,7 @@ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", - "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"ordered\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"ordered\"}[$interval])) < 1000000", + "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"ordered\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"ordered\"}[$interval]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{kubernetes_pod_name}}-{{role}}", @@ -1001,6 +1016,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -1061,6 +1077,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -1119,6 +1136,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -1177,6 +1195,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -1224,12 +1243,666 @@ "title": "Block ordered to committed (used for pipeline backpressure)", "type": "timeseries" }, + { + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 8, "x": 0, "y": 58 }, + "id": 178, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "avg by (name) (rate(aptos_consensus_buffer_manager_phase_process_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", namespace=~\"$namespace\", 
kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"} [1m]))", + "instant": false, + "legendFormat": "{{name}}", + "range": true, + "refId": "A" + } + ], + "title": "BufferManager phase saturation (active per second)", + "type": "timeseries" + }, + { + "collapsed": true, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 67 }, + "id": 180, + "panels": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "Duration between block generation time until the moment the node ordered the block.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 68 }, + "id": 188, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"ordered\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"ordered\"}[$interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "B" + } + ], + "title": "Block creation to ordered", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "Duration between block generation time until the moment the node executes the block.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 68 }, + "id": 189, + "options": { + "legend": { "calcs": [], 
"displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"executed\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"executed\"}[$interval])) < 1000000", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "B" + } + ], + "title": "Block creation to executed", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "Duration between block generation time until the moment the node commits it.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 68 }, + "id": 190, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$interval])) < 1000000", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "C" + } + ], + "title": "Block creation to commit", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": 
false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 76 }, + "id": 179, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_bc_share\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_bc_share\"}[$interval]))", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "C" + } + ], + "title": "Block creation to broadcast share", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 76 }, + "id": 181, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_add_share\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_add_share\"}[$interval]))", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "C" + } + ], + "title": "Block creation to add share", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": 
"${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 76 }, + "id": 182, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_agg_decision\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_agg_decision\"}[$interval]))", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "C" + } + ], + "title": "Block creation to aggregate decision", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 84 }, + "id": 183, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_add_decision\"}[$interval]) / 
rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_add_decision\"}[$interval]))", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "C" + } + ], + "title": "Block creation to add decision", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 84 }, + "id": 184, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_bc_decision\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_bc_decision\"}[$interval]))", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "C" + } + ], + "title": "Block creation to broadcast decision", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 84 }, + "id": 187, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" 
} + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_re_bc_share\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_re_bc_share\"}[$interval]))", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "C" + } + ], + "title": "Block creation to re-broadcast share", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 92 }, + "id": 185, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_ack_decision\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_ack_decision\"}[$interval]))", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "C" + } + ], + "title": "Block creation to ack back decision", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": 
"none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 92 }, + "id": 186, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_acked_decision\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"rand_acked_decision\"}[$interval]))", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "C" + } + ], + "title": "Block creation to receive acked decision", + "type": "timeseries" + } + ], + "title": "[TEST] Randomness Latency", + "type": "row" + }, { "collapsed": false, "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editable": false, "error": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 58 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 68 }, "id": 70, "panels": [], "span": 0, @@ -1249,7 +1922,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 59 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 69 }, "hiddenSeries": false, "id": 44, "legend": { @@ -1270,7 +1943,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -1314,7 +1987,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 59 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 69 }, "hiddenSeries": false, "id": 118, "legend": { @@ -1335,7 +2008,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -1379,7 +2052,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 59 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 69 }, "hiddenSeries": false, "id": 119, "legend": { @@ -1400,7 +2073,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -1444,7 +2117,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 67 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 77 }, "hiddenSeries": false, "id": 120, "legend": { @@ -1465,7 +2138,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": 
"10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -1509,7 +2182,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 67 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 77 }, "hiddenSeries": false, "id": 121, "legend": { @@ -1530,7 +2203,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -1574,7 +2247,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 67 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 77 }, "hiddenSeries": false, "id": 122, "legend": { @@ -1595,7 +2268,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -1639,7 +2312,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 75 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 85 }, "hiddenSeries": false, "id": 123, "legend": { @@ -1660,7 +2333,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -1704,7 +2377,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 75 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 85 }, "hiddenSeries": false, "id": 124, "legend": { @@ -1725,7 +2398,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -1769,7 +2442,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 75 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 85 }, "hiddenSeries": false, "id": 125, "legend": { @@ -1790,7 +2463,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -1834,7 +2507,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 83 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 93 }, "hiddenSeries": false, "id": 126, "legend": { @@ -1855,7 +2528,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -1899,7 +2572,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 83 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 93 }, "hiddenSeries": false, "id": 83, "legend": { @@ -1920,7 +2593,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -1943,7 +2616,7 @@ 
"thresholds": [], "timeRegions": [], "title": "Main Event Loop Current running function", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "tooltip": { "shared": true, "sort": 2, "value_type": "individual" }, "type": "graph", "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, "yaxes": [ @@ -1964,7 +2637,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 83 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 93 }, "hiddenSeries": false, "id": 129, "legend": { @@ -1985,7 +2658,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -2029,7 +2702,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 91 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 101 }, "hiddenSeries": false, "id": 159, "legend": { @@ -2050,7 +2723,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -2094,7 +2767,7 @@ "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, "fill": 0, "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 91 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 101 }, "hiddenSeries": false, "id": 160, "legend": { @@ -2115,7 +2788,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60477", "pointradius": 2, "points": false, "renderer": "flot", @@ -2152,7 +2825,7 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editable": false, "error": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 99 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 109 }, "id": 18, "panels": [], "span": 0, @@ -2171,6 +2844,7 @@ "axisColorMode": "text", "axisLabel": "round #", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2198,7 +2872,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 100 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 110 }, "id": 45, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2227,6 +2901,7 @@ "axisColorMode": "text", "axisLabel": "# rounds", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2254,7 +2929,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 100 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 110 }, "id": 14, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2287,6 +2962,7 @@ "axisColorMode": "text", "axisLabel": "# timeout rounds", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2314,7 +2990,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 100 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 110 }, "id": 16, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2347,6 +3023,7 @@ "axisColorMode": "text", "axisLabel": "# timeouts", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 
0, "drawStyle": "line", "fillOpacity": 0, @@ -2374,7 +3051,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 108 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 118 }, "id": 26, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2407,6 +3084,7 @@ "axisColorMode": "text", "axisLabel": "timeout val", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2434,7 +3112,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 108 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 118 }, "id": 27, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2459,7 +3137,7 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editable": false, "error": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 116 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 126 }, "id": 59, "panels": [], "span": 0, @@ -2478,6 +3156,7 @@ "axisColorMode": "text", "axisLabel": "Epoch #", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2505,7 +3184,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 117 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 127 }, "id": 60, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2535,6 +3214,7 @@ "axisColorMode": "text", "axisLabel": "# Validators", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2562,7 +3242,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 117 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 127 }, "id": 61, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2592,6 +3272,7 @@ "axisColorMode": "text", "axisLabel": "# Validators", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2619,7 +3300,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 117 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 127 }, "id": 141, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2645,7 +3326,7 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editable": false, "error": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 125 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 135 }, "id": 55, "panels": [], "span": 0, @@ -2664,6 +3345,7 @@ "axisColorMode": "text", "axisLabel": "# blocks", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2691,7 +3373,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 126 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 136 }, "id": 8, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2722,6 +3404,7 @@ "axisColorMode": "text", "axisLabel": "# transactions / block", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2749,7 +3432,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 126 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 136 }, "id": 57, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2780,6 +3463,7 @@ "axisColorMode": "text", "axisLabel": "", 
"axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2807,7 +3491,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 126 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 136 }, "id": 56, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2840,6 +3524,7 @@ "axisColorMode": "text", "axisLabel": "# blocks", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2867,7 +3552,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 134 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 144 }, "id": 115, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2900,6 +3585,7 @@ "axisColorMode": "text", "axisLabel": "# blocks", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2927,7 +3613,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 134 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 144 }, "id": 130, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -2960,6 +3646,7 @@ "axisColorMode": "text", "axisLabel": "# blocks", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -2987,7 +3674,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 134 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 144 }, "id": 131, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3020,6 +3707,7 @@ "axisColorMode": "text", "axisLabel": "# blocks", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3047,7 +3735,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 142 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 152 }, "id": 158, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3074,7 +3762,7 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editable": false, "error": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 150 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 160 }, "id": 4, "panels": [], "span": 0, @@ -3093,6 +3781,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3120,7 +3809,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 151 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 161 }, "id": 150, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3152,6 +3841,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3179,7 +3869,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 151 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 161 }, "id": 80, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3211,6 +3901,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3238,7 +3929,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 151 }, + "gridPos": { "h": 8, "w": 8, "x": 
16, "y": 161 }, "id": 79, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3280,6 +3971,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3307,7 +3999,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 6, "x": 0, "y": 159 }, + "gridPos": { "h": 8, "w": 6, "x": 0, "y": 169 }, "id": 85, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3339,6 +4031,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3366,7 +4059,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 6, "x": 6, "y": 159 }, + "gridPos": { "h": 8, "w": 6, "x": 6, "y": 169 }, "id": 88, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3396,6 +4089,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3423,7 +4117,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 6, "x": 12, "y": 159 }, + "gridPos": { "h": 8, "w": 6, "x": 12, "y": 169 }, "id": 86, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3453,6 +4147,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3480,7 +4175,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 6, "x": 18, "y": 159 }, + "gridPos": { "h": 8, "w": 6, "x": 18, "y": 169 }, "id": 87, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3512,6 +4207,7 @@ "axisColorMode": "text", "axisLabel": "# state syncs", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3539,7 +4235,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 167 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 177 }, "id": 21, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3572,6 +4268,7 @@ "axisColorMode": "text", "axisLabel": "# blocks", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3599,7 +4296,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 167 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 177 }, "id": 24, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3632,6 +4329,7 @@ "axisColorMode": "text", "axisLabel": "total time", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3659,7 +4357,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 175 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 185 }, "id": 25, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3690,6 +4388,7 @@ "axisColorMode": "text", "axisLabel": "total time", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3717,7 +4416,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 175 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 185 }, "id": 23, "options": { "legend": { "calcs": 
[], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3742,7 +4441,7 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editable": false, "error": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 183 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 193 }, "id": 52, "panels": [], "span": 0, @@ -3761,6 +4460,7 @@ "axisColorMode": "text", "axisLabel": "proposals / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3788,7 +4488,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 184 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 194 }, "id": 28, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3821,6 +4521,7 @@ "axisColorMode": "text", "axisLabel": "# votes", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3848,7 +4549,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 184 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 194 }, "id": 53, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3875,7 +4576,7 @@ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editable": false, "error": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 192 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 202 }, "id": 64, "panels": [], "span": 0, @@ -3894,6 +4595,7 @@ "axisColorMode": "text", "axisLabel": "dropped messages / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3921,7 +4623,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 5, "x": 0, "y": 193 }, + "gridPos": { "h": 8, "w": 5, "x": 0, "y": 203 }, "id": 91, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -3952,6 +4654,7 @@ "axisColorMode": "text", "axisLabel": "dropped messages / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -3979,7 +4682,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 5, "x": 5, "y": 193 }, + "gridPos": { "h": 8, "w": 5, "x": 5, "y": 203 }, "id": 147, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4010,6 +4713,7 @@ "axisColorMode": "text", "axisLabel": "dropped messages / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4037,7 +4741,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 5, "x": 10, "y": 193 }, + "gridPos": { "h": 8, "w": 5, "x": 10, "y": 203 }, "id": 149, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4068,6 +4772,7 @@ "axisColorMode": "text", "axisLabel": "dropped messages / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4095,7 +4800,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 4, "x": 15, "y": 193 }, + "gridPos": { "h": 8, "w": 4, "x": 15, "y": 203 }, "id": 148, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4126,6 +4831,7 @@ "axisColorMode": "text", "axisLabel": "dropped messages / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4153,7 +4859,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 5, "x": 
19, "y": 193 }, + "gridPos": { "h": 8, "w": 5, "x": 19, "y": 203 }, "id": 157, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4184,6 +4890,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4211,7 +4918,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 0, "y": 201 }, + "gridPos": { "h": 9, "w": 8, "x": 0, "y": 211 }, "id": 65, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4242,6 +4949,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4269,7 +4977,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 11, "x": 8, "y": 201 }, + "gridPos": { "h": 9, "w": 11, "x": 8, "y": 211 }, "id": 104, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4289,6 +4997,183 @@ "title": "Network -> Consensus Dequeue rate", "type": "timeseries" }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 8, "x": 0, "y": 220 }, + "id": 194, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_consensus_rpc_channel_msgs_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", namespace=\"$namespace\", state=\"enqueued\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "title": "RPC Enqueue rate", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { 
"mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 8, "x": 8, "y": 220 }, + "id": 195, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_consensus_rpc_channel_msgs_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", namespace=\"$namespace\", state=\"dequeued\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "title": "RPC Dequeue rate", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 8, "x": 16, "y": 220 }, + "id": 196, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_consensus_rpc_channel_msgs_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", namespace=\"$namespace\", state=\"dropped\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "title": "RPC Dropped rate", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "description": "The channel metrics related to consensus network -> main loop. 
(It only keeps one message per peer per type).", @@ -4300,6 +5185,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4327,7 +5213,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 0, "y": 210 }, + "gridPos": { "h": 9, "w": 8, "x": 0, "y": 229 }, "id": 66, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4358,6 +5244,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4385,7 +5272,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 8, "y": 210 }, + "gridPos": { "h": 9, "w": 8, "x": 8, "y": 229 }, "id": 106, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4416,6 +5303,7 @@ "axisColorMode": "text", "axisLabel": "dropped messages / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4443,7 +5331,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 16, "y": 210 }, + "gridPos": { "h": 9, "w": 8, "x": 16, "y": 229 }, "id": 93, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4472,6 +5360,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4499,7 +5388,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 0, "y": 219 }, + "gridPos": { "h": 9, "w": 8, "x": 0, "y": 238 }, "id": 67, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4530,6 +5419,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4557,7 +5447,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 8, "y": 219 }, + "gridPos": { "h": 9, "w": 8, "x": 8, "y": 238 }, "id": 105, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4588,6 +5478,7 @@ "axisColorMode": "text", "axisLabel": "messages dropped / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4615,7 +5506,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 16, "y": 219 }, + "gridPos": { "h": 9, "w": 8, "x": 16, "y": 238 }, "id": 92, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4644,6 +5535,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4671,7 +5563,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 0, "y": 228 }, + "gridPos": { "h": 9, "w": 8, "x": 0, "y": 247 }, "id": 107, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4702,6 +5594,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4729,7 +5622,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 8, "y": 228 }, + "gridPos": { "h": 9, "w": 8, "x": 8, "y": 247 }, "id": 108, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ 
-4760,6 +5653,7 @@ "axisColorMode": "text", "axisLabel": "dropped messages / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4787,7 +5681,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 16, "y": 228 }, + "gridPos": { "h": 9, "w": 8, "x": 16, "y": 247 }, "id": 109, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4818,6 +5712,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4834,18 +5729,12 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 0, "y": 237 }, + "gridPos": { "h": 9, "w": 8, "x": 0, "y": 256 }, "id": 110, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4876,6 +5765,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4892,18 +5782,12 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 8, "y": 237 }, + "gridPos": { "h": 9, "w": 8, "x": 8, "y": 256 }, "id": 111, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4934,6 +5818,7 @@ "axisColorMode": "text", "axisLabel": "dropped messages / s", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -4961,7 +5846,7 @@ }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 16, "y": 237 }, + "gridPos": { "h": 9, "w": 8, "x": 16, "y": 256 }, "id": 112, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -4983,7 +5868,7 @@ }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The pending messages in the self message queue (for proposal and vote) and timeout queue (for local timeout).", + "description": "The channel metrics related to consensus network -> main loop. 
(It only keeps one message per peer per type).", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, @@ -4992,6 +5877,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -5008,18 +5894,171 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 8, "x": 0, "y": 265 }, + "id": 191, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_consensus_buffer_manager_rand_msgs_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", namespace=\"$namespace\", state=\"enqueued\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[$__rate_interval])", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "title": "Buffer manager rand msg Enqueue rate", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The channel metrics related to consensus network -> main loop. (It only keeps one message per peer per type).", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 0, "y": 246 }, + "gridPos": { "h": 9, "w": 8, "x": 8, "y": 265 }, + "id": 192, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_consensus_buffer_manager_rand_msgs_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\",namespace=\"$namespace\", state=\"dequeued\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[$interval])", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "title": "Buffer manager rand msg Dequeue rate", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The channel metrics related to consensus network -> main loop. 
(It only keeps one message per peer per type).", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "dropped messages / s", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 8, "x": 16, "y": 265 }, + "id": 193, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_consensus_buffer_manager_rand_msgs_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", state=\"dropped\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "C" + } + ], + "title": "Buffer manager rand msg Dropped rate", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The pending messages in the self message queue (for proposal and vote) and timeout queue (for local timeout).", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 8, "x": 0, "y": 274 }, "id": 68, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5048,6 +6087,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -5064,18 +6104,12 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 8, "y": 246 }, + "gridPos": { "h": 9, "w": 8, "x": 8, "y": 274 }, "id": 94, "options": { "legend": { 
"calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5104,6 +6138,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -5120,18 +6155,12 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 16, "y": 246 }, + "gridPos": { "h": 9, "w": 8, "x": 16, "y": 274 }, "id": 117, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5162,6 +6191,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -5178,18 +6208,12 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 0, "y": 255 }, + "gridPos": { "h": 9, "w": 8, "x": 0, "y": 283 }, "id": 113, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5220,6 +6244,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -5236,18 +6261,12 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "short" }, "overrides": [] }, - "gridPos": { "h": 9, "w": 8, "x": 8, "y": 255 }, + "gridPos": { "h": 9, "w": 8, "x": 8, "y": 283 }, "id": 114, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5269,7 +6288,7 @@ }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 264 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 292 }, "id": 137, "panels": [], "title": "Collected votes", @@ -5286,6 +6305,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -5302,17 +6322,11 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 265 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 293 }, "id": 135, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5342,6 +6356,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -5358,17 +6373,11 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - 
"steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 265 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 293 }, "id": 138, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5398,6 +6407,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -5414,17 +6424,11 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 265 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 293 }, "id": 139, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5454,6 +6458,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -5470,17 +6475,11 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 273 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 301 }, "id": 142, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5510,6 +6509,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -5526,17 +6526,11 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 273 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 301 }, "id": 143, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5566,6 +6560,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -5582,17 +6577,11 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 273 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 301 }, "id": 146, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5641,7 +6630,7 @@ "thresholds": { "mode": "absolute", "steps": [ - { "color": "transparent", "value": null }, + { "color": "transparent" }, { "color": "red", "value": 60 }, { "color": 
"orange", "value": 72 }, { "color": "yellow", "value": 80 }, @@ -5651,7 +6640,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 281 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 309 }, "id": 144, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5727,7 +6716,7 @@ "thresholds": { "mode": "absolute", "steps": [ - { "color": "transparent", "value": null }, + { "color": "transparent" }, { "color": "red", "value": 60 }, { "color": "orange", "value": 72 }, { "color": "yellow", "value": 80 }, @@ -5737,7 +6726,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 281 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 309 }, "id": 145, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5785,7 +6774,7 @@ }, { "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 289 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 317 }, "id": 152, "panels": [], "title": "Backpressures", @@ -5821,7 +6810,7 @@ "thresholds": { "mode": "absolute", "steps": [ - { "color": "transparent", "value": null }, + { "color": "transparent" }, { "color": "red", "value": 60 }, { "color": "orange", "value": 72 }, { "color": "yellow", "value": 80 }, @@ -5831,7 +6820,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 290 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 318 }, "id": 153, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5880,7 +6869,7 @@ "thresholds": { "mode": "absolute", "steps": [ - { "color": "transparent", "value": null }, + { "color": "transparent" }, { "color": "red", "value": 60 }, { "color": "orange", "value": 72 }, { "color": "yellow", "value": 80 }, @@ -5890,7 +6879,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 290 }, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 318 }, "id": 154, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5939,7 +6928,7 @@ "thresholds": { "mode": "absolute", "steps": [ - { "color": "transparent", "value": null }, + { "color": "transparent" }, { "color": "red", "value": 60 }, { "color": "orange", "value": 72 }, { "color": "yellow", "value": 80 }, @@ -5949,7 +6938,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 290 }, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 318 }, "id": 155, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -5998,7 +6987,7 @@ "thresholds": { "mode": "absolute", "steps": [ - { "color": "transparent", "value": null }, + { "color": "transparent" }, { "color": "red", "value": 60 }, { "color": "orange", "value": 72 }, { "color": "yellow", "value": 80 }, @@ -6008,7 +6997,7 @@ }, "overrides": [] }, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 298 }, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 326 }, "id": 156, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, @@ -6264,6 +7253,6 @@ "timezone": "browser", "title": "consensus", "uid": "consensus", - "version": 11, + "version": 21, "weekStart": "" } diff --git a/dashboards/consensus.json.gz b/dashboards/consensus.json.gz index 3764dd610d301..a1537f1787f5c 100644 Binary files a/dashboards/consensus.json.gz and b/dashboards/consensus.json.gz differ diff --git a/dashboards/end-to-end-txn-latency.json b/dashboards/end-to-end-txn-latency.json index 
dd6c72d99f8e2..908af32a1ef08 100644 --- a/dashboards/end-to-end-txn-latency.json +++ b/dashboards/end-to-end-txn-latency.json @@ -77,6 +77,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, @@ -237,6 +238,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, @@ -348,6 +350,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -409,6 +412,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -478,6 +482,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -537,6 +542,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -594,6 +600,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -658,6 +665,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -719,6 +727,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -779,6 +788,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -840,6 +850,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -901,6 +912,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -968,6 +980,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -1027,7 +1040,7 @@ "refId": "C" } ], - "title": "Block timestamp to synced", + "title": "Avg. 
Block timestamp to synced", "type": "timeseries" }, { @@ -1041,6 +1054,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -1080,7 +1094,7 @@ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", - "expr": "(sum(rate(aptos_data_client_sync_latencies_sum{chain_name=~\"$chain_name\", namespace=~\"$namespace\",label=\"propose_to_sync_latency\",role=\"fullnode\",kubernetes_pod_name=~\".*aptos-node-.*-fullnode-.*\"}[$__rate_interval])) / sum(rate(aptos_data_client_sync_latencies_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\",label=\"propose_to_sync_latency\",kubernetes_pod_name=~\".*aptos-node-.*-fullnode-.*\"}[$__rate_interval])))\n-\nquantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$__rate_interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$__rate_interval])) < 1000000", + "expr": "(sum(rate(aptos_data_client_sync_latencies_sum{chain_name=~\"$chain_name\", namespace=~\"$namespace\",label=\"propose_to_sync_latency\",role=\"fullnode\",kubernetes_pod_name=~\".*aptos-node-.*-fullnode-.*\"}[$__rate_interval])) / sum(rate(aptos_data_client_sync_latencies_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\",label=\"propose_to_sync_latency\",kubernetes_pod_name=~\".*aptos-node-.*-fullnode-.*\"}[$__rate_interval])))\n-\nquantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$__rate_interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$__rate_interval]))", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -1091,7 +1105,7 @@ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", - "expr": "(sum by (role) (rate(aptos_data_client_sync_latencies_sum{chain_name=~\"$chain_name\", namespace=~\"$namespace\",label=\"propose_to_sync_latency\",role=\"fullnode\",kubernetes_pod_name=~\"^(pfn|public-fullnode-).*\"}[$__rate_interval])) / sum by (role) (rate(aptos_data_client_sync_latencies_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\",label=\"propose_to_sync_latency\",kubernetes_pod_name=~\"^(pfn|public-fullnode-).*\"}[$__rate_interval]))) \n-\nquantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$__rate_interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$__rate_interval])) < 1000000", + "expr": "(sum by 
(role) (rate(aptos_data_client_sync_latencies_sum{chain_name=~\"$chain_name\", namespace=~\"$namespace\",label=\"propose_to_sync_latency\",role=\"fullnode\",kubernetes_pod_name=~\"^(pfn|public-fullnode-).*\"}[$__rate_interval])) / sum by (role) (rate(aptos_data_client_sync_latencies_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\",label=\"propose_to_sync_latency\",kubernetes_pod_name=~\"^(pfn|public-fullnode-).*\"}[$__rate_interval]))) \n-\nquantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$__rate_interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$__rate_interval]))", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -1100,7 +1114,7 @@ "refId": "C" } ], - "title": "State sync latencies", + "title": "Avg. State sync latencies", "type": "timeseries" }, { @@ -1114,6 +1128,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -1153,7 +1168,7 @@ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editorMode": "code", - "expr": "(sum(rate(aptos_data_client_sync_latencies_sum{chain_name=~\"$chain_name\", namespace=~\"$namespace\",label=\"propose_to_sync_latency\",role=\"fullnode\",kubernetes_pod_name=~\".*aptos-node-.*-fullnode-.*\"}[$__rate_interval])) / sum(rate(aptos_data_client_sync_latencies_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\",label=\"propose_to_sync_latency\",kubernetes_pod_name=~\".*aptos-node-.*-fullnode-.*\"}[$__rate_interval])))\n-\nquantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$__rate_interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$__rate_interval])) < 1000000", + "expr": "(sum(rate(aptos_data_client_sync_latencies_sum{chain_name=~\"$chain_name\", namespace=~\"$namespace\",label=\"propose_to_sync_latency\",role=\"fullnode\",kubernetes_pod_name=~\".*aptos-node-.*-fullnode-.*\"}[$__rate_interval])) / sum(rate(aptos_data_client_sync_latencies_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\",label=\"propose_to_sync_latency\",kubernetes_pod_name=~\".*aptos-node-.*-fullnode-.*\"}[$__rate_interval])))\n-\nquantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$__rate_interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=\"committed\"}[$__rate_interval]))", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -1176,6 
+1191,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -1238,6 +1254,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, @@ -1303,7 +1320,7 @@ "refId": "A" } ], - "title": "State sync latencies stacked (VFN in Forge)", + "title": "(Ignore) State sync latencies stacked (VFN in Forge)", "type": "timeseries" }, { @@ -1317,6 +1334,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, @@ -1382,13 +1400,12 @@ "refId": "A" } ], - "title": "State sync latencies stacked (PFN in Forge)", + "title": "(Ignore) State sync latencies stacked (PFN in Forge)", "type": "timeseries" } ], "refresh": "", "schemaVersion": 38, - "style": "dark", "tags": ["aptos-core"], "templating": { "list": [ @@ -1564,8 +1581,8 @@ "multiFormat": "", "name": "interval", "options": [ - { "selected": false, "text": "auto", "value": "$__auto_interval_interval" }, - { "selected": true, "text": "1m", "value": "1m" }, + { "selected": true, "text": "auto", "value": "$__auto_interval_interval" }, + { "selected": false, "text": "1m", "value": "1m" }, { "selected": false, "text": "5m", "value": "5m" }, { "selected": false, "text": "10m", "value": "10m" }, { "selected": false, "text": "30m", "value": "30m" }, @@ -1595,6 +1612,6 @@ "timezone": "", "title": "end-to-end-txn-latency", "uid": "ae591b2c-8a2f-445d-9122-ee53f99400df", - "version": 42, + "version": 43, "weekStart": "" } diff --git a/dashboards/end-to-end-txn-latency.json.gz b/dashboards/end-to-end-txn-latency.json.gz index d5f75b210941c..d9123480f1de5 100644 Binary files a/dashboards/end-to-end-txn-latency.json.gz and b/dashboards/end-to-end-txn-latency.json.gz differ diff --git a/dashboards/execution.json b/dashboards/execution.json index 01b6b3c32e78f..c737e746c421f 100644 --- a/dashboards/execution.json +++ b/dashboards/execution.json @@ -67,6 +67,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -187,7 +188,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", @@ -280,7 +281,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", @@ -361,6 +362,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -534,7 +536,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", @@ -628,7 +630,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", @@ -749,7 +751,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": 
"flot", @@ -921,7 +923,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", @@ -1022,7 +1024,7 @@ "max": false, "min": false, "rightSide": false, - "show": true, + "show": false, "total": false, "values": false }, @@ -1031,7 +1033,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", @@ -1046,7 +1048,7 @@ "editorMode": "code", "expr": "block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}", "hide": false, - "legendFormat": "block_stm_concurrency", + "legendFormat": "{{kubernetes_pod_name}}", "range": true, "refId": "A" } @@ -1074,6 +1076,7 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -1194,7 +1197,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "10.2.0-59981", + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", @@ -1228,2499 +1231,2503 @@ "yaxis": { "align": false } }, { - "collapsed": false, + "collapsed": true, "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "editable": false, "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 41 }, "id": 13, - "panels": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "p50, p95 and p99 of the time spent to execute a block", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 2 }, + "hiddenSeries": false, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "histogram_quantile(0.50, sum(rate(aptos_executor_execute_block_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "C" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Block execution time [p50]", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { 
"format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "p50, p95 and p99 of the time spent to execute a block", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 2 }, + "hiddenSeries": false, + "id": 24, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "histogram_quantile(0.90, sum(rate(aptos_executor_execute_block_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "C" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Block execution time [p90]", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "p50, p95 and p99 of the time spent to execute a block", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 2 }, + "hiddenSeries": false, + "id": 23, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "histogram_quantile(0.99, sum(rate(aptos_executor_execute_block_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": 
"C" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Block execution time [p99]", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "p50, p95 and p99 of the time spent for persisting execution result to storage when committing blocks", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 10 }, + "hiddenSeries": false, + "id": 18, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "histogram_quantile(0.50, sum(rate(aptos_executor_save_transactions_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role,le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Time spent in storage to save transactions [p50]", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "p50, p95 and p99 of the time spent for persisting execution result to storage when committing blocks", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 10 }, + "hiddenSeries": false, + "id": 25, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", 
"uid": "${Datasource}" }, + "expr": "histogram_quantile(0.90, sum(rate(aptos_executor_save_transactions_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role,le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Time spent in storage to save transactions [p90]", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "p50, p95 and p99 of the time spent for persisting execution result to storage when committing blocks", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 10 }, + "hiddenSeries": false, + "id": 26, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "histogram_quantile(0.99, sum(rate(aptos_executor_save_transactions_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role,le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Time spent in storage to save transactions [p99]", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "p50, p95 and p99 of the time spent to execute a block in VM", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 18 }, + "hiddenSeries": false, + "id": 16, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": 
false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "histogram_quantile(0.50, sum(rate(aptos_executor_vm_execute_block_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "VM execute block time [p50]", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "p50, p95 and p99 of the time spent to execute a block in VM", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 18 }, + "hiddenSeries": false, + "id": 27, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "histogram_quantile(0.90, sum(rate(aptos_executor_vm_execute_block_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "VM execute block time [p90]", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "p50, p95 and p99 of the time spent to execute a block in VM", + "editable": false, + "error": 
false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 18 }, + "hiddenSeries": false, + "id": 28, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "histogram_quantile(0.99, sum(rate(aptos_executor_vm_execute_block_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "VM execute block time [p99]", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "p50, p95 and p99 of the time spent to commit blocks", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 26 }, + "hiddenSeries": false, + "id": 17, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "histogram_quantile(0.50, sum(rate(aptos_executor_commit_blocks_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Total time to commit blocks [p50]", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": 
true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "p50, p95 and p99 of the time spent to commit blocks", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 26 }, + "hiddenSeries": false, + "id": 29, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "histogram_quantile(0.90, sum(rate(aptos_executor_commit_blocks_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Total time to commit blocks [p90]", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "p50, p95 and p99 of the time spent to commit blocks", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 26 }, + "hiddenSeries": false, + "id": 30, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "histogram_quantile(0.99, sum(rate(aptos_executor_commit_blocks_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": 
"{{kubernetes_pod_name}}-{{role}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Total time to commit blocks [p99]", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + } + ], "span": 0, "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], "title": "Percentiles", "type": "row" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, + "collapsed": true, "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "p50, p95 and p99 of the time spent to execute a block", "editable": false, "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 42 }, - "hiddenSeries": false, - "id": 9, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "histogram_quantile(0.50, sum(rate(aptos_executor_execute_block_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "C" + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 42 }, + "id": 11, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average time spent to execute a block", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 3 }, + "hiddenSeries": false, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "rate(aptos_executor_execute_block_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", 
role=~\"$role\"}[$interval]) / rate(aptos_executor_execute_block_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Avg Block execution time", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "µs", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average time spent to execute a block in VM", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 3 }, + "hiddenSeries": false, + "id": 15, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "rate(aptos_executor_vm_execute_block_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_executor_vm_execute_block_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Avg VM execute block time", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "µs", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average time spent to commit blocks", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 3 }, + "hiddenSeries": false, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, 
+ "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "rate(aptos_executor_commit_blocks_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_executor_commit_blocks_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Avg Total time to commit blocks", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average time spent for persisting execution result to storage when committing blocks", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 11 }, + "hiddenSeries": false, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "rate(aptos_executor_save_transactions_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_executor_save_transactions_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Avg Time spent in storage to save transactions", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", 
"logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average time spent for executing and committing each chunk in state-syncing", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 11 }, + "hiddenSeries": false, + "id": 20, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "rate(aptos_executor_execute_and_commit_chunk_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_executor_execute_and_commit_chunk_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Avg Execute and commit chunk time", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "short", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average time spent to commit blocks", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 11 }, + "hiddenSeries": false, + "id": 50, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_executor_execute_block_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", 
kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_executor_execute_block_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Avg Total time to execute blocks", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "s", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The rate of speculative ", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 19 }, + "hiddenSeries": false, + "id": 32, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_execution_speculative_abort_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Execution speculative abort rate", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "$$hashKey": "object:125", "format": "none", "label": "aborts / s", "logBase": 1, "show": true }, + { "$$hashKey": "object:126", "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 19 }, + "hiddenSeries": false, + "id": 49, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + 
"options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_executor_vm_execute_block_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "vm_execution_time", + "range": true, + "refId": "C" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_execution_dependency_wait_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "hide": false, + "legendFormat": "executor_dependency_wait", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_execution_vm_init_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "hide": false, + "legendFormat": "vm_init", + "range": true, + "refId": "B" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_execution_task_validate_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ 
chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "hide": false, + "legendFormat": "task_validate", + "range": true, + "refId": "D" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_execution_task_execute_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "hide": false, + "legendFormat": "task_execute", + "range": true, + "refId": "E" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(block_executor_signature_verification_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "hide": false, + "legendFormat": "signature_verification", + "range": true, + "refId": "F" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_vm_txn_total_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "hide": true, + "legendFormat": "vm_execute_user_txn", + "range": true, + "refId": "G" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(block_executor_execute_block_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + 
"legendFormat": "block_executor_execute_block", + "range": true, + "refId": "H" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_parallel_execution_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "parallel_execution_seconds", + "range": true, + "refId": "I" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_rayon_execution_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "rayon_execution_seconds", + "range": true, + "refId": "J" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_execution_work_with_task_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "hide": false, + "legendFormat": "work_with_task", + "range": true, + "refId": "K" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(aptos_execution_get_next_task_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "hide": false, + "legendFormat": "get_next_task", + "range": true, + "refId": "L" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + 
"editorMode": "code", + "expr": "quantile(0.67, rate(final_output_extraction_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "final_output_extraction", + "range": true, + "refId": "M" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(final_result_collection_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "fina_result_collection", + "range": true, + "refId": "N" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile(0.67, rate(update_counters_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "update_counters_latency", + "range": true, + "refId": "O" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "BlockSTM time breakdown per block (temp)", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "$$hashKey": "object:314", "format": "s", "logBase": 1, "show": true }, + { "$$hashKey": "object:315", "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The counter that indicates the number of errors reported.", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 19 }, + "hiddenSeries": false, + "id": 22, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + 
"pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "expr": "aptos_executor_error_total{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Executor Error Counter", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "short", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } } ], - "thresholds": [], - "timeRegions": [], - "title": "Block execution time [p50]", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } + "span": 0, + "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], + "title": "Validators", + "type": "row" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "p50, p95 and p99 of the time spent to execute a block", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 42 }, - "hiddenSeries": false, - "id": 24, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "histogram_quantile(0.90, sum(rate(aptos_executor_execute_block_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "C" + "collapsed": true, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 43 }, + "id": 55, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The per-block gas consumed by parallel execution (BlockSTM) or sequential execution.", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, 
"w": 8, "x": 0, "y": 4 }, + "hiddenSeries": false, + "id": 52, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile by(stage) (0.5, rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"execution_gas|io_gas|non_storage_gas\", mode=\"parallel\"}[$interval]) / rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"execution_gas|io_gas|non_storage_gas\", mode=\"parallel\"}[$interval]))", + "hide": false, + "legendFormat": "parallel_{{stage}}", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile by(stage) (0.5, rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"execution_gas|io_gas|non_storage_gas\", mode=\"sequential\"}[$interval]) / rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"execution_gas|io_gas|non_storage_gas\", mode=\"sequential\"}[$interval]))", + "hide": false, + "legendFormat": "sequential_{{stage}}", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Per-block execution gas", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "$$hashKey": "object:314", "format": "none", "logBase": 1, "show": true }, + { "$$hashKey": "object:315", "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The per-block gas consumed by parallel execution (BlockSTM) or sequential execution.", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 4 }, + "hiddenSeries": false, + "id": 62, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": 
true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile by(stage) (0.5, rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"storage_in_octas|storage_in_gas\", mode=\"parallel\"}[$interval]) / rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"storage_in_octas|storage_in_gas\", mode=\"parallel\"}[$interval]))", + "hide": false, + "legendFormat": "parallel_{{stage}}", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile by(stage) (0.5, rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"storage_in_octas|storage_in_gas\", mode=\"sequential\"}[$interval]) / rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"storage_in_octas|storage_in_gas\", mode=\"sequential\"}[$interval]))", + "hide": false, + "legendFormat": "sequential_{{stage}}", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Per-block storage gas", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "$$hashKey": "object:314", "format": "none", "logBase": 1, "show": true }, + { "$$hashKey": "object:315", "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The per-block gas consumed by parallel execution (BlockSTM) or sequential execution.", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 4 }, + "hiddenSeries": false, + "id": 58, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile by(stage) (0.5, 
rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage='storage_in_octas', mode=\"parallel\"}[$interval]) / rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage='storage_in_octas', mode=\"parallel\"}[$interval]))", + "hide": false, + "legendFormat": "parallel_{{stage}}", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile by(stage) (0.5, rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage='storage_in_octas', mode=\"sequential\"}[$interval]) / rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage='storage_in_octas', mode=\"sequential\"}[$interval]))", + "hide": false, + "legendFormat": "sequential_{{stage}}", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Per-block storage fee", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "$$hashKey": "object:314", "format": "none", "logBase": 1, "show": true }, + { "$$hashKey": "object:315", "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The avg txn gas consumed by parallel execution (BlockSTM) or sequential execution.", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 12 }, + "hiddenSeries": false, + "id": 61, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile by(stage) (0.5, rate(aptos_execution_txn_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage!='storage_in_octas', mode=\"parallel\"}[$interval]) / rate(aptos_execution_txn_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", 
role=~\"$role\", stage!='storage_in_octas', mode=\"parallel\"}[$interval]))", + "hide": false, + "legendFormat": "parallel_{{stage}}", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile by(stage) (0.5, rate(aptos_execution_txn_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage!='storage_in_octas', mode=\"sequential\"}[$interval]) / rate(aptos_execution_txn_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage!='storage_in_octas', mode=\"sequential\"}[$interval]))", + "hide": false, + "legendFormat": "sequential_{{stage}}", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Avg txn gas", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "$$hashKey": "object:314", "format": "none", "logBase": 1, "show": true }, + { "$$hashKey": "object:315", "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The per-block gas consumed by parallel execution (BlockSTM) or sequential execution.", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 12 }, + "hiddenSeries": false, + "id": 60, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "quantile (0.5, \n (\n max by (kubernetes_pod_name, role) (\n rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage='non_storage_gas'}[$interval])\n )\n / \n max by (kubernetes_pod_name, role) (\n rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage='non_storage_gas'}[$interval])\n )\n )\n / \n (\n max by (kubernetes_pod_name, role) (\n rate(consensus_duration_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", op='execute_block'}[$interval])\n )\n / \n max by (kubernetes_pod_name, role) (\n 
rate(consensus_duration_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", op='execute_block'}[$interval])\n )\n )\n)", + "hide": false, + "legendFormat": "gas / s", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Execution speed - run-time gas / s", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "$$hashKey": "object:314", "format": "none", "logBase": 1, "show": true }, + { "$$hashKey": "object:315", "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The rate of the parallel / sequential execution exceeding the per-block gas limit. ", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 12 }, + "hiddenSeries": false, + "id": 53, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_execution_gas_limit_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", mode=\"parallel\"}[$interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "parallel execution", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_execution_gas_limit_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", mode=\"sequential\"}[$interval])", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "sequential execution", + "range": true, + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Execution exceed block gas limit rate", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "$$hashKey": "object:125", "format": "none", "label": "aborts / s", "logBase": 1, "show": true }, + { "$$hashKey": "object:126", "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average time spent to 
execute a block", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 20 }, + "hiddenSeries": false, + "id": 63, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_execution_transaction_shuffle_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_execution_transaction_shuffle_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Avg Txn shuffling time", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "µs", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average time spent to dedup the txns in a block", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 20 }, + "hiddenSeries": false, + "id": 70, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_execution_transaction_dedup_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_execution_transaction_dedup_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", 
role=~\"$role\"}[$interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Avg Txn dedup time", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "µs", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The time spent waiting for batches, per second", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 20 }, + "hiddenSeries": false, + "id": 71, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_consensus_batch_wait_duration_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{kubernetes_pod_name}}-{{role}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Batch wait duration (per s)", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "µs", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } } ], - "thresholds": [], - "timeRegions": [], - "title": "Block execution time [p90]", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } + "title": "Execution Per Block Gas", + "type": "row" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "p50, p95 and p99 of the time spent to execute a block", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 42 }, - "hiddenSeries": false, - "id": 23, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": 
false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "histogram_quantile(0.99, sum(rate(aptos_executor_execute_block_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "C" + "collapsed": true, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 44 }, + "id": 65, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "none" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 5 }, + "hiddenSeries": false, + "id": 66, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": " avg by (round_id)(max by(shard_id, round_id) (rate(sharded_block_execution_by_rounds_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ))", + "hide": false, + "legendFormat": "sub_block_execution_{{round_id}}", + "range": true, + "refId": "D" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(sharded_block_execution_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "hide": false, + "legendFormat": "sharded_execution", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(sharded_execution_result_aggregation_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "hide": false, + "legendFormat": "aggregation_result", + "range": true, + "refId": "B" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(wait_for_sharded_output_seconds_sum{ 
chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "hide": false, + "legendFormat": "wait_for_sharded_output", + "range": true, + "refId": "C" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Sharded execution time in 1s", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "$$hashKey": "object:140", "format": "none", "logBase": 1, "show": true }, + { "$$hashKey": "object:141", "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "none" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 8, "y": 5 }, + "hiddenSeries": false, + "id": 68, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "sum by (round_id) (sum by (round_id, shard_id) (rate(sharded_block_executor_txn_count_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / sum by(round_id, shard_id) (rate(sharded_block_execution_by_rounds_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) )", + "hide": false, + "legendFormat": "{{round_id}}", + "range": true, + "refId": "D" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Sharded execution TPS by round", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "$$hashKey": "object:140", "format": "none", "logBase": 1, "show": true }, + { "$$hashKey": "object:141", "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "none" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 16, "y": 5 }, + "hiddenSeries": false, + "id": 67, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": 
false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "sum by (round_id) (avg by (round_id, shard_id) (rate(sharded_block_executor_txn_count_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) )", + "hide": false, + "legendFormat": "{{round_id}}", + "range": true, + "refId": "D" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "(to fix) Sharded execution transaction counts by round", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "$$hashKey": "object:140", "format": "none", "logBase": 1, "show": true }, + { "$$hashKey": "object:141", "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "editable": false, + "error": false, + "fieldConfig": { "defaults": { "unit": "none" }, "overrides": [] }, + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 8, "w": 8, "x": 0, "y": 13 }, + "hiddenSeries": false, + "id": 69, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "10.2.0-60139", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 0, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": " avg by (round_id)(max by(shard_id, round_id) (rate(sharded_block_execution_by_rounds_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ))", + "hide": false, + "legendFormat": "sub_block_execution_{{round_id}}", + "range": true, + "refId": "D" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(sharded_block_execution_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "hide": false, + "legendFormat": "sharded_execution", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(sharded_execution_result_aggregation_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", 
namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "hide": false, + "legendFormat": "aggregation_result", + "range": true, + "refId": "B" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(wait_for_sharded_output_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", + "hide": false, + "legendFormat": "wait_for_sharded_output", + "range": true, + "refId": "C" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "avg(rate(drop_state_view_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]))", + "hide": false, + "legendFormat": "drop_state_view", + "range": true, + "refId": "E" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "max(avg by(shard_id) (rate(execute_shard_command_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])))", + "hide": false, + "legendFormat": "execute_shard_command_seconds", + "range": true, + "refId": "F" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "(test-only)Sharded execution time in 1s", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "$$hashKey": "object:140", "format": "none", "logBase": 1, "show": true }, + { "$$hashKey": "object:141", "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } } ], - "thresholds": [], - "timeRegions": [], - "title": "Block execution time [p99]", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "p50, p95 and p99 of the time spent for persisting execution result to storage when committing blocks", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 50 }, - "hiddenSeries": false, - "id": 18, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": 
"${Datasource}" }, - "expr": "histogram_quantile(0.50, sum(rate(aptos_executor_save_transactions_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role,le))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Time spent in storage to save transactions [p50]", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "p50, p95 and p99 of the time spent for persisting execution result to storage when committing blocks", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 50 }, - "hiddenSeries": false, - "id": 25, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "histogram_quantile(0.90, sum(rate(aptos_executor_save_transactions_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role,le))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Time spent in storage to save transactions [p90]", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "p50, p95 and p99 of the time spent for persisting execution result to storage when committing blocks", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 50 }, - "hiddenSeries": false, - "id": 26, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, 
- "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "histogram_quantile(0.99, sum(rate(aptos_executor_save_transactions_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role,le))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Time spent in storage to save transactions [p99]", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "p50, p95 and p99 of the time spent to execute a block in VM", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 58 }, - "hiddenSeries": false, - "id": 16, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "histogram_quantile(0.50, sum(rate(aptos_executor_vm_execute_block_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "VM execute block time [p50]", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "p50, p95 and p99 of the time spent to 
execute a block in VM", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 58 }, - "hiddenSeries": false, - "id": 27, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "histogram_quantile(0.90, sum(rate(aptos_executor_vm_execute_block_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "VM execute block time [p90]", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "p50, p95 and p99 of the time spent to execute a block in VM", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 58 }, - "hiddenSeries": false, - "id": 28, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "histogram_quantile(0.99, sum(rate(aptos_executor_vm_execute_block_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "VM execute block time [p99]", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] 
}, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "p50, p95 and p99 of the time spent to commit blocks", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 66 }, - "hiddenSeries": false, - "id": 17, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "histogram_quantile(0.50, sum(rate(aptos_executor_commit_blocks_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total time to commit blocks [p50]", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "p50, p95 and p99 of the time spent to commit blocks", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 66 }, - "hiddenSeries": false, - "id": 29, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "histogram_quantile(0.90, sum(rate(aptos_executor_commit_blocks_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", - "format": "time_series", - 
"intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total time to commit blocks [p90]", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "p50, p95 and p99 of the time spent to commit blocks", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 66 }, - "hiddenSeries": false, - "id": 30, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "histogram_quantile(0.99, sum(rate(aptos_executor_commit_blocks_seconds_bucket{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) by (kubernetes_pod_name, role, le))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total time to commit blocks [p99]", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "collapsed": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editable": false, - "error": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 74 }, - "id": 11, - "panels": [], - "span": 0, - "targets": [{ "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "refId": "A" }], - "title": "Validators", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The average time spent to execute a block", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 75 }, - "hiddenSeries": false, - "id": 6, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - 
"nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "rate(aptos_executor_execute_block_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_executor_execute_block_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg Block execution time", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "µs", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The average time spent to execute a block in VM", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 75 }, - "hiddenSeries": false, - "id": 15, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "rate(aptos_executor_vm_execute_block_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_executor_vm_execute_block_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg VM execute block time", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "µs", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - 
"bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The average time spent to commit blocks", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 75 }, - "hiddenSeries": false, - "id": 4, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "rate(aptos_executor_commit_blocks_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_executor_commit_blocks_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg Total time to commit blocks", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The average time spent for persisting execution result to storage when committing blocks", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 83 }, - "hiddenSeries": false, - "id": 2, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "rate(aptos_executor_save_transactions_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_executor_save_transactions_seconds_count{chain_name=~\"$chain_name\", 
cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg Time spent in storage to save transactions", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The average time spent for executing and committing each chunk in state-syncing", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 83 }, - "hiddenSeries": false, - "id": 20, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "rate(aptos_executor_execute_and_commit_chunk_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_executor_execute_and_commit_chunk_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg Execute and commit chunk time", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "short", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The average time spent to commit blocks", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 83 }, - "hiddenSeries": false, - "id": 50, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, 
- "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(aptos_executor_execute_block_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_executor_execute_block_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg Total time to execute blocks", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "s", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The rate of speculative ", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 91 }, - "hiddenSeries": false, - "id": 32, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(aptos_execution_speculative_abort_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Execution speculative abort rate", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "$$hashKey": "object:125", "format": "none", "label": "aborts / s", "logBase": 1, "show": true }, - { "$$hashKey": "object:126", "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", 
"uid": "${Datasource}" }, - "description": "", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 91 }, - "hiddenSeries": false, - "id": 49, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(aptos_executor_vm_execute_block_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "vm_execution_time", - "range": true, - "refId": "C" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(aptos_execution_dependency_wait_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "hide": false, - "legendFormat": "executor_dependency_wait", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(aptos_execution_vm_init_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "hide": false, - "legendFormat": "vm_init", - "range": true, - "refId": "B" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - 
"expr": "quantile(0.67, rate(aptos_execution_task_validate_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "hide": false, - "legendFormat": "task_validate", - "range": true, - "refId": "D" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(aptos_execution_task_execute_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "hide": false, - "legendFormat": "task_execute", - "range": true, - "refId": "E" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(block_executor_signature_verification_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "hide": false, - "legendFormat": "signature_verification", - "range": true, - "refId": "F" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(aptos_vm_txn_total_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "hide": true, - "legendFormat": "vm_execute_user_txn", - "range": true, - "refId": "G" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, 
rate(block_executor_execute_block_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "block_executor_execute_block", - "range": true, - "refId": "H" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(aptos_parallel_execution_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "parallel_execution_seconds", - "range": true, - "refId": "I" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(aptos_rayon_execution_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "rayon_execution_seconds", - "range": true, - "refId": "J" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(aptos_execution_work_with_task_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "hide": false, - "legendFormat": "work_with_task", - "range": true, - "refId": "K" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(aptos_execution_get_next_task_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / avg(block_executor_concurrency{chain_name=~\"$chain_name\", 
cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "hide": false, - "legendFormat": "get_next_task", - "range": true, - "refId": "L" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(final_output_extraction_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "final_output_extraction", - "range": true, - "refId": "M" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(final_result_collection_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "fina_result_collection", - "range": true, - "refId": "N" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile(0.67, rate(update_counters_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / quantile(0.67, rate(aptos_executor_execute_block_seconds_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) )", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "update_counters_latency", - "range": true, - "refId": "O" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "BlockSTM time breakdown per block (temp)", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "$$hashKey": "object:314", "format": "s", "logBase": 1, "show": true }, - { "$$hashKey": "object:315", "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The counter that indicates the number of errors reported.", - "editable": false, - "error": false, - 
"fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 91 }, - "hiddenSeries": false, - "id": 22, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "expr": "aptos_executor_error_total{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}", - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Executor Error Counter", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "short", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 99 }, - "id": 55, - "panels": [], - "title": "Execution Per Block Gas", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The per-block gas consumed by parallel execution (BlockSTM) or sequential execution.", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 100 }, - "hiddenSeries": false, - "id": 52, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile by(stage) (0.5, rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"execution_gas|io_gas|non_storage_gas\", mode=\"parallel\"}[$interval]) / rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"execution_gas|io_gas|non_storage_gas\", mode=\"parallel\"}[$interval]))", - "hide": false, - "legendFormat": "parallel_{{stage}}", - "range": true, - 
"refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile by(stage) (0.5, rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"execution_gas|io_gas|non_storage_gas\", mode=\"sequential\"}[$interval]) / rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"execution_gas|io_gas|non_storage_gas\", mode=\"sequential\"}[$interval]))", - "hide": false, - "legendFormat": "sequential_{{stage}}", - "range": true, - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Per-block execution gas", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "$$hashKey": "object:314", "format": "none", "logBase": 1, "show": true }, - { "$$hashKey": "object:315", "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The per-block gas consumed by parallel execution (BlockSTM) or sequential execution.", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 100 }, - "hiddenSeries": false, - "id": 62, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile by(stage) (0.5, rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"storage_in_octas|storage_in_gas\", mode=\"parallel\"}[$interval]) / rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"storage_in_octas|storage_in_gas\", mode=\"parallel\"}[$interval]))", - "hide": false, - "legendFormat": "parallel_{{stage}}", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile by(stage) (0.5, rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", 
stage=~\"storage_in_octas|storage_in_gas\", mode=\"sequential\"}[$interval]) / rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage=~\"storage_in_octas|storage_in_gas\", mode=\"sequential\"}[$interval]))", - "hide": false, - "legendFormat": "sequential_{{stage}}", - "range": true, - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Per-block storage gas", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "$$hashKey": "object:314", "format": "none", "logBase": 1, "show": true }, - { "$$hashKey": "object:315", "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The per-block gas consumed by parallel execution (BlockSTM) or sequential execution.", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 100 }, - "hiddenSeries": false, - "id": 58, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile by(stage) (0.5, rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage='storage_in_octas', mode=\"parallel\"}[$interval]) / rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage='storage_in_octas', mode=\"parallel\"}[$interval]))", - "hide": false, - "legendFormat": "parallel_{{stage}}", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile by(stage) (0.5, rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage='storage_in_octas', mode=\"sequential\"}[$interval]) / rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage='storage_in_octas', mode=\"sequential\"}[$interval]))", - "hide": false, - "legendFormat": "sequential_{{stage}}", - "range": true, - "refId": "B" - } - ], 
- "thresholds": [], - "timeRegions": [], - "title": "Per-block storage fee", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "$$hashKey": "object:314", "format": "none", "logBase": 1, "show": true }, - { "$$hashKey": "object:315", "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The avg txn gas consumed by parallel execution (BlockSTM) or sequential execution.", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 108 }, - "hiddenSeries": false, - "id": 61, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile by(stage) (0.5, rate(aptos_execution_txn_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage!='storage_in_octas', mode=\"parallel\"}[$interval]) / rate(aptos_execution_txn_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage!='storage_in_octas', mode=\"parallel\"}[$interval]))", - "hide": false, - "legendFormat": "parallel_{{stage}}", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile by(stage) (0.5, rate(aptos_execution_txn_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage!='storage_in_octas', mode=\"sequential\"}[$interval]) / rate(aptos_execution_txn_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage!='storage_in_octas', mode=\"sequential\"}[$interval]))", - "hide": false, - "legendFormat": "sequential_{{stage}}", - "range": true, - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg txn gas", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "$$hashKey": "object:314", "format": "none", "logBase": 1, "show": true }, - { "$$hashKey": "object:315", "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - 
"bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The per-block gas consumed by parallel execution (BlockSTM) or sequential execution.", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 108 }, - "hiddenSeries": false, - "id": 60, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "quantile (0.5, \n (\n max by (kubernetes_pod_name, role) (\n rate(aptos_execution_block_gas_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage='non_storage_gas'}[$interval])\n )\n / \n max by (kubernetes_pod_name, role) (\n rate(aptos_execution_block_gas_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", stage='non_storage_gas'}[$interval])\n )\n )\n / \n (\n max by (kubernetes_pod_name, role) (\n rate(consensus_duration_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", op='execute_block'}[$interval])\n )\n / \n max by (kubernetes_pod_name, role) (\n rate(consensus_duration_count{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", op='execute_block'}[$interval])\n )\n )\n)", - "hide": false, - "legendFormat": "gas / s", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Execution speed - run-time gas / s", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "$$hashKey": "object:314", "format": "none", "logBase": 1, "show": true }, - { "$$hashKey": "object:315", "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The rate of the parallel / sequential execution exceeding the per-block gas limit. 
", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 108 }, - "hiddenSeries": false, - "id": 53, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(aptos_execution_gas_limit_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", mode=\"parallel\"}[$interval])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "parallel execution", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(aptos_execution_gas_limit_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", mode=\"sequential\"}[$interval])", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "sequential execution", - "range": true, - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Execution exceed block gas limit rate", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "$$hashKey": "object:125", "format": "none", "label": "aborts / s", "logBase": 1, "show": true }, - { "$$hashKey": "object:126", "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The average time spent to execute a block", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 116 }, - "hiddenSeries": false, - "id": 63, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(aptos_execution_transaction_shuffle_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", 
namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_execution_transaction_shuffle_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg Txn shuffling time", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "µs", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The average time spent to dedup the txns in a block", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 116 }, - "hiddenSeries": false, - "id": 70, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(aptos_execution_transaction_dedup_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) / rate(aptos_execution_transaction_dedup_seconds_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg Txn dedup time", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "µs", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "The time spent waiting for batches, per second", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 116 }, - "hiddenSeries": false, - "id": 71, - "legend": { - "alignAsTable": false, 
- "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(aptos_consensus_batch_wait_duration_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{kubernetes_pod_name}}-{{role}}", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Batch wait duration (per s)", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "µs", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "collapsed": false, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 124 }, - "id": 65, - "panels": [], - "title": "Sharded Execution", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "none" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 125 }, - "hiddenSeries": false, - "id": 66, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": " avg by (round_id)(max by(shard_id, round_id) (rate(sharded_block_execution_by_rounds_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ))", - "hide": false, - "legendFormat": "sub_block_execution_{{round_id}}", - "range": true, - "refId": "D" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(sharded_block_execution_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "hide": false, - "legendFormat": "sharded_execution", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": 
"${Datasource}" }, - "editorMode": "code", - "expr": "rate(sharded_execution_result_aggregation_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "hide": false, - "legendFormat": "aggregation_result", - "range": true, - "refId": "B" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(wait_for_sharded_output_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "hide": false, - "legendFormat": "wait_for_sharded_output", - "range": true, - "refId": "C" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Sharded execution time in 1s", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "$$hashKey": "object:140", "format": "none", "logBase": 1, "show": true }, - { "$$hashKey": "object:141", "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "none" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 8, "y": 125 }, - "hiddenSeries": false, - "id": 68, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sum by (round_id) (sum by (round_id, shard_id) (rate(sharded_block_executor_txn_count_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) / sum by(round_id, shard_id) (rate(sharded_block_execution_by_rounds_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) )", - "hide": false, - "legendFormat": "{{round_id}}", - "range": true, - "refId": "D" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Sharded execution TPS by round", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "$$hashKey": "object:140", "format": "none", "logBase": 1, "show": true }, - { "$$hashKey": "object:141", "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": 
false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "none" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 16, "y": 125 }, - "hiddenSeries": false, - "id": 67, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59981", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sum by (round_id) (avg by (round_id, shard_id) (rate(sharded_block_executor_txn_count_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ) )", - "hide": false, - "legendFormat": "{{round_id}}", - "range": true, - "refId": "D" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "(to fix) Sharded execution transaction counts by round", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "$$hashKey": "object:140", "format": "none", "logBase": 1, "show": true }, - { "$$hashKey": "object:141", "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "", - "editable": false, - "error": false, - "fieldConfig": { "defaults": { "unit": "none" }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 8, "w": 8, "x": 0, "y": 133 }, - "hiddenSeries": false, - "id": 69, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "10.2.0-59422pre", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": " avg by (round_id)(max by(shard_id, round_id) (rate(sharded_block_execution_by_rounds_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]) ))", - "hide": false, - "legendFormat": "sub_block_execution_{{round_id}}", - "range": true, - "refId": "D" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(sharded_block_execution_seconds_sum{ 
chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "hide": false, - "legendFormat": "sharded_execution", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(sharded_execution_result_aggregation_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "hide": false, - "legendFormat": "aggregation_result", - "range": true, - "refId": "B" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(wait_for_sharded_output_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])", - "hide": false, - "legendFormat": "wait_for_sharded_output", - "range": true, - "refId": "C" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "avg(rate(drop_state_view_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval]))", - "hide": false, - "legendFormat": "drop_state_view", - "range": true, - "refId": "E" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "max(avg by(shard_id) (rate(execute_shard_command_seconds_sum{ chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[$interval])))", - "hide": false, - "legendFormat": "execute_shard_command_seconds", - "range": true, - "refId": "F" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "(test-only)Sharded execution time in 1s", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "$$hashKey": "object:140", "format": "none", "logBase": 1, "show": true }, - { "$$hashKey": "object:141", "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } + "title": "Sharded Execution", + "type": "row" } ], "refresh": false, @@ -3931,6 +3938,6 @@ "timezone": "", "title": "execution", "uid": "execution", - "version": 45, + "version": 46, "weekStart": "" } diff --git a/dashboards/execution.json.gz b/dashboards/execution.json.gz index 52dce5287d9a2..9dc61835820de 100644 Binary files a/dashboards/execution.json.gz and b/dashboards/execution.json.gz differ diff --git a/dashboards/fullnodes.json b/dashboards/fullnodes.json index 14eadad2dcd4a..db3db84e60eaa 100644 --- a/dashboards/fullnodes.json +++ b/dashboards/fullnodes.json @@ -54,77 +54,69 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, "id": 7, - "isNew": false, "panels": [], "span": 0, "title": "State Sync", "type": "row" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "Ledger timestamp's lag compared to the nodes current time.", - "editable": false, - 
"error": false, - "fill": 0, - "fillGradient": 0, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "none" + }, + "overrides": [] + }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 1 }, - "hiddenSeries": false, "id": 29, - "isNew": false, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "9.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 0, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.2.0-60139", "targets": [ { - "expr": "(aptos_state_sync_timestamp{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", type=\"real\", kubernetes_pod_name=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"} - on(kubernetes_pod_name, kubernetes_pod_name) aptos_state_sync_timestamp{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", type=\"synced\", kubernetes_pod_name=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"})", + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "aptos_state_sync_version{type=\"synced\", kubernetes_pod_name=~\"$kubernetes_pod_name\", kubernetes_pod_name=~\".*fullnode.*\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\"}", "legendFormat": "{{kubernetes_pod_name}}-{{kubernetes_pod_name}}", + "range": true, "refId": "A" - }, - { - "expr": "(aptos_state_sync_timestamp{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", type=\"real\", job=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"} - on(kubernetes_pod_name, job) aptos_state_sync_timestamp{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", type=\"synced\", job=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"})", - "legendFormat": "{{kubernetes_pod_name}}-{{job}}", - "refId": "B" } ], - "thresholds": [], - "timeRegions": [], - "title": "State Sync Lag", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "format": "", "logBase": 0, 
"mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "ms", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } + "title": "State Sync Version", + "type": "timeseries" }, { "aliasColors": {}, @@ -132,7 +124,7 @@ "dashLength": 10, "dashes": false, "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "1 = It's fully synchronized\n0 = It's NOT synchronized", + "description": "", "editable": false, "error": false, "fill": 0, @@ -140,7 +132,6 @@ "gridPos": { "h": 8, "w": 12, "x": 12, "y": 1 }, "hiddenSeries": false, "id": 33, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -159,7 +150,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.1.1", + "pluginVersion": "10.2.0-60139", "pointradius": 0.5, "points": false, "renderer": "flot", @@ -170,19 +161,17 @@ "steppedLine": false, "targets": [ { - "expr": "(aptos_state_sync_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", type=\"highest\", kubernetes_pod_name=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"} - on(kubernetes_pod_name, kubernetes_pod_name) aptos_state_sync_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", type=\"committed\", kubernetes_pod_name=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}) < bool 15", + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "count(aptos_data_client_highest_advertised_data{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=\"validator_fullnode\", data_type=\"transactions\"} - on(kubernetes_pod_name,role,run_uuid) aptos_state_sync_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", namespace=~\"telemetry-service\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=\"validator_fullnode\", type=\"synced\"} > 50000) OR on() vector(0)", "legendFormat": "{{kubernetes_pod_name}}-{{kubernetes_pod_name}}", + "range": true, "refId": "A" - }, - { - "expr": "(aptos_state_sync_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", type=\"highest\", job=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"} - on(kubernetes_pod_name, job) aptos_state_sync_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", type=\"committed\", job=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}) < bool 15", - "legendFormat": "{{kubernetes_pod_name}}-{{job}}", - "refId": "B" } ], "thresholds": [], "timeRegions": [], - "title": "Synchronized", + "title": "Fullnodes lagging behind", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, @@ -206,7 +195,6 @@ "gridPos": { "h": 8, "w": 12, "x": 0, "y": 9 }, "hiddenSeries": false, "id": 2, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -225,7 +213,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.1.1", + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", @@ 
-272,7 +260,6 @@ "gridPos": { "h": 8, "w": 12, "x": 12, "y": 9 }, "hiddenSeries": false, "id": 13, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -291,7 +278,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.1.1", + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", @@ -302,19 +289,17 @@ "steppedLine": false, "targets": [ { - "expr": "sum(aptos_state_sync_active_upstream_peers{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}) by (kubernetes_pod_name, kubernetes_pod_name) ", + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "sum by (kubernetes_pod_name, network_id)(aptos_connections{kubernetes_pod_name=~\"$kubernetes_pod_name\", kubernetes_pod_name=~\".*fullnode.*\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\"})", "legendFormat": "{{kubernetes_pod_name}}-{{kubernetes_pod_name}}", + "range": true, "refId": "A" - }, - { - "expr": "sum(aptos_state_sync_active_upstream_peers{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", job=~\".*fullnode.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}) by (job, kubernetes_pod_name) ", - "legendFormat": "{{kubernetes_pod_name}}-{{job}}", - "refId": "B" } ], "thresholds": [], "timeRegions": [], - "title": "State Sync Upstream Peers", + "title": "Fullnode Upstream Peers", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, @@ -331,7 +316,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 17 }, "id": 31, - "isNew": false, "panels": [], "span": 0, "title": "Mempool", @@ -351,7 +335,6 @@ "gridPos": { "h": 8, "w": 12, "x": 0, "y": 18 }, "hiddenSeries": false, "id": 26, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -370,7 +353,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.1.1", + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", @@ -417,7 +400,6 @@ "gridPos": { "h": 8, "w": 12, "x": 12, "y": 18 }, "hiddenSeries": false, "id": 34, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -436,7 +418,7 @@ "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.1.1", + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", @@ -476,7 +458,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 26 }, "id": 22, - "isNew": false, "panels": [], "span": 0, "title": "Networking", @@ -492,9 +473,10 @@ "editable": false, "error": false, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 27 }, + "hiddenSeries": false, "id": 24, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -511,10 +493,13 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { "alertThreshold": true }, "percentage": false, + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", + "seriesOverrides": [], "spaceLength": 10, "span": 0, "stack": false, @@ -526,14 +511,17 @@ "refId": "A" } ], + "thresholds": 
[], + "timeRegions": [], "title": "Network Connections (Incoming)", - "tooltip": { "shared": true, "value_type": "individual" }, + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", - "xaxis": { "format": "", "logBase": 0, "show": true }, + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "show": true }, { "format": "short", "logBase": 1, "show": true } - ] + ], + "yaxis": { "align": false } }, { "aliasColors": {}, @@ -545,9 +533,10 @@ "editable": false, "error": false, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 27 }, + "hiddenSeries": false, "id": 35, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -564,10 +553,13 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { "alertThreshold": true }, "percentage": false, + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", + "seriesOverrides": [], "spaceLength": 10, "span": 0, "stack": false, @@ -579,14 +571,17 @@ "refId": "A" } ], + "thresholds": [], + "timeRegions": [], "title": "Network Connections (Outgoing)", - "tooltip": { "shared": true, "value_type": "individual" }, + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", - "xaxis": { "format": "", "logBase": 0, "show": true }, + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "show": true }, { "format": "short", "logBase": 1, "show": true } - ] + ], + "yaxis": { "align": false } }, { "aliasColors": {}, @@ -597,9 +592,10 @@ "editable": false, "error": false, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 35 }, + "hiddenSeries": false, "id": 20, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -616,34 +612,46 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { "alertThreshold": true }, "percentage": false, + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", + "seriesOverrides": [], "spaceLength": 10, "span": 0, "stack": false, "steppedLine": false, "targets": [ { - "expr": "sum(irate(container_network_transmit_bytes_total{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", pod=~\"$kubernetes_pod_name.*fullnode.*\"}[$interval])) by (pod)", + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "sum(irate(container_network_transmit_bytes_total{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\"}[$interval])) by (instance)", "legendFormat": "{{pod}}", + "range": true, "refId": "A" }, { - "expr": "sum(irate(container_network_transmit_bytes_total{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", pod=~\"$kubernetes_pod_name.*fullnode.*\"}[$interval]))", + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "sum(irate(container_network_transmit_bytes_total{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\"}[$interval]))", "legendFormat": "total", + "range": true, "refId": "B" } ], + "thresholds": [], + "timeRegions": [], "title": "Transmit Bandwidth", - "tooltip": { "shared": true, "value_type": "individual" }, + "tooltip": { "shared": true, "sort": 0, "value_type": 
"individual" }, "type": "graph", - "xaxis": { "format": "", "logBase": 0, "show": true }, + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "Bps", "logBase": 1, "show": true }, { "format": "short", "logBase": 1, "show": true } - ] + ], + "yaxis": { "align": false } }, { "aliasColors": {}, @@ -654,9 +662,10 @@ "editable": false, "error": false, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 35 }, + "hiddenSeries": false, "id": 19, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -673,10 +682,13 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { "alertThreshold": true }, "percentage": false, + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", + "seriesOverrides": [], "spaceLength": 10, "span": 0, "stack": false, @@ -693,14 +705,17 @@ "refId": "B" } ], + "thresholds": [], + "timeRegions": [], "title": "Receive Bandwidth", - "tooltip": { "shared": true, "value_type": "individual" }, + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", - "xaxis": { "format": "", "logBase": 0, "show": true }, + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "Bps", "logBase": 1, "show": true }, { "format": "short", "logBase": 1, "show": true } - ] + ], + "yaxis": { "align": false } }, { "collapsed": false, @@ -709,7 +724,6 @@ "error": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 43 }, "id": 9, - "isNew": false, "panels": [], "span": 0, "title": "System", @@ -721,12 +735,14 @@ "dashLength": 10, "dashes": false, "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", "editable": false, "error": false, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 44 }, + "hiddenSeries": false, "id": 5, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -743,34 +759,46 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { "alertThreshold": true }, "percentage": false, + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", + "seriesOverrides": [], "spaceLength": 10, "span": 0, "stack": false, "steppedLine": false, "targets": [ { - "expr": "1 - kubelet_volume_stats_available_bytes{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", persistentvolumeclaim=~\"fn.$kubernetes_pod_name.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"} / kubelet_volume_stats_capacity_bytes{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", persistentvolumeclaim=~\"fn.$kubernetes_pod_name.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}", - "legendFormat": "{{persistentvolumeclaim}}", + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "1 - kubelet_volume_stats_available_bytes{cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", persistentvolumeclaim=~\"fn.$kubernetes_pod_name.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"} / kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", persistentvolumeclaim=~\"fn.$kubernetes_pod_name.*\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}", + "legendFormat": "{{persistentvolumeclaim}}:{{namespace}}", + "range": true, "refId": "A" }, { - 
"expr": "1 - kubelet_volume_stats_available_bytes{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", persistentvolumeclaim=~\"$kubernetes_pod_name.*fullnode.*\", kubernetes_pod_name!~\"val.*\"} / kubelet_volume_stats_capacity_bytes{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", persistentvolumeclaim=~\"$kubernetes_pod_name.*fullnode.*\", kubernetes_pod_name!~\"val.*\"}", - "legendFormat": "{{persistentvolumeclaim}}", + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "1 - kubelet_volume_stats_available_bytes{cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", persistentvolumeclaim=~\"$kubernetes_pod_name.*fullnode.*\", kubernetes_pod_name!~\"val.*\"} / kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", persistentvolumeclaim=~\"$kubernetes_pod_name.*fullnode.*\", kubernetes_pod_name!~\"val.*\"}", + "legendFormat": "{{persistentvolumeclaim}}:{{namespace}}", + "range": true, "refId": "B" } ], - "title": "Percentage Disk Used", - "tooltip": { "shared": true, "value_type": "individual" }, + "thresholds": [], + "timeRegions": [], + "title": "Percentage Disk Used (global)", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", - "xaxis": { "format": "", "logBase": 0, "show": true }, + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "percentunit", "logBase": 1, "show": true }, { "format": "short", "logBase": 1, "show": true } - ] + ], + "yaxis": { "align": false } }, { "aliasColors": {}, @@ -781,9 +809,10 @@ "editable": false, "error": false, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 44 }, + "hiddenSeries": false, "id": 11, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -800,29 +829,38 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { "alertThreshold": true }, "percentage": false, + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", + "seriesOverrides": [], "spaceLength": 10, "span": 0, "stack": false, "steppedLine": false, "targets": [ { - "expr": "container_memory_working_set_bytes{container=\"fullnode\", pod=~\"$kubernetes_pod_name.*\", job=\"kubernetes-cadvisor\"}", + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "container_memory_working_set_bytes{container=\"fullnode\", pod=~\"$kubernetes_pod_name.*\", job=\"kubernetes-cadvisor\", cluster=\"$cluster\"}", "legendFormat": "{{pod}}", + "range": true, "refId": "A" } ], + "thresholds": [], + "timeRegions": [], "title": "Memory Usage", - "tooltip": { "shared": true, "value_type": "individual" }, + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", - "xaxis": { "format": "", "logBase": 0, "show": true }, + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "bytes", "logBase": 1, "show": true }, { "format": "short", "logBase": 1, "show": true } - ] + ], + "yaxis": { "align": false } }, { "aliasColors": {}, @@ -833,9 +871,10 @@ "editable": false, "error": false, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 52 }, + "hiddenSeries": false, "id": 17, - "isNew": false, "legend": { "alignAsTable": 
false, "avg": false, @@ -852,34 +891,46 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { "alertThreshold": true }, "percentage": false, + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", + "seriesOverrides": [], "spaceLength": 10, "span": 0, "stack": false, "steppedLine": false, "targets": [ { - "expr": "rate(container_cpu_usage_seconds_total{container=\"fullnode\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[$interval])", + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "rate(container_cpu_usage_seconds_total{container=\"fullnode\", kubernetes_pod_name=~\"$kubernetes_pod_name\", cluster=\"$cluster\"}[$interval])", "legendFormat": "{{kubernetes_pod_name}}-{{pod}}", + "range": true, "refId": "A" }, { - "expr": "rate(container_cpu_usage_seconds_total{container=\"fullnode\", pod=~\"pfn.*\"}[$interval])", + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "rate(container_cpu_usage_seconds_total{container=\"fullnode\", pod=~\"pfn.*\", cluster=\"$cluster\"}[$interval])", "legendFormat": "{{pod}}", + "range": true, "refId": "B" } ], + "thresholds": [], + "timeRegions": [], "title": "CPU Usage", - "tooltip": { "shared": true, "value_type": "individual" }, + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", - "xaxis": { "format": "", "logBase": 0, "show": true }, + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "short", "logBase": 1, "show": true }, { "format": "short", "logBase": 1, "show": true } - ] + ], + "yaxis": { "align": false } }, { "aliasColors": {}, @@ -890,9 +941,10 @@ "editable": false, "error": false, "fill": 0, + "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 52 }, + "hiddenSeries": false, "id": 15, - "isNew": false, "legend": { "alignAsTable": false, "avg": false, @@ -909,33 +961,42 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { "alertThreshold": true }, "percentage": false, + "pluginVersion": "10.2.0-60139", "pointradius": 2, "points": false, "renderer": "flot", + "seriesOverrides": [], "spaceLength": 10, "span": 0, "stack": false, "steppedLine": false, "targets": [ { - "expr": "time() - container_start_time_seconds{container=\"fullnode\", pod=~\"$kubernetes_pod_name.*\", job=\"kubernetes-cadvisor\"}", + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "editorMode": "code", + "expr": "time() - container_start_time_seconds{container=\"fullnode\", pod=~\"$kubernetes_pod_name.*\", job=\"kubernetes-cadvisor\", cluster=\"$cluster\"}", "legendFormat": "{{pod}}", + "range": true, "refId": "A" } ], + "thresholds": [], + "timeRegions": [], "title": "Uptime", - "tooltip": { "shared": true, "value_type": "individual" }, + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", - "xaxis": { "format": "", "logBase": 0, "show": true }, + "xaxis": { "format": "", "logBase": 0, "mode": "time", "show": true, "values": [] }, "yaxes": [ { "format": "s", "logBase": 1, "show": true }, { "format": "short", "logBase": 1, "show": true } - ] + ], + "yaxis": { "align": false } } ], - "schemaVersion": 37, - "style": "dark", + "refresh": "", + "schemaVersion": 38, "tags": [], "templating": { "list": [ @@ -966,7 +1027,8 @@ "allFormat": "", "allValue": ".*", "current": { "text": ["telemetry-service"], "value": "telemetry-service" }, - "datasource": null, + "datasource": { 
"type": "prometheus", "uid": "fHo-R604z" }, + "definition": "", "hide": 0, "includeAll": true, "label": "", @@ -980,16 +1042,18 @@ }, "refresh": 1, "regex": "", + "skipUrlSync": false, "sort": 0, "type": "query" }, { "allFormat": "", - "allValue": "", + "allValue": ".*", "current": { "text": ["mainnet"], "value": "mainnet" }, "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "definition": "", "hide": 0, - "includeAll": false, + "includeAll": true, "label": "", "multi": false, "multiFormat": "", @@ -1001,6 +1065,7 @@ }, "refresh": 1, "regex": "", + "skipUrlSync": false, "sort": 1, "type": "query" }, @@ -1008,7 +1073,8 @@ "allFormat": "", "allValue": ".*", "current": { "text": ["All"], "value": "$__all" }, - "datasource": null, + "datasource": { "type": "prometheus", "uid": "fHo-R604z" }, + "definition": "", "hide": 0, "includeAll": true, "label": "", @@ -1022,6 +1088,7 @@ }, "refresh": 1, "regex": "", + "skipUrlSync": false, "sort": 0, "type": "query" }, @@ -1030,6 +1097,7 @@ "allValue": ".*", "current": { "text": ["All"], "value": "$__all" }, "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "definition": "", "hide": 0, "includeAll": true, "label": "", @@ -1043,6 +1111,7 @@ }, "refresh": 1, "regex": "", + "skipUrlSync": false, "sort": 1, "type": "query" }, @@ -1051,6 +1120,7 @@ "allValue": ".*", "current": { "text": ["All"], "value": ["$__all"] }, "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "definition": "", "hide": 0, "includeAll": true, "label": "", @@ -1064,6 +1134,7 @@ }, "refresh": 1, "regex": "", + "skipUrlSync": false, "sort": 1, "type": "query" }, @@ -1102,6 +1173,6 @@ "timezone": "", "title": "fullnodes", "uid": "fullnodes", - "version": 3, + "version": 12, "weekStart": "" } diff --git a/dashboards/fullnodes.json.gz b/dashboards/fullnodes.json.gz index a376d37313eb2..0c8c10ad26d96 100644 Binary files a/dashboards/fullnodes.json.gz and b/dashboards/fullnodes.json.gz differ diff --git a/dashboards/state-sync-v2.json b/dashboards/state-sync-v2.json index 7371aba2b03ea..5ff49be99fd4d 100644 --- a/dashboards/state-sync-v2.json +++ b/dashboards/state-sync-v2.json @@ -54,11 +54,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -126,7 +128,7 @@ "showUnfilled": true, "valueMode": "color" }, - "pluginVersion": "10.0.3-cloud.3.cec3ab0e", + "pluginVersion": "10.2.0-60139", "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, @@ -151,11 +153,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -208,11 +212,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -272,11 +278,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": 
"line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -329,11 +337,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -384,11 +394,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -439,11 +451,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -494,11 +508,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 20, "gradientMode": "scheme", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "smooth", "lineWidth": 3, "pointSize": 5, @@ -597,11 +613,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 20, "gradientMode": "scheme", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "smooth", "lineWidth": 3, "pointSize": 5, @@ -760,11 +778,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -816,11 +836,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -871,11 +893,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -926,11 +950,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -981,11 +1007,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": 
false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1036,11 +1064,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1784,13 +1814,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1839,13 +1863,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1894,13 +1912,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -1949,13 +1961,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -2004,13 +2010,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -2059,13 +2059,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [] @@ -2116,13 +2110,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - }, + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, "unit": "s" }, "overrides": [ @@ -2191,13 +2179,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -2228,13 +2210,7 @@ "defaults": { "color": { "mode": "continuous-GrYlRd" }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -2289,13 
+2265,7 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { "color": "green", "value": null }, - { "color": "red", "value": 80 } - ] - } + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, "overrides": [] }, @@ -3490,11 +3460,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3505,12 +3477,18 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 125 }, - "id": 104, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 77 }, + "id": 120, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } @@ -3540,11 +3518,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 20, "gradientMode": "scheme", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "smooth", "lineWidth": 3, "pointSize": 5, @@ -3555,7 +3535,13 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, "unit": "s" }, "overrides": [ @@ -3569,7 +3555,7 @@ } ] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 125 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 77 }, "id": 102, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -3598,6 +3584,141 @@ "title": "Propose to sync latencies (seconds)", "type": "timeseries" }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The average latencies taken from when data is first proposed, to when it is finally synced.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + } + }, + "overrides": [] + }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 85 }, + "id": 104, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": 
"single", "sort": "none" } + }, + "pluginVersion": "v1.0", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(aptos_data_client_sync_latencies_sum{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", label=\"propose_to_sync_latency\"}[1m])/rate(aptos_data_client_sync_latencies_count{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", label=\"propose_to_sync_latency\"}[1m])\n-\nquantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", role=\"validator\", stage=\"committed\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", role=\"validator\", stage=\"committed\"}[$interval])) < 1000000", + "legendFormat": "{{kubernetes_pod_name}}-{{role}}-{{label}}", + "range": true, + "refId": "A" + } + ], + "title": "Average propose to sync latencies: state sync only (seconds)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "The latencies taken from when data is first proposed, to when it is finally synced.", + "fieldConfig": { + "defaults": { + "color": { "mode": "continuous-GrYlRd" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "scheme", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { "mode": "exclude", "names": ["Value"], "prefix": "All except:", "readOnly": true } + }, + "properties": [{ "id": "custom.hideFrom", "value": { "legend": false, "tooltip": false, "viz": true } }] + } + ] + }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 85 }, + "id": 121, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "pluginVersion": "v1.0", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "histogram_quantile(0.99, sum(rate(aptos_data_client_sync_latencies_bucket{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\",role=~\"$role\",label=\"propose_to_sync_latency\"}[$__rate_interval])) by (le))\n-\nquantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", role=\"validator\", stage=\"committed\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", 
namespace=~\"$namespace\", role=\"validator\", stage=\"committed\"}[$interval])) < 1000000", + "legendFormat": "P99", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "histogram_quantile(0.70, sum(rate(aptos_data_client_sync_latencies_bucket{chain_name=~\"$chain_name\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\",role=~\"$role\",label=\"propose_to_sync_latency\"}[$__rate_interval])) by (le))\n-\nquantile(0.67, rate(aptos_consensus_block_tracing_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", role=\"validator\", stage=\"committed\"}[$interval]) / rate(aptos_consensus_block_tracing_count{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", role=\"validator\", stage=\"committed\"}[$interval])) < 1000000", + "hide": false, + "legendFormat": "P70", + "range": true, + "refId": "B" + } + ], + "title": "Propose to sync latencies: state sync only (seconds)", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, "description": "The average latencies taken from when data is first proposed, to when it is finally seen.", @@ -3609,11 +3730,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3624,11 +3747,17 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 133 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 93 }, "id": 106, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -3659,11 +3788,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 20, "gradientMode": "scheme", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "smooth", "lineWidth": 3, "pointSize": 5, @@ -3674,7 +3805,13 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, "unit": "s" }, "overrides": [ @@ -3688,7 +3825,7 @@ } ] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 133 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 93 }, "id": 107, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -3728,11 +3865,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3743,11 +3882,17 @@ 
"thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + } }, "overrides": [] }, - "gridPos": { "h": 8, "w": 12, "x": 0, "y": 141 }, + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 101 }, "id": 100, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -3778,11 +3923,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 20, "gradientMode": "scheme", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "smooth", "lineWidth": 3, "pointSize": 5, @@ -3793,7 +3940,13 @@ "thresholdsStyle": { "mode": "off" } }, "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, "unit": "s" }, "overrides": [ @@ -3807,7 +3960,7 @@ } ] }, - "gridPos": { "h": 8, "w": 12, "x": 12, "y": 141 }, + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 101 }, "id": 105, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, @@ -3843,7 +3996,6 @@ ], "refresh": "", "schemaVersion": 38, - "style": "dark", "tags": ["aptos-core"], "templating": { "list": [ @@ -4046,6 +4198,6 @@ "timezone": "", "title": "state-sync-v2", "uid": "state_sync_v2", - "version": 24, + "version": 27, "weekStart": "" } diff --git a/dashboards/state-sync-v2.json.gz b/dashboards/state-sync-v2.json.gz index 06462109b7d17..ecefbdd175f7d 100644 Binary files a/dashboards/state-sync-v2.json.gz and b/dashboards/state-sync-v2.json.gz differ diff --git a/dashboards/storage-overview.json b/dashboards/storage-overview.json index 41c45e92a298e..2dacfb2953316 100644 --- a/dashboards/storage-overview.json +++ b/dashboards/storage-overview.json @@ -44,10 +44,11 @@ "gridPos": { "h": 3, "w": 24, "x": 0, "y": 1 }, "id": 3266, "options": { + "code": { "language": "plaintext", "showLineNumbers": false, "showMiniMap": false }, "content": "These are basic facts that can be useful for understanding what's happening on chain right now.\r\n\r\nFollow the sections below this to examine the three major focus areas of the storage system health.\r\n", "mode": "markdown" }, - "pluginVersion": "9.1.1", + "pluginVersion": "10.2.0-60139", "type": "text" }, { @@ -61,11 +62,13 @@ "axisColorMode": "text", "axisLabel": "version", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineStyle": { "fill": "solid" }, "lineWidth": 1, @@ -160,11 +163,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -176,7 +181,13 @@ }, "links": [], "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + 
"thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, "unit": "none" }, "overrides": [ @@ -230,11 +241,13 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", + "axisShow": false, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -247,7 +260,13 @@ "decimals": 2, "links": [], "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, "unit": "none" }, "overrides": [ @@ -1142,989 +1161,1048 @@ "type": "row" }, { - "collapsed": true, + "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 21 }, "id": 3292, - "panels": [ - { - "datasource": { "type": "prometheus", "uid": "PCD0403638111AF12" }, - "gridPos": { "h": 7, "w": 24, "x": 0, "y": 3 }, - "id": 3254, - "options": { - "content": "# Are we the bottleneck?\n\nThese graphs show how much time was spent in execution/commit stages.\n**If any of them is close to 1 second, that pipeline stage might be the current bottlenect in the system.**\nTo further understand the bottleneck, go to \"Storage Details\" dashboard.\n\nNote: These numbers are measured on the block_executor/chunk_executor side. It's not a bad idea to cross check these numbers with the numbers measured in consensus/state_sync side. If the numbers don't match, it might be an indication of a scheduling issue.\n\nOther things to look at if these stages are slow:\n- IOPS\n- CPU usage\n- API latencies\n- Number of txns per save (block size and/or chunk size)", - "mode": "markdown" + "panels": [], + "title": "Performance", + "type": "row" + }, + { + "datasource": { "type": "prometheus", "uid": "PCD0403638111AF12" }, + "gridPos": { "h": 7, "w": 24, "x": 0, "y": 22 }, + "id": 3254, + "options": { + "code": { "language": "plaintext", "showLineNumbers": false, "showMiniMap": false }, + "content": "# Are we the bottleneck?\n\nThese graphs show how much time was spent in execution/commit stages.\n**If any of them is close to 1 second, that pipeline stage might be the current bottlenect in the system.**\nTo further understand the bottleneck, go to \"Storage Details\" dashboard.\n\nNote: These numbers are measured on the block_executor/chunk_executor side. It's not a bad idea to cross check these numbers with the numbers measured in consensus/state_sync side. 
If the numbers don't match, it might be an indication of a scheduling issue.\n\nOther things to look at if these stages are slow:\n- IOPS\n- CPU usage\n- API latencies\n- Number of txns per save (block size and/or chunk size)", + "mode": "markdown" + }, + "pluginVersion": "10.2.0-60139", + "type": "text" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "line" } }, - "pluginVersion": "9.1.1", - "type": "text" + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 950 } + ] + }, + "unit": "s" }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 12, "x": 0, "y": 29 }, + "id": 3262, + "options": { + "legend": { "calcs": ["mean"], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "line" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 950 }] }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { "h": 9, "w": 12, "x": 0, "y": 10 }, - "id": 3262, - "options": { - "legend": { "calcs": ["mean"], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "single", "sort": "none" } - }, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sort_desc(rate(aptos_executor_execute_block_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]))\r\n", - "legendFormat": "{{kubernetes_pod_name}}", - "range": true, - "refId": "A" - } - ], - "title": "Time Spent in Execution per Second (consensus)", - "type": "timeseries" + "editorMode": "code", + "expr": "sort_desc(rate(aptos_executor_execute_block_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]))\r\n", + "legendFormat": "{{kubernetes_pod_name}}", + "range": true, + "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" 
}, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, - "unit": "s" - }, - "overrides": [] + "editorMode": "code", + "expr": "sort_desc(rate(aptos_executor_ledger_update_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]))\r\n", + "hide": false, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "B" + } + ], + "title": "Time Spent in Execution per Second (consensus)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "gridPos": { "h": 9, "w": 12, "x": 12, "y": 10 }, - "id": 3264, - "options": { - "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "single", "sort": "none" } + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] }, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(aptos_executor_commit_blocks_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m])", - "hide": false, - "legendFormat": "{{kubernetes_pod_name}}", - "range": true, - "refId": "A" - } - ], - "title": "Time Spent in Commit per Second (consensus)", - "type": "timeseries" + "unit": "s" }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 12, "x": 12, "y": 29 }, + "id": 3264, + "options": { + "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": 
false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, - "unit": "s" - }, - "overrides": [] + "editorMode": "code", + "expr": "rate(aptos_executor_commit_blocks_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m])", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}}", + "range": true, + "refId": "A" + } + ], + "title": "Time Spent in Commit per Second (consensus)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "gridPos": { "h": 9, "w": 12, "x": 0, "y": 19 }, - "id": 3280, - "options": { - "legend": { "calcs": ["mean"], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "single", "sort": "none" } + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] }, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sort_desc(rate(aptos_executor_apply_chunk_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]))", - "legendFormat": "{{kubernetes_pod_name}} apply", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sort_desc(rate(aptos_executor_execute_chunk_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]))", - "hide": false, - "legendFormat": "{{kubernetes_pod_name}} execute", - "range": true, - "refId": "B" - } - ], - "title": "Time Spent in Execution/Apply per Second (state sync)", - "type": "timeseries" + "unit": "s" }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 12, "x": 0, "y": 38 }, + "id": 3280, + "options": { + "legend": { "calcs": ["mean"], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - 
"barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { "h": 9, "w": 12, "x": 12, "y": 19 }, - "id": 3284, - "options": { - "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "single", "sort": "none" } - }, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(aptos_executor_commit_chunk_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m])", - "hide": false, - "legendFormat": "{{kubernetes_pod_name}}", - "range": true, - "refId": "A" - } - ], - "title": "Time Spent in Commit per Second (state sync)", - "type": "timeseries" + "editorMode": "code", + "expr": "sort_desc(rate(aptos_executor_apply_chunk_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]))", + "legendFormat": "{{kubernetes_pod_name}} apply", + "range": true, + "refId": "A" }, { - "datasource": { "type": "prometheus", "uid": "PCD0403638111AF12" }, - "gridPos": { "h": 5, "w": 24, "x": 0, "y": 28 }, - "id": 3278, - "options": { - "content": "# Disk IO\n\nFor markdown syntax help: [commonmark.org/help](https://commonmark.org/help/)", - "mode": "markdown" + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "sort_desc(rate(aptos_executor_execute_chunk_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m]))", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}} execute", + "range": true, + "refId": "B" + } + ], + "title": "Time Spent in Execution/Apply per Second (state sync)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "pluginVersion": "9.1.1", - "type": "text" + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + }, + "unit": "s" }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 12, "x": 12, "y": 38 }, + "id": 
3284, + "options": { + "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } - }, - "overrides": [] - }, - "gridPos": { "h": 10, "w": 12, "x": 0, "y": 33 }, - "id": 3272, - "options": { - "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "single", "sort": "none" } + "editorMode": "code", + "expr": "rate(aptos_executor_commit_chunk_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m])", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}}", + "range": true, + "refId": "A" + } + ], + "title": "Time Spent in Commit per Second (state sync)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "PCD0403638111AF12" }, + "gridPos": { "h": 5, "w": 24, "x": 0, "y": 47 }, + "id": 3278, + "options": { + "code": { "language": "plaintext", "showLineNumbers": false, "showMiniMap": false }, + "content": "# Disk IO\n\nFor markdown syntax help: [commonmark.org/help](https://commonmark.org/help/)", + "mode": "markdown" + }, + "pluginVersion": "10.2.0-60139", + "type": "text" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(node_linux_disk_io_in_progress{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m])", - "legendFormat": "{{kubernetes_pod_name}} {{name}}", - "range": true, - "refId": "A" - } - ], - "title": "# of disk IO in progress", - "type": "timeseries" + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] + } }, + "overrides": [] + }, + "gridPos": { "h": 10, "w": 12, "x": 0, 
"y": 52 }, + "id": 3272, + "options": { + "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "100% = 1vCPU", - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, - "unit": "percentunit" - }, - "overrides": [] + "editorMode": "code", + "expr": "rate(node_linux_disk_io_in_progress{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m])", + "legendFormat": "{{kubernetes_pod_name}} {{name}}", + "range": true, + "refId": "A" + } + ], + "title": "# of disk IO in progress", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "100% = 1vCPU", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisShow": false, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "gridPos": { "h": 10, "w": 12, "x": 12, "y": 33 }, - "id": 3276, - "options": { - "legend": { "calcs": ["mean"], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "single", "sort": "none" } + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { "color": "green", "value": null }, + { "color": "red", "value": 80 } + ] }, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sort_desc(irate(node_linux_system_cpu_usage{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\", state=\"iowait_ms\"}[1m]) / 1000)", - "legendFormat": "{{kubernetes_pod_name}}", - "range": true, - "refId": "A" - } - ], - "title": "CPU time waiting for IO", - "type": "timeseries" + "unit": "percentunit" }, + "overrides": [] + }, + "gridPos": { "h": 10, "w": 12, "x": 12, "y": 52 }, + "id": 3276, + "options": { + "legend": { "calcs": ["mean"], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - 
"defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "Read (-) / Write (+)", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { "h": 9, "w": 12, "x": 0, "y": 43 }, - "id": 3270, - "options": { - "legend": { - "calcs": ["lastNotNull", "mean"], - "displayMode": "table", - "placement": "right", - "showLegend": false - }, - "tooltip": { "mode": "single", "sort": "none" } + "editorMode": "code", + "expr": "sort_desc(irate(node_linux_system_cpu_usage{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\", state=\"iowait_ms\"}[1m]) / 1000)", + "legendFormat": "{{kubernetes_pod_name}}", + "range": true, + "refId": "A" + } + ], + "title": "CPU time waiting for IO", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Read (-) / Write (+)", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "pluginVersion": "9.1.1", - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "- sort_desc(rate(node_linux_disk_num_reads{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]))", - "legendFormat": "{{kubernetes_pod_name}} read", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sort_desc(rate(node_linux_disk_num_writes{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]))", - "hide": false, - "legendFormat": "{{kubernetes_pod_name}} write", - "range": true, - "refId": "B" - } - ], - "title": "IOPS", - "type": "timeseries" + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 12, "x": 0, "y": 62 }, + "id": 3270, + "options": { + "legend": { + "calcs": ["lastNotNull", "mean"], + "displayMode": "table", + "placement": "right", + "showLegend": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + 
"pluginVersion": "9.1.1", + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "Read (-) / Write (+)", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } - }, - "overrides": [] - }, - "gridPos": { "h": 9, "w": 12, "x": 12, "y": 43 }, - "id": 3309, - "options": { - "legend": { - "calcs": ["lastNotNull", "mean"], - "displayMode": "table", - "placement": "right", - "showLegend": false - }, - "tooltip": { "mode": "single", "sort": "none" } - }, - "pluginVersion": "9.1.1", - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "- sort_desc(rate(node_linux_disk_num_merged_reads{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]))", - "legendFormat": "{{kubernetes_pod_name}} {{name}} read", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sort_desc(rate(node_linux_disk_num_merged_writes{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]))", - "hide": false, - "legendFormat": "{{kubernetes_pod_name}} {{name}} write", - "range": true, - "refId": "B" - } - ], - "title": "IOPS Merged", - "type": "timeseries" + "editorMode": "code", + "expr": "- sort_desc(rate(node_linux_disk_num_reads{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]))", + "legendFormat": "{{kubernetes_pod_name}} read", + "range": true, + "refId": "A" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "", - "fill": 0, - "fillGradient": 0, - "gridPos": { "h": 9, "w": 12, "x": 0, "y": 52 }, - "hiddenSeries": false, - "id": 3311, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + "editorMode": "code", + "expr": "sort_desc(rate(node_linux_disk_num_writes{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]))", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}} write", + "range": true, + "refId": "B" + } + ], + "title": "IOPS", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + 
"axisLabel": "Read (-) / Write (+)", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "lines": true, - "linewidth": 1, - "maxPerRow": 3, - "nullPointMode": "null", - "options": { "alertThreshold": true }, - "percentage": false, - "pluginVersion": "9.1.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "repeatDirection": "h", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sum by (kubernetes_pod_name, run_uuid) (rate(aptos_schemadb_batch_commit_bytes_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", db_name=~\"$db_name\"}[1m]))", - "interval": "", - "legendFormat": "{{kubernetes_pod_name}} {{kubernetes_pod_name}} {{db_name}}", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Commit Bytes per Second", - "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, - "type": "graph", - "xaxis": { "mode": "time", "show": true, "values": [] }, - "yaxes": [ - { "format": "Bps", "logBase": 1, "show": true }, - { "format": "short", "logBase": 1, "show": true } - ], - "yaxis": { "align": false } + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 12, "x": 12, "y": 62 }, + "id": 3309, + "options": { + "legend": { + "calcs": ["lastNotNull", "mean"], + "displayMode": "table", + "placement": "right", + "showLegend": false + }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "Read (-) / Write (+)", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { "h": 9, "w": 12, "x": 12, "y": 52 }, - "id": 3305, - "options": { - "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, - "tooltip": { "mode": "single", "sort": "none" } - }, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sort_desc(rate(node_linux_disk_num_sectors_written{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", 
chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m])) * 512", - "legendFormat": "{{kubernetes_pod_name}} {{name}} write", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "- sort_desc(rate(node_linux_disk_num_sectors_read{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m])) * 512", - "hide": false, - "legendFormat": "{{kubernetes_pod_name}} {{name}} read", - "range": true, - "refId": "B" - } - ], - "title": "Disk Bandwidth (assuming 512B sectors)", - "type": "timeseries" + "editorMode": "code", + "expr": "- sort_desc(rate(node_linux_disk_num_merged_reads{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]))", + "legendFormat": "{{kubernetes_pod_name}} {{name}} read", + "range": true, + "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } - }, - "overrides": [] - }, - "gridPos": { "h": 9, "w": 12, "x": 0, "y": 61 }, - "id": 3307, - "options": { - "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, - "tooltip": { "mode": "single", "sort": "none" } - }, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "avg_over_time(node_linux_disk_io_in_progress{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m])", - "legendFormat": "{{kubernetes_pod_name}} {{name}}", - "range": true, - "refId": "A" - } - ], - "title": "IO in progress (1m avg)", - "type": "timeseries" - }, + "editorMode": "code", + "expr": "sort_desc(rate(node_linux_disk_num_merged_writes{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]))", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}} {{name}} write", + "range": true, + "refId": "B" + } + ], + "title": "IOPS Merged", + "type": "timeseries" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fill": 0, + "fillGradient": 0, + "gridPos": { "h": 9, "w": 12, "x": 0, "y": 71 }, + "hiddenSeries": false, + "id": 3311, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + 
"values": false + }, + "lines": true, + "linewidth": 1, + "maxPerRow": 3, + "nullPointMode": "null", + "options": { "alertThreshold": true }, + "percentage": false, + "pluginVersion": "9.1.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "repeatDirection": "h", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "Read (-) / Write (+)", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, - "unit": "ms" - }, - "overrides": [] - }, - "gridPos": { "h": 9, "w": 12, "x": 12, "y": 61 }, - "id": 3308, - "options": { - "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "single", "sort": "none" } + "editorMode": "code", + "expr": "sum by (kubernetes_pod_name, run_uuid) (rate(aptos_schemadb_batch_commit_bytes_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", db_name=~\"$db_name\"}[1m]))", + "interval": "", + "legendFormat": "{{kubernetes_pod_name}} {{kubernetes_pod_name}} {{db_name}}", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Commit Bytes per Second", + "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, + "type": "graph", + "xaxis": { "mode": "time", "show": true, "values": [] }, + "yaxes": [ + { "format": "Bps", "logBase": 1, "show": true }, + { "format": "short", "logBase": 1, "show": true } + ], + "yaxis": { "align": false } + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Read (-) / Write (+)", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "- rate(node_linux_disk_time_reading_ms{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]) / rate(node_linux_disk_num_reads[1m])", - "hide": false, - "legendFormat": "{{kubernetes_pod_name}} {{name}}", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": 
"${Datasource}" }, - "editorMode": "code", - "expr": "rate(node_linux_disk_time_writing_ms{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]) / rate(node_linux_disk_num_writes[1m])", - "hide": false, - "legendFormat": "{{kubernetes_pod_name}} {{name}}", - "range": true, - "refId": "B" - } - ], - "title": "Avg IO latency", - "type": "timeseries" + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "bytes" }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 12, "x": 12, "y": 71 }, + "id": 3305, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "Read (-) / Write (+)", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, - "unit": "bytes" - }, - "overrides": [] + "editorMode": "code", + "expr": "sort_desc(rate(node_linux_disk_num_sectors_written{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m])) * 512", + "legendFormat": "{{kubernetes_pod_name}} {{name}} write", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "- sort_desc(rate(node_linux_disk_num_sectors_read{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m])) * 512", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}} {{name}} read", + "range": true, + "refId": "B" + } + ], + "title": "Disk Bandwidth (assuming 512B sectors)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "gridPos": { "h": 9, "w": 12, "x": 0, "y": 70 }, - "id": 3312, - "options": { - "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "single", 
"sort": "none" } + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] } + }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 12, "x": 0, "y": 80 }, + "id": 3307, + "options": { + "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "avg_over_time(node_linux_disk_io_in_progress{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m])", + "legendFormat": "{{kubernetes_pod_name}} {{name}}", + "range": true, + "refId": "A" + } + ], + "title": "IO in progress (1m avg)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Read (-) / Write (+)", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "- rate(node_linux_disk_num_sectors_read{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]) * 512 / rate(node_linux_disk_num_reads[1m])", - "hide": false, - "legendFormat": "{{kubernetes_pod_name}} {{name}}", - "range": true, - "refId": "A" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(node_linux_disk_num_sectors_written{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]) * 512 / rate(node_linux_disk_num_writes[1m])", - "hide": false, - "legendFormat": "{{kubernetes_pod_name}} {{name}}", - "range": true, - "refId": "B" - } - ], - "title": "Avg IO Size (assumeing 512B sectors)", - "type": "timeseries" + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "ms" }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 12, "x": 12, "y": 80 }, + "id": 3308, + "options": { + "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "", - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "version", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", 
- "lineStyle": { "fill": "solid" }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "never", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "decimals": 0, - "links": [], - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }] }, - "unit": "none" - }, - "overrides": [ - { - "matcher": { "id": "byName", "options": "epoch (max)" }, - "properties": [ - { "id": "custom.axisPlacement", "value": "hidden" }, - { "id": "custom.axisLabel", "value": "epoch" }, - { "id": "custom.lineStyle" }, - { "id": "color", "value": { "fixedColor": "blue", "mode": "fixed" } }, - { "id": "custom.lineWidth", "value": 1 }, - { "id": "custom.axisColorMode", "value": "series" }, - { "id": "custom.lineStyle", "value": { "dash": [10, 10], "fill": "dash" } }, - { "id": "custom.axisPlacement", "value": "right" } - ] - }, - { - "matcher": { "id": "byRegexp", "options": ".* epoch" }, - "properties": [ - { "id": "custom.axisPlacement", "value": "right" }, - { "id": "custom.lineStyle", "value": { "dash": [10, 10], "fill": "dash" } } - ] - } - ] - }, - "gridPos": { "h": 9, "w": 12, "x": 12, "y": 70 }, - "id": 3301, - "options": { - "legend": { "calcs": ["lastNotNull"], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "multi", "sort": "none" } + "editorMode": "code", + "expr": "- rate(node_linux_disk_time_reading_ms{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]) / rate(node_linux_disk_num_reads[1m])", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}} {{name}}", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(node_linux_disk_time_writing_ms{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]) / rate(node_linux_disk_num_writes[1m])", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}} {{name}}", + "range": true, + "refId": "B" + } + ], + "title": "Avg IO latency", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Read (-) / Write (+)", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "pluginVersion": "9.1.1", - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "max(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", - "hide": true, - "interval": "", - "legendFormat": "latest (synced) (max)", - "range": true, - "refId": "A" - }, - { - 
"datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "max(aptos_storage_ledger_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", - "hide": true, - "interval": "", - "legendFormat": "committed (max)", - "range": true, - "refId": "C" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sort_desc(quantile by (pruner_name) (0.8, aptos_pruner_min_readable_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", pruner_name !=\"state_store\"}))", - "hide": true, - "interval": "", - "legendFormat": "{{kubernetes_pod_name}} {{pruner_name}} pruned-till (p80)", - "range": true, - "refId": "B" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "exemplar": false, - "expr": "max(aptos_storage_next_block_epoch{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", - "hide": true, - "legendFormat": "epoch (max)", - "range": true, - "refId": "D" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sort_desc(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", - "hide": false, - "legendFormat": "{{kubernetes_pod_name}} ver", - "range": true, - "refId": "E" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sort_desc(aptos_storage_next_block_epoch{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", - "hide": false, - "legendFormat": "{{kubernetes_pod_name}} epoch", - "range": true, - "refId": "F" - }, - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "aptos_storage_ledger_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}", - "hide": false, - "legendFormat": "ver", - "range": true, - "refId": "G" - } - ], - "title": "latest version, epoch, pruner versions", - "type": "timeseries" + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "bytes" }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 12, "x": 0, "y": 89 }, + "id": 3312, + "options": { + "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, 
"tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, - "unit": "percentunit" - }, - "overrides": [] + "editorMode": "code", + "expr": "- rate(node_linux_disk_num_sectors_read{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]) * 512 / rate(node_linux_disk_num_reads[1m])", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}} {{name}}", + "range": true, + "refId": "A" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(node_linux_disk_num_sectors_written{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]) * 512 / rate(node_linux_disk_num_writes[1m])", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}} {{name}}", + "range": true, + "refId": "B" + } + ], + "title": "Avg IO Size (assumeing 512B sectors)", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "version", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineStyle": { "fill": "solid" }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "gridPos": { "h": 9, "w": 12, "x": 0, "y": 79 }, - "id": 3274, - "options": { - "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "single", "sort": "none" } + "decimals": 0, + "links": [], + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }] }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { "id": "byName", "options": "epoch (max)" }, + "properties": [ + { "id": "custom.axisPlacement", "value": "hidden" }, + { "id": "custom.axisLabel", "value": "epoch" }, + { "id": "custom.lineStyle" }, + { "id": "color", "value": { "fixedColor": "blue", "mode": "fixed" } }, + { "id": "custom.lineWidth", "value": 1 }, + { "id": "custom.axisColorMode", "value": "series" }, + { "id": "custom.lineStyle", "value": { "dash": [10, 10], "fill": "dash" } }, + { "id": "custom.axisPlacement", "value": "right" } + ] }, - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "rate(node_linux_disk_total_io_time_ms{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]) / 1000", - "legendFormat": "{{kubernetes_pod_name}} {{name}}", - "range": true, - "refId": "A" - } - ], - "title": 
"Time spent in disk IO", - "type": "timeseries" + { + "matcher": { "id": "byRegexp", "options": ".* epoch" }, + "properties": [ + { "id": "custom.axisPlacement", "value": "right" }, + { "id": "custom.lineStyle", "value": { "dash": [10, 10], "fill": "dash" } } + ] + } + ] + }, + "gridPos": { "h": 9, "w": 12, "x": 12, "y": 89 }, + "id": 3301, + "options": { + "legend": { "calcs": ["lastNotNull"], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "max(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", + "hide": true, + "interval": "", + "legendFormat": "latest (synced) (max)", + "range": true, + "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "description": "This reflects the IO performance of the underlying device.\n\nExpect single or double digits of microseconds.", - "fieldConfig": { - "defaults": { - "color": { "mode": "palette-classic" }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { "legend": false, "tooltip": false, "viz": false }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { "type": "linear" }, - "showPoints": "never", - "spanNulls": false, - "stacking": { "group": "A", "mode": "none" }, - "thresholdsStyle": { "mode": "off" } - }, - "mappings": [], - "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, - "unit": "s" - }, - "overrides": [] + "editorMode": "code", + "expr": "max(aptos_storage_ledger_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", + "hide": true, + "interval": "", + "legendFormat": "committed (max)", + "range": true, + "refId": "C" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "sort_desc(quantile by (pruner_name) (0.8, aptos_pruner_min_readable_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\", pruner_name !=\"state_store\"}))", + "hide": true, + "interval": "", + "legendFormat": "{{kubernetes_pod_name}} {{pruner_name}} pruned-till (p80)", + "range": true, + "refId": "B" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "exemplar": false, + "expr": "max(aptos_storage_next_block_epoch{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", + "hide": true, + "legendFormat": "epoch (max)", + "range": true, + "refId": "D" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "sort_desc(aptos_storage_latest_transaction_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", 
namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}} ver", + "range": true, + "refId": "E" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "sort_desc(aptos_storage_next_block_epoch{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"})", + "hide": false, + "legendFormat": "{{kubernetes_pod_name}} epoch", + "range": true, + "refId": "F" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "aptos_storage_ledger_version{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}", + "hide": false, + "legendFormat": "ver", + "range": true, + "refId": "G" + } + ], + "title": "latest version, epoch, pruner versions", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { "type": "linear" }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "gridPos": { "h": 9, "w": 12, "x": 0, "y": 88 }, - "id": 1199, - "options": { - "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, - "tooltip": { "mode": "multi", "sort": "none" } + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 12, "x": 0, "y": 98 }, + "id": 3274, + "options": { + "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "single", "sort": "none" } + }, + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "rate(node_linux_disk_total_io_time_ms{metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", chain_name=~\"$chain_name\", cluster=~\"$cluster\", role=~\"$role\", kubernetes_pod_name=~\"$kubernetes_pod_name\"}[1m]) / 1000", + "legendFormat": "{{kubernetes_pod_name}} {{name}}", + "range": true, + "refId": "A" + } + ], + "title": "Time spent in disk IO", + "type": "timeseries" + }, + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "description": "This reflects the IO performance of the underlying device.\n\nExpect single or double digits of microseconds.", + "fieldConfig": { + "defaults": { + "color": { "mode": "palette-classic" }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { "legend": false, "tooltip": false, "viz": false }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + 
"scaleDistribution": { "type": "linear" }, + "showPoints": "never", + "spanNulls": false, + "stacking": { "group": "A", "mode": "none" }, + "thresholdsStyle": { "mode": "off" } }, - "pluginVersion": "9.1.1", - "targets": [ - { - "datasource": { "type": "prometheus", "uid": "${Datasource}" }, - "editorMode": "code", - "expr": "sum by (kubernetes_pod_name, run_uuid) (irate(aptos_schemadb_get_latency_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m])) / sum by (kubernetes_pod_name, run_uuid) (irate(aptos_schemadb_get_latency_seconds_count[1m]))", - "interval": "", - "legendFormat": "{{kubernetes_pod_name}}", - "range": true, - "refId": "A" - } - ], - "title": "SchemaDB get() latency", - "type": "timeseries" + "mappings": [], + "thresholds": { "mode": "absolute", "steps": [{ "color": "green" }, { "color": "red", "value": 80 }] }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { "h": 9, "w": 12, "x": 0, "y": 107 }, + "id": 1199, + "options": { + "legend": { "calcs": [], "displayMode": "table", "placement": "right", "showLegend": false }, + "tooltip": { "mode": "multi", "sort": "none" } + }, + "pluginVersion": "9.1.1", + "targets": [ + { + "datasource": { "type": "prometheus", "uid": "${Datasource}" }, + "editorMode": "code", + "expr": "sum by (kubernetes_pod_name, run_uuid) (irate(aptos_schemadb_get_latency_seconds_sum{chain_name=~\"$chain_name\", cluster=~\"$cluster\", metrics_source=~\"$metrics_source\", namespace=~\"$namespace\", kubernetes_pod_name=~\"$kubernetes_pod_name\", role=~\"$role\"}[1m])) / sum by (kubernetes_pod_name, run_uuid) (irate(aptos_schemadb_get_latency_seconds_count[1m]))", + "interval": "", + "legendFormat": "{{kubernetes_pod_name}}", + "range": true, + "refId": "A" } ], - "title": "Performance", - "type": "row" + "title": "SchemaDB get() latency", + "type": "timeseries" }, { "collapsed": true, - "gridPos": { "h": 1, "w": 24, "x": 0, "y": 22 }, + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 116 }, "id": 3294, "panels": [ { @@ -2296,9 +2374,8 @@ "type": "row" } ], - "refresh": "", + "refresh": false, "schemaVersion": 38, - "style": "dark", "tags": ["aptos-core"], "templating": { "list": [ @@ -2580,6 +2657,6 @@ "timezone": "", "title": "storage-overview", "uid": "ptUp6Vn4k", - "version": 6, + "version": 7, "weekStart": "" } diff --git a/dashboards/storage-overview.json.gz b/dashboards/storage-overview.json.gz index 95b9845ebb2d5..8e1fff49cb6fa 100644 Binary files a/dashboards/storage-overview.json.gz and b/dashboards/storage-overview.json.gz differ diff --git a/developer-docs-site/docs/concepts/index.md b/developer-docs-site/docs/concepts/index.md index 5b0c3157a336d..6dbe58bf92bd3 100644 --- a/developer-docs-site/docs/concepts/index.md +++ b/developer-docs-site/docs/concepts/index.md @@ -13,6 +13,7 @@ Start here to get into the core concepts of the Aptos blockchain. 
Then review ou - ### [Events](./events.md) - ### [Transactions and States](./txns-states.md) - ### [Gas and Transaction Fees](./gas-txn-fee.md) +- ### [Computing Transaction Gas](./base-gas.md) - ### [Blocks](./blocks.md) - ### [Staking](./staking.md) - ### [Governance](./governance.md) diff --git a/developer-docs-site/docs/guides/state-sync.md b/developer-docs-site/docs/guides/state-sync.md index af1ee0069d08c..a6b6a4a62020e 100644 --- a/developer-docs-site/docs/guides/state-sync.md +++ b/developer-docs-site/docs/guides/state-sync.md @@ -124,7 +124,7 @@ node configuration file: state_sync: state_sync_driver: bootstrapping_mode: DownloadLatestStates - continuous_syncing_mode: ApplyTransactionOutputs + continuous_syncing_mode: ExecuteTransactionsOrApplyOutputs ``` While your node is syncing, you'll be able to see the diff --git a/developer-docs-site/docs/indexer/txn-stream/local-development.md b/developer-docs-site/docs/indexer/txn-stream/local-development.md index db2014e04ca37..99808936290a7 100644 --- a/developer-docs-site/docs/indexer/txn-stream/local-development.md +++ b/developer-docs-site/docs/indexer/txn-stream/local-development.md @@ -31,6 +31,7 @@ In order to use the local development script you must have the following install docker-compose version --short ``` - grpcurl: [Installation Guide](https://github.com/fullstorydev/grpcurl#installation) +- OpenSSL ## Preparation Clone the aptos-core repo: @@ -108,3 +109,11 @@ Try setting the following environment variable before running the script: ```bash export REDIS_IMAGE_REPO=arm64v8/redis ``` + +### Cache worker is crashlooping or `Redis latest version update failed.` in log +Wipe the data: +```bash +poetry run python indexer_grpc_local.py wipe +``` + +This means historical data will be lost. diff --git a/developer-docs-site/docs/move/book/variables.md b/developer-docs-site/docs/move/book/variables.md index 4791dbc1ef928..0a8ceded383f4 100644 --- a/developer-docs-site/docs/move/book/variables.md +++ b/developer-docs-site/docs/move/book/variables.md @@ -714,25 +714,33 @@ before it is assigned a value. As mentioned above, the Move compiler will infer a `copy` or `move` if one is not indicated. The algorithm for doing so is quite simple: -- Any scalar value with the `copy` [ability](./abilities.md) is given a `copy`. +- Any value with the `copy` [ability](./abilities.md) is given a `copy`. - Any reference (both mutable `&mut` and immutable `&`) is given a `copy`. - Except under special circumstances where it is made a `move` for predictable borrow checker errors. - Any other value is given a `move`. - - This means that even though other values might be have the `copy` [ability](./abilities.md), it - must be done _explicitly_ by the programmer. - - This is to prevent accidental copies of large data structures. +- If the compiler can prove that the source value with copy ability is not used after the + assignment, then a move may be used instead of a copy for performance, but this will be invisible + to the programmer (except in possible decreased time or gas cost). 
For example: ```move +struct Foo { + f: u64 +} + +struct Coin has copy { + value: u64 +} + let s = b"hello"; let foo = Foo { f: 0 }; let coin = Coin { value: 0 }; -let s2 = s; // move +let s2 = s; // copy let foo2 = foo; // move -let coin2 = coin; // move +let coin2 = coin; // copy let x = 0; let b = false; diff --git a/developer-docs-site/docs/move/book/vector.md b/developer-docs-site/docs/move/book/vector.md index 6b99820bb5af5..95d7474262cf9 100644 --- a/developer-docs-site/docs/move/book/vector.md +++ b/developer-docs-site/docs/move/book/vector.md @@ -158,21 +158,11 @@ fun destroy_droppable_vector(vec: vector) { ``` Similarly, vectors cannot be copied unless the element type has `copy`. In other words, a -`vector` has `copy` if and only if `T` has `copy`. However, even copyable vectors are never -implicitly copied: - -```move -let x = vector::singleton(10); -let y = copy x; // compiler error without the copy! -``` - -Copies of large vectors can be expensive, so the compiler requires explicit `copy`'s to make it -easier to see where they are happening. +`vector` has `copy` if and only if `T` has `copy`. For more details see the sections on [type abilities](./abilities.md) and [generics](./generics.md). ## Ownership As mentioned [above](#destroying-and-copying-vectors), `vector` values can be copied only if the -elements can be copied. In that case, the copy must be explicit via a -[`copy`](./variables.md#move-and-copy) or a [dereference `*`](./references.md#reading-and-writing-through-references). +elements can be copied. \ No newline at end of file diff --git a/developer-docs-site/docs/tools/aptos-cli/use-cli/use-aptos-cli.md b/developer-docs-site/docs/tools/aptos-cli/use-cli/use-aptos-cli.md index 3eb8aec382c2c..e462167fad702 100644 --- a/developer-docs-site/docs/tools/aptos-cli/use-cli/use-aptos-cli.md +++ b/developer-docs-site/docs/tools/aptos-cli/use-cli/use-aptos-cli.md @@ -91,8 +91,6 @@ SUBCOMMANDS: Run a Move script test Runs Move unit tests for a package - transactional-test - Run Move transactional tests verify-package Downloads a package and verifies the bytecode view diff --git a/developer-docs-site/scripts/additional_dict.txt b/developer-docs-site/scripts/additional_dict.txt index 1923cc42c49d9..b9032ea48ec7e 100644 --- a/developer-docs-site/scripts/additional_dict.txt +++ b/developer-docs-site/scripts/additional_dict.txt @@ -224,6 +224,7 @@ Octa Octas Ohlone OpenAPI +OpenSSL OptionalAggregator PFN PFNs diff --git a/docker/builder/docker-bake-rust-all.hcl b/docker/builder/docker-bake-rust-all.hcl index 10d0e78fd972b..0ad73763c649d 100644 --- a/docker/builder/docker-bake-rust-all.hcl +++ b/docker/builder/docker-bake-rust-all.hcl @@ -69,7 +69,7 @@ group "forge-images" { target "debian-base" { dockerfile = "docker/builder/debian-base.Dockerfile" contexts = { - debian = "docker-image://debian:bullseye@sha256:7ac88cb3b95d347e89126a46696374fab97153b63d25995a5c6e75b5e98a0c79" + debian = "docker-image://debian:bullseye@sha256:eeb0fbb4bbde2b41055caeb458c43f0f4fbe33d01500360e0e6cbd69fb21ea05" } } diff --git a/docker/builder/indexer-grpc.Dockerfile b/docker/builder/indexer-grpc.Dockerfile index 8617d3c6a603a..7ae3904112fed 100644 --- a/docker/builder/indexer-grpc.Dockerfile +++ b/docker/builder/indexer-grpc.Dockerfile @@ -22,9 +22,9 @@ COPY --link --from=tools-builder /aptos/dist/aptos-indexer-grpc-post-processor / # The health check port EXPOSE 8080 # The gRPC non-TLS port -EXPOSE 50051 -# The gRPC TLS port EXPOSE 50052 +# The gRPC TLS port +EXPOSE 50053 ENV RUST_LOG_FORMAT=json diff 
--git a/docker/compose/indexer-grpc/cache-worker-config.yaml b/docker/compose/indexer-grpc/cache-worker-config.yaml index df7748a589529..66ea082bc99ad 100644 --- a/docker/compose/indexer-grpc/cache-worker-config.yaml +++ b/docker/compose/indexer-grpc/cache-worker-config.yaml @@ -1,8 +1,8 @@ health_check_port: 8082 server_config: - fullnode_grpc_address: 172.16.1.10:50051 + fullnode_grpc_address: http://172.16.1.10:50051 file_store_config: file_store_type: LocalFileStore local_file_store_path: /opt/aptos/file-store - redis_main_instance_address: 172.16.1.12:6379 # use the primary + redis_main_instance_address: redis://172.16.1.12:6379 # use the primary diff --git a/docker/compose/indexer-grpc/data-service-config.yaml b/docker/compose/indexer-grpc/data-service-config.yaml index fbe74424e3319..c791ec5c5d4d1 100644 --- a/docker/compose/indexer-grpc/data-service-config.yaml +++ b/docker/compose/indexer-grpc/data-service-config.yaml @@ -3,8 +3,12 @@ health_check_port: 8084 server_config: data_service_grpc_non_tls_config: data_service_grpc_listen_address: 0.0.0.0:50052 - whitelisted_auth_tokens: ["dummy_token"] + data_service_grpc_tls_config: + data_service_grpc_listen_address: 0.0.0.0:50053 + cert_path: /opt/aptos/certs/data-service-grpc-server.crt + key_path: /opt/aptos/certs/data-service-grpc-server.key + whitelisted_auth_tokens: [] file_store_config: file_store_type: LocalFileStore local_file_store_path: /opt/aptos/file-store - redis_read_replica_address: 172.16.1.22:6379 # use a the read replica + redis_read_replica_address: redis://172.16.1.22:6379 # use a the read replica diff --git a/docker/compose/indexer-grpc/docker-compose.yaml b/docker/compose/indexer-grpc/docker-compose.yaml index 9c60f53cbbfc4..5e2cb22d233e7 100644 --- a/docker/compose/indexer-grpc/docker-compose.yaml +++ b/docker/compose/indexer-grpc/docker-compose.yaml @@ -14,7 +14,8 @@ version: "3.8" services: redis: - image: ${REDIS_IMAGE_REPO:-redis}:6.2 + image: ${REDIS_IMAGE_REPO:-redis}:7.2 + command: redis-server --appendonly yes networks: shared: ipv4_address: 172.16.1.12 @@ -25,11 +26,11 @@ services: - 6379:6379 redis-replica: - image: ${REDIS_IMAGE_REPO:-redis}:6.2 + image: ${REDIS_IMAGE_REPO:-redis}:7.2 command: redis-server --replicaof redis 6379 networks: shared: - ipv4_address: 172.16.1.22 + ipv4_address: 172.16.1.22 restart: unless-stopped expose: - 6379 @@ -37,10 +38,10 @@ services: - redis indexer-grpc-cache-worker: - image: "${INDEXER_GRPC_IMAGE_REPO:-aptoslabs/indexer-grpc}:${IMAGE_TAG:-devnet}" + image: "${INDEXER_GRPC_IMAGE_REPO:-aptoslabs/indexer-grpc}:${IMAGE_TAG:-main}" networks: shared: - ipv4_address: 172.16.1.13 + ipv4_address: 172.16.1.13 restart: unless-stopped volumes: - type: volume # XXX: needed now before refactor https://github.com/aptos-labs/aptos-core/pull/8139 @@ -57,10 +58,10 @@ services: - redis indexer-grpc-file-store: - image: "${INDEXER_GRPC_IMAGE_REPO:-aptoslabs/indexer-grpc}:${IMAGE_TAG:-devnet}" + image: "${INDEXER_GRPC_IMAGE_REPO:-aptoslabs/indexer-grpc}:${IMAGE_TAG:-main}" networks: shared: - ipv4_address: 172.16.1.14 + ipv4_address: 172.16.1.14 restart: unless-stopped volumes: - type: volume @@ -77,10 +78,10 @@ services: - indexer-grpc-cache-worker indexer-grpc-data-service: - image: "${INDEXER_GRPC_IMAGE_REPO:-aptoslabs/indexer-grpc}:${IMAGE_TAG:-devnet}" + image: "${INDEXER_GRPC_IMAGE_REPO:-aptoslabs/indexer-grpc}:${IMAGE_TAG:-main}" networks: shared: - ipv4_address: 172.16.1.15 + ipv4_address: 172.16.1.15 restart: unless-stopped volumes: - type: volume # XXX: needed now before 
refactor https://github.com/aptos-labs/aptos-core/pull/8139 @@ -89,12 +90,19 @@ services: - type: bind source: ./data-service-config.yaml target: /opt/aptos/data-service-config.yaml + - type: bind + source: ./data-service-grpc-server.key + target: /opt/aptos/certs/data-service-grpc-server.key + - type: bind + source: ./data-service-grpc-server.crt + target: /opt/aptos/certs/data-service-grpc-server.crt command: - '/usr/local/bin/aptos-indexer-grpc-data-service' - '--config-path' - '/opt/aptos/data-service-config.yaml' ports: - - "50052:50052" # GRPC + - "50052:50052" # GRPC non-secure + - "50053:50053" # GRPC secure - "18084:8084" # health depends_on: - indexer-grpc-cache-worker diff --git a/docker/compose/indexer-grpc/file-store-config.yaml b/docker/compose/indexer-grpc/file-store-config.yaml index b71b4c1f6fd28..ceb94a19fbafc 100644 --- a/docker/compose/indexer-grpc/file-store-config.yaml +++ b/docker/compose/indexer-grpc/file-store-config.yaml @@ -1,7 +1,7 @@ health_check_port: 8083 server_config: - redis_main_instance_address: 172.16.1.12:6379 + redis_main_instance_address: redis://172.16.1.12:6379 file_store_config: file_store_type: LocalFileStore local_file_store_path: /opt/aptos/file-store diff --git a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/Cargo.toml index 2b3ca437063b8..91074b24a95d6 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/Cargo.toml +++ b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/Cargo.toml @@ -35,6 +35,7 @@ serde_yaml = { workspace = true } tokio = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } +url = { workspace = true } [dev-dependencies] aptos-config = { workspace = true } diff --git a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/lib.rs index 55820d580a76d..3bdb9f18882d7 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/lib.rs @@ -4,18 +4,19 @@ pub mod metrics; pub mod worker; -use anyhow::{Ok, Result}; +use anyhow::{Context, Result}; use aptos_indexer_grpc_server_framework::RunnableConfig; -use aptos_indexer_grpc_utils::config::IndexerGrpcFileStoreConfig; +use aptos_indexer_grpc_utils::{config::IndexerGrpcFileStoreConfig, types::RedisUrl}; use serde::{Deserialize, Serialize}; +use url::Url; use worker::Worker; #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub struct IndexerGrpcCacheWorkerConfig { - pub fullnode_grpc_address: String, + pub fullnode_grpc_address: Url, pub file_store_config: IndexerGrpcFileStoreConfig, - pub redis_main_instance_address: String, + pub redis_main_instance_address: RedisUrl, } #[async_trait::async_trait] @@ -26,12 +27,13 @@ impl RunnableConfig for IndexerGrpcCacheWorkerConfig { self.redis_main_instance_address.clone(), self.file_store_config.clone(), ) - .await; - worker.run().await; + .await + .context("Failed to create cache worker")?; + worker.run().await?; Ok(()) } fn get_server_name(&self) -> String { - "idxcache".to_string() + "idxcachewrkr".to_string() } } diff --git a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/worker.rs b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/worker.rs index dcdaec4be2911..f1ac225403438 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/worker.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-cache-worker/src/worker.rs @@ -5,6 +5,7 @@ use crate::metrics::{ ERROR_COUNT, 
LATEST_PROCESSED_VERSION, PROCESSED_BATCH_SIZE, PROCESSED_LATENCY_IN_SECS, PROCESSED_VERSIONS_COUNT, }; +use anyhow::{bail, Context, Result}; use aptos_indexer_grpc_utils::{ cache_operator::CacheOperator, config::IndexerGrpcFileStoreConfig, @@ -13,6 +14,7 @@ use aptos_indexer_grpc_utils::{ FileStoreMetadata, FileStoreOperator, GcsFileStoreOperator, LocalFileStoreOperator, }, time_diff_since_pb_timestamp_in_secs, + types::RedisUrl, }; use aptos_moving_average::MovingAverage; use aptos_protos::internal::fullnode::v1::{ @@ -22,6 +24,7 @@ use aptos_protos::internal::fullnode::v1::{ use futures::{self, StreamExt}; use prost::Message; use tracing::{error, info}; +use url::Url; type ChainID = u32; type StartingVersion = u64; @@ -30,7 +33,7 @@ pub struct Worker { /// Redis client. redis_client: redis::Client, /// Fullnode grpc address. - fullnode_grpc_address: String, + fullnode_grpc_address: Url, /// File store config file_store: IndexerGrpcFileStoreConfig, } @@ -58,17 +61,22 @@ pub(crate) enum GrpcDataStatus { impl Worker { pub async fn new( - fullnode_grpc_address: String, - redis_main_instance_address: String, + fullnode_grpc_address: Url, + redis_main_instance_address: RedisUrl, file_store: IndexerGrpcFileStoreConfig, - ) -> Self { - let redis_client = redis::Client::open(format!("redis://{}", redis_main_instance_address)) - .expect("Create redis client failed."); - Self { + ) -> Result { + let redis_client = redis::Client::open(redis_main_instance_address.0.clone()) + .with_context(|| { + format!( + "Failed to create redis client for {}", + redis_main_instance_address + ) + })?; + Ok(Self { redis_client, file_store, - fullnode_grpc_address: format!("http://{}", fullnode_grpc_address), - } + fullnode_grpc_address, + }) } /// The main loop of the worker is: @@ -79,14 +87,15 @@ impl Worker { /// * If metadata is not present and cache is not empty, crash. /// * If metadata is present, start from file store version. /// 4. Process the streaming response. - pub async fn run(&mut self) { + // TODO: Use the ! return type when it is stable. + pub async fn run(&mut self) -> Result<()> { // Re-connect if lost. loop { let conn = self .redis_client .get_tokio_connection_manager() .await - .expect("Get redis connection failed."); + .context("Get redis connection failed.")?; let mut rpc_client = create_grpc_client(self.fullnode_grpc_address.clone()).await; // 1. Fetch metadata. @@ -121,10 +130,15 @@ impl Worker { let response = rpc_client .get_transactions_from_node(request) .await - .unwrap(); + .with_context(|| { + format!( + "Failed to get transactions from node at starting version {}", + starting_version + ) + })?; // 3&4. Infinite streaming until error happens. Either stream ends or worker crashes. 
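    // `process_streaming_response` returns Ok(()) when the upstream fullnode closes the
    // stream (the upstream server disconnects clients after roughly five minutes); in that
    // case this outer loop re-fetches the file store metadata and opens a fresh stream.
    // Any failure is now propagated to the caller via `?` instead of panicking the worker.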
- process_streaming_response(conn, file_store_metadata, response.into_inner()).await; + process_streaming_response(conn, file_store_metadata, response.into_inner()).await?; } } } @@ -132,10 +146,10 @@ impl Worker { async fn process_transactions_from_node_response( response: TransactionsFromNodeResponse, cache_operator: &mut CacheOperator, -) -> anyhow::Result { +) -> Result { match response.response.unwrap() { Response::Status(status) => { - match StatusType::from_i32(status.r#type).expect("[Indexer Cache] Invalid status type.") + match StatusType::try_from(status.r#type).expect("[Indexer Cache] Invalid status type.") { StatusType::Init => Ok(GrpcDataStatus::StreamInit(status.start_version)), StatusType::BatchEnd => { @@ -155,9 +169,12 @@ async fn process_transactions_from_node_response( }, Response::Data(data) => { let transaction_len = data.transactions.len(); - let start_version = data.transactions.first().unwrap().version; - let first_transaction_pb_timestamp = - data.transactions.first().unwrap().timestamp.clone(); + let first_transaction = data + .transactions + .first() + .context("There were unexpectedly no transactions in the response")?; + let start_version = first_transaction.version; + let first_transaction_pb_timestamp = first_transaction.timestamp.clone(); let transactions = data .transactions .into_iter() @@ -168,11 +185,11 @@ async fn process_transactions_from_node_response( }; let mut encoded_proto_data = vec![]; tx.encode(&mut encoded_proto_data) - .expect("Encode transaction failed."); + .context("Encode transaction failed.")?; let base64_encoded_proto_data = base64::encode(encoded_proto_data); - (tx.version, base64_encoded_proto_data, timestamp_in_seconds) + Ok((tx.version, base64_encoded_proto_data, timestamp_in_seconds)) }) - .collect::>(); + .collect::>>()?; // Push to cache. 
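    // `transactions` is a Vec of (version, base64-encoded transaction proto,
    // timestamp-in-seconds) tuples; an encoding failure inside the map above now surfaces
    // as an Err from the collect instead of a panic. A failed cache write below increments
    // ERROR_COUNT and bails out of the stream with an error.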
match cache_operator.update_cache_transactions(transactions).await { @@ -181,7 +198,7 @@ async fn process_transactions_from_node_response( ERROR_COUNT .with_label_values(&["failed_to_update_cache_version"]) .inc(); - anyhow::bail!("Update cache with version failed: {}", e); + bail!("Update cache with version failed: {}", e); }, } if let Some(ref txn_time) = first_transaction_pb_timestamp { @@ -199,34 +216,34 @@ async fn process_transactions_from_node_response( async fn setup_cache_with_init_signal( conn: redis::aio::ConnectionManager, init_signal: TransactionsFromNodeResponse, -) -> ( +) -> Result<( CacheOperator, ChainID, StartingVersion, -) { +)> { let (fullnode_chain_id, starting_version) = - match init_signal.response.expect("Response type not exists.") { + match init_signal.response.expect("Response type does not exist.") { Response::Status(status_frame) => { - match StatusType::from_i32(status_frame.r#type).expect("Invalid status type.") { + match StatusType::try_from(status_frame.r#type).expect("Invalid status type.") { StatusType::Init => (init_signal.chain_id, status_frame.start_version), _ => { - panic!("[Indexer Cache] Streaming error: first frame is not INIT signal."); + bail!("[Indexer Cache] Streaming error: first frame is not INIT signal."); }, } }, _ => { - panic!("[Indexer Cache] Streaming error: first frame is not siganl frame."); + bail!("[Indexer Cache] Streaming error: first frame is not siganl frame."); }, }; let mut cache_operator = CacheOperator::new(conn); - cache_operator.cache_setup_if_needed().await; + cache_operator.cache_setup_if_needed().await?; cache_operator .update_or_verify_chain_id(fullnode_chain_id as u64) .await - .expect("[Indexer Cache] Chain id mismatch between cache and fullnode."); + .context("[Indexer Cache] Chain id mismatch between cache and fullnode.")?; - (cache_operator, fullnode_chain_id, starting_version) + Ok((cache_operator, fullnode_chain_id, starting_version)) } // Infinite streaming processing. Retry if error happens; crash if fatal. @@ -235,25 +252,27 @@ async fn process_streaming_response( file_store_metadata: Option, mut resp_stream: impl futures_core::Stream> + std::marker::Unpin, -) { +) -> Result<()> { let mut tps_calculator = MovingAverage::new(10_000); let mut transaction_count = 0; // 3. Set up the cache operator with init signal. let init_signal = match resp_stream.next().await { Some(Ok(r)) => r, _ => { - panic!("[Indexer Cache] Streaming error: no response."); + bail!("[Indexer Cache] Streaming error: no response."); }, }; let (mut cache_operator, fullnode_chain_id, starting_version) = - setup_cache_with_init_signal(conn, init_signal).await; + setup_cache_with_init_signal(conn, init_signal) + .await + .context("Failed to setup cache")?; // It's required to start the worker with the same version as file store. 
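    // A version or chain-id mismatch between the file store and the fullnode cannot be
    // recovered from within this stream, so the checks below now bail with an error that
    // propagates to the caller instead of panicking the whole process.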
if let Some(file_store_metadata) = file_store_metadata { if file_store_metadata.version != starting_version { - panic!("[Indexer Cache] File store version mismatch with fullnode."); + bail!("[Indexer Cache] File store version mismatch with fullnode."); } if file_store_metadata.chain_id != fullnode_chain_id as u64 { - panic!("[Indexer Cache] Chain id mismatch between file store and fullnode."); + bail!("[Indexer Cache] Chain id mismatch between file store and fullnode."); } } let mut current_version = starting_version; @@ -323,7 +342,7 @@ async fn process_streaming_response( cache_operator .update_cache_latest_version(transaction_count, current_version) .await - .unwrap(); + .context("Failed to update the latest version in the cache")?; transaction_count = 0; info!( current_version = current_version, @@ -343,4 +362,8 @@ async fn process_streaming_response( }, } } + + // It is expected that we get to this point, the upstream server disconnects + // clients after 5 minutes. + Ok(()) } diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs new file mode 100644 index 0000000000000..4c3168dbfc13a --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/config.rs @@ -0,0 +1,222 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::service::RawDataServerWrapper; +use anyhow::{bail, Result}; +use aptos_indexer_grpc_server_framework::RunnableConfig; +use aptos_indexer_grpc_utils::{config::IndexerGrpcFileStoreConfig, types::RedisUrl}; +use aptos_protos::{ + indexer::v1::FILE_DESCRIPTOR_SET as INDEXER_V1_FILE_DESCRIPTOR_SET, + transaction::v1::FILE_DESCRIPTOR_SET as TRANSACTION_V1_TESTING_FILE_DESCRIPTOR_SET, + util::timestamp::FILE_DESCRIPTOR_SET as UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET, +}; +use serde::{Deserialize, Serialize}; +use std::{collections::HashSet, net::SocketAddr}; +use tonic::{ + codec::CompressionEncoding, + codegen::InterceptedService, + metadata::{Ascii, MetadataValue}, + transport::Server, + Request, Status, +}; + +pub const SERVER_NAME: &str = "idxdatasvc"; + +// Default max response channel size. +const DEFAULT_MAX_RESPONSE_CHANNEL_SIZE: usize = 3; + +// HTTP2 ping interval and timeout. +// This can help server to garbage collect dead connections. +// tonic server: https://docs.rs/tonic/latest/tonic/transport/server/struct.Server.html#method.http2_keepalive_interval +const HTTP2_PING_INTERVAL_DURATION: std::time::Duration = std::time::Duration::from_secs(60); +const HTTP2_PING_TIMEOUT_DURATION: std::time::Duration = std::time::Duration::from_secs(10); + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct TlsConfig { + /// The address for the TLS GRPC server to listen on. + pub data_service_grpc_listen_address: SocketAddr, + pub cert_path: String, + pub key_path: String, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct NonTlsConfig { + /// The address for the TLS GRPC server to listen on. + pub data_service_grpc_listen_address: SocketAddr, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct IndexerGrpcDataServiceConfig { + /// If given, we will run a server that uses TLS. + pub data_service_grpc_tls_config: Option, + /// If given, we will run a server that does not use TLS. + pub data_service_grpc_non_tls_config: Option, + /// The size of the response channel that response can be buffered. 
+ #[serde(default = "IndexerGrpcDataServiceConfig::default_data_service_response_channel_size")] + pub data_service_response_channel_size: usize, + /// A list of auth tokens that are allowed to access the service. + pub whitelisted_auth_tokens: Vec, + /// If set, don't check for auth tokens. + #[serde(default)] + pub disable_auth_check: bool, + /// File store config. + pub file_store_config: IndexerGrpcFileStoreConfig, + /// Redis read replica address. + pub redis_read_replica_address: RedisUrl, +} + +impl IndexerGrpcDataServiceConfig { + pub fn new( + data_service_grpc_tls_config: Option, + data_service_grpc_non_tls_config: Option, + data_service_response_channel_size: Option, + whitelisted_auth_tokens: Vec, + disable_auth_check: bool, + file_store_config: IndexerGrpcFileStoreConfig, + redis_read_replica_address: RedisUrl, + ) -> Self { + Self { + data_service_grpc_tls_config, + data_service_grpc_non_tls_config, + data_service_response_channel_size: data_service_response_channel_size + .unwrap_or_else(Self::default_data_service_response_channel_size), + whitelisted_auth_tokens, + disable_auth_check, + file_store_config, + redis_read_replica_address, + } + } + + pub const fn default_data_service_response_channel_size() -> usize { + DEFAULT_MAX_RESPONSE_CHANNEL_SIZE + } +} + +#[async_trait::async_trait] +impl RunnableConfig for IndexerGrpcDataServiceConfig { + fn validate(&self) -> Result<()> { + if self.disable_auth_check && !self.whitelisted_auth_tokens.is_empty() { + bail!("disable_auth_check is set but whitelisted_auth_tokens is not empty"); + } + if !self.disable_auth_check && self.whitelisted_auth_tokens.is_empty() { + bail!("disable_auth_check is not set but whitelisted_auth_tokens is empty"); + } + if self.data_service_grpc_non_tls_config.is_none() + && self.data_service_grpc_tls_config.is_none() + { + bail!("At least one of data_service_grpc_non_tls_config and data_service_grpc_tls_config must be set"); + } + Ok(()) + } + + async fn run(&self) -> Result<()> { + let token_set = build_auth_token_set(self.whitelisted_auth_tokens.clone()); + let disable_auth_check = self.disable_auth_check; + let authentication_inceptor = + move |req: Request<()>| -> std::result::Result, Status> { + if disable_auth_check { + return std::result::Result::Ok(req); + } + let metadata = req.metadata(); + if let Some(token) = + metadata.get(aptos_indexer_grpc_utils::constants::GRPC_AUTH_TOKEN_HEADER) + { + if token_set.contains(token) { + std::result::Result::Ok(req) + } else { + Err(Status::unauthenticated("Invalid token")) + } + } else { + Err(Status::unauthenticated("Missing token")) + } + }; + let reflection_service = tonic_reflection::server::Builder::configure() + // Note: It is critical that the file descriptor set is registered for every + // file that the top level API proto depends on recursively. If you don't, + // compilation will still succeed but reflection will fail at runtime. + // + // TODO: Add a test for this / something in build.rs, this is a big footgun. + .register_encoded_file_descriptor_set(INDEXER_V1_FILE_DESCRIPTOR_SET) + .register_encoded_file_descriptor_set(TRANSACTION_V1_TESTING_FILE_DESCRIPTOR_SET) + .register_encoded_file_descriptor_set(UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET) + .build() + .map_err(|e| anyhow::anyhow!("Failed to build reflection service: {}", e))?; + + // Add authentication interceptor. 
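    // The RawData service built below is wrapped with gzip compression in both directions
    // and guarded by the interceptor defined above: unless `disable_auth_check` is set, a
    // request must carry a whitelisted token under GRPC_AUTH_TOKEN_HEADER or it is rejected
    // with `Status::unauthenticated`. A hypothetical client would attach that token as
    // request metadata before calling the service, e.g.
    //   request.metadata_mut().insert(GRPC_AUTH_TOKEN_HEADER, token.parse()?);
    // The intercepted service and the reflection service are cloned so the same services
    // can be served on both the non-TLS and TLS endpoints configured below.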
+ let server = RawDataServerWrapper::new( + self.redis_read_replica_address.clone(), + self.file_store_config.clone(), + self.data_service_response_channel_size, + )?; + let svc = aptos_protos::indexer::v1::raw_data_server::RawDataServer::new(server) + .send_compressed(CompressionEncoding::Gzip) + .accept_compressed(CompressionEncoding::Gzip); + let svc_with_interceptor = InterceptedService::new(svc, authentication_inceptor); + + let svc_with_interceptor_clone = svc_with_interceptor.clone(); + let reflection_service_clone = reflection_service.clone(); + + let mut tasks = vec![]; + if let Some(config) = &self.data_service_grpc_non_tls_config { + let listen_address = config.data_service_grpc_listen_address; + tracing::info!( + grpc_address = listen_address.to_string().as_str(), + "[data service] starting gRPC server with non-TLS." + ); + tasks.push(tokio::spawn(async move { + Server::builder() + .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) + .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) + .add_service(svc_with_interceptor_clone) + .add_service(reflection_service_clone) + .serve(listen_address) + .await + .map_err(|e| anyhow::anyhow!(e)) + })); + } + if let Some(config) = &self.data_service_grpc_tls_config { + let listen_address = config.data_service_grpc_listen_address; + let cert = tokio::fs::read(config.cert_path.clone()).await?; + let key = tokio::fs::read(config.key_path.clone()).await?; + let identity = tonic::transport::Identity::from_pem(cert, key); + tracing::info!( + grpc_address = listen_address.to_string().as_str(), + "[Data Service] Starting gRPC server with TLS." + ); + tasks.push(tokio::spawn(async move { + Server::builder() + .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) + .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) + .tls_config(tonic::transport::ServerTlsConfig::new().identity(identity))? + .add_service(svc_with_interceptor) + .add_service(reflection_service) + .serve(listen_address) + .await + .map_err(|e| anyhow::anyhow!(e)) + })); + } + + if tasks.is_empty() { + return Err(anyhow::anyhow!("No grpc config provided")); + } + + futures::future::try_join_all(tasks).await?; + Ok(()) + } + + fn get_server_name(&self) -> String { + SERVER_NAME.to_string() + } +} + +/// Build a set of whitelisted auth tokens. Invalid tokens are ignored. 
+pub fn build_auth_token_set(whitelisted_auth_tokens: Vec) -> HashSet> { + whitelisted_auth_tokens + .into_iter() + .map(|token| token.parse::>()) + .filter_map(Result::ok) + .collect::>() +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/lib.rs index c4c2ee138f6a7..5e27df871b3ac 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/lib.rs @@ -1,5 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +mod config; pub mod metrics; pub mod service; + +pub use config::{IndexerGrpcDataServiceConfig, NonTlsConfig, SERVER_NAME}; diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/main.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/main.rs index e05859fe06e02..04f273c28d493 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/main.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/main.rs @@ -2,181 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::Result; -use aptos_indexer_grpc_data_service::service::RawDataServerWrapper; -use aptos_indexer_grpc_server_framework::{RunnableConfig, ServerArgs}; -use aptos_indexer_grpc_utils::config::IndexerGrpcFileStoreConfig; -use aptos_protos::{ - indexer::v1::FILE_DESCRIPTOR_SET as INDEXER_V1_FILE_DESCRIPTOR_SET, - transaction::v1::FILE_DESCRIPTOR_SET as TRANSACTION_V1_TESTING_FILE_DESCRIPTOR_SET, - util::timestamp::FILE_DESCRIPTOR_SET as UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET, -}; +use aptos_indexer_grpc_data_service::IndexerGrpcDataServiceConfig; +use aptos_indexer_grpc_server_framework::ServerArgs; use clap::Parser; -use serde::{Deserialize, Serialize}; -use std::{collections::HashSet, net::ToSocketAddrs}; -use tonic::{ - codec::CompressionEncoding, - codegen::InterceptedService, - metadata::{Ascii, MetadataValue}, - transport::Server, - Request, Status, -}; - -// HTTP2 ping interval and timeout. -// This can help server to garbage collect dead connections. -// tonic server: https://docs.rs/tonic/latest/tonic/transport/server/struct.Server.html#method.http2_keepalive_interval -const HTTP2_PING_INTERVAL_DURATION: std::time::Duration = std::time::Duration::from_secs(60); -const HTTP2_PING_TIMEOUT_DURATION: std::time::Duration = std::time::Duration::from_secs(10); - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct TlsConfig { - // TLS config. - pub data_service_grpc_listen_address: String, - pub cert_path: String, - pub key_path: String, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct NonTlsConfig { - pub data_service_grpc_listen_address: String, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct IndexerGrpcDataServiceConfig { - // The address for TLS and non-TLS gRPC server to listen on. - pub data_service_grpc_tls_config: Option, - pub data_service_grpc_non_tls_config: Option, - // The size of the response channel that response can be buffered. - pub data_service_response_channel_size: Option, - // A list of auth tokens that are allowed to access the service. - pub whitelisted_auth_tokens: Vec, - // File store config. - pub file_store_config: IndexerGrpcFileStoreConfig, - // Redis read replica address. 
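For orientation, here is a hedged sketch of building the new IndexerGrpcDataServiceConfig defined in config.rs above and running its validate() checks. Field types that the declarations above abbreviate (notably the token whitelist as Vec<String>) are assumptions, and the paths and addresses are placeholders.

```rust
use std::str::FromStr;

use anyhow::Result;
use aptos_indexer_grpc_data_service::{IndexerGrpcDataServiceConfig, NonTlsConfig};
use aptos_indexer_grpc_server_framework::RunnableConfig;
use aptos_indexer_grpc_utils::{
    config::{IndexerGrpcFileStoreConfig, LocalFileStore},
    types::RedisUrl,
};

fn example_config() -> Result<IndexerGrpcDataServiceConfig> {
    let config = IndexerGrpcDataServiceConfig::new(
        // No TLS listener in this sketch.
        None,
        Some(NonTlsConfig {
            data_service_grpc_listen_address: "0.0.0.0:50052".parse()?,
        }),
        // Fall back to the default response channel size.
        None,
        vec!["dummy-token".to_string()],
        // Auth checks stay on because a whitelist is provided.
        false,
        IndexerGrpcFileStoreConfig::LocalFileStore(LocalFileStore {
            local_file_store_path: "/tmp/indexer-file-store".into(),
        }),
        RedisUrl::from_str("redis://127.0.0.1:6379")?,
    );
    // validate() rejects: an empty whitelist without disable_auth_check, a
    // whitelist together with disable_auth_check, and no listener at all.
    config.validate()?;
    Ok(config)
}
```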
- pub redis_read_replica_address: String, -} - -#[async_trait::async_trait] -impl RunnableConfig for IndexerGrpcDataServiceConfig { - async fn run(&self) -> Result<()> { - let token_set = build_auth_token_set(self.whitelisted_auth_tokens.clone()); - let authentication_inceptor = - move |req: Request<()>| -> std::result::Result, Status> { - let metadata = req.metadata(); - if let Some(token) = - metadata.get(aptos_indexer_grpc_utils::constants::GRPC_AUTH_TOKEN_HEADER) - { - if token_set.contains(token) { - std::result::Result::Ok(req) - } else { - Err(Status::unauthenticated("Invalid token")) - } - } else { - Err(Status::unauthenticated("Missing token")) - } - }; - let reflection_service = tonic_reflection::server::Builder::configure() - // Note: It is critical that the file descriptor set is registered for every - // file that the top level API proto depends on recursively. If you don't, - // compilation will still succeed but reflection will fail at runtime. - // - // TODO: Add a test for this / something in build.rs, this is a big footgun. - .register_encoded_file_descriptor_set(INDEXER_V1_FILE_DESCRIPTOR_SET) - .register_encoded_file_descriptor_set(TRANSACTION_V1_TESTING_FILE_DESCRIPTOR_SET) - .register_encoded_file_descriptor_set(UTIL_TIMESTAMP_FILE_DESCRIPTOR_SET) - .build() - .map_err(|e| anyhow::anyhow!("Failed to build reflection service: {}", e))?; - - // Add authentication interceptor. - let server = RawDataServerWrapper::new( - self.redis_read_replica_address.clone(), - self.file_store_config.clone(), - self.data_service_response_channel_size, - ); - let svc = aptos_protos::indexer::v1::raw_data_server::RawDataServer::new(server) - .send_compressed(CompressionEncoding::Gzip) - .accept_compressed(CompressionEncoding::Gzip); - let svc_with_interceptor = InterceptedService::new(svc, authentication_inceptor); - - let svc_with_interceptor_clone = svc_with_interceptor.clone(); - let reflection_service_clone = reflection_service.clone(); - - let mut tasks = vec![]; - if self.data_service_grpc_non_tls_config.is_some() { - let config = self.data_service_grpc_non_tls_config.clone().unwrap(); - let grpc_address = config - .data_service_grpc_listen_address - .to_socket_addrs() - .map_err(|e| anyhow::anyhow!(e))? - .next() - .ok_or_else(|| anyhow::anyhow!("Failed to parse grpc address"))?; - tracing::info!( - grpc_address = grpc_address.to_string().as_str(), - "[Data Service] Starting gRPC server with non-TLS." - ); - tasks.push(tokio::spawn(async move { - Server::builder() - .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) - .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) - .add_service(svc_with_interceptor_clone) - .add_service(reflection_service_clone) - .serve(grpc_address) - .await - .map_err(|e| anyhow::anyhow!(e)) - })); - } - if self.data_service_grpc_tls_config.is_some() { - let config = self.data_service_grpc_tls_config.clone().unwrap(); - let grpc_address = config - .data_service_grpc_listen_address - .to_socket_addrs() - .map_err(|e| anyhow::anyhow!(e))? - .next() - .ok_or_else(|| anyhow::anyhow!("Failed to parse grpc address"))?; - - let cert = tokio::fs::read(config.cert_path.clone()).await?; - let key = tokio::fs::read(config.key_path.clone()).await?; - let identity = tonic::transport::Identity::from_pem(cert, key); - tracing::info!( - grpc_address = grpc_address.to_string().as_str(), - "[Data Service] Starting gRPC server with TLS." 
- ); - tasks.push(tokio::spawn(async move { - Server::builder() - .http2_keepalive_interval(Some(HTTP2_PING_INTERVAL_DURATION)) - .http2_keepalive_timeout(Some(HTTP2_PING_TIMEOUT_DURATION)) - .tls_config(tonic::transport::ServerTlsConfig::new().identity(identity))? - .add_service(svc_with_interceptor) - .add_service(reflection_service) - .serve(grpc_address) - .await - .map_err(|e| anyhow::anyhow!(e)) - })); - } - - if tasks.is_empty() { - return Err(anyhow::anyhow!("No grpc config provided")); - } - - futures::future::try_join_all(tasks).await?; - Ok(()) - } - - fn get_server_name(&self) -> String { - "idxdata".to_string() - } -} - -/// Build a set of whitelisted auth tokens. Invalid tokens are ignored. -pub fn build_auth_token_set(whitelisted_auth_tokens: Vec) -> HashSet> { - whitelisted_auth_tokens - .into_iter() - .map(|token| token.parse::>()) - .filter_map(Result::ok) - .collect::>() -} #[tokio::main] async fn main() -> Result<()> { diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs index 7768b57da4b16..cbc7300755993 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs @@ -6,6 +6,7 @@ use crate::metrics::{ PROCESSED_LATENCY_IN_SECS, PROCESSED_LATENCY_IN_SECS_ALL, PROCESSED_VERSIONS_COUNT, SHORT_CONNECTION_COUNT, }; +use anyhow::Context; use aptos_indexer_grpc_utils::{ build_protobuf_encoded_transaction_wrappers, cache_operator::{CacheBatchGetStatus, CacheOperator}, @@ -15,7 +16,9 @@ use aptos_indexer_grpc_utils::{ BLOB_STORAGE_SIZE, GRPC_AUTH_TOKEN_HEADER, GRPC_REQUEST_NAME_HEADER, MESSAGE_SIZE_LIMIT, }, file_store_operator::{FileStoreOperator, GcsFileStoreOperator, LocalFileStoreOperator}, - time_diff_since_pb_timestamp_in_secs, EncodedTransactionWithVersion, + time_diff_since_pb_timestamp_in_secs, + types::RedisUrl, + EncodedTransactionWithVersion, }; use aptos_logger::prelude::{sample, SampleRate}; use aptos_moving_average::MovingAverage; @@ -49,9 +52,6 @@ const AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS: u64 = 50; // TODO(larry): fix all errors treated as transient errors. const TRANSIENT_DATA_ERROR_RETRY_SLEEP_DURATION_MS: u64 = 1000; -// Default max response channel size. -const DEFAULT_MAX_RESPONSE_CHANNEL_SIZE: usize = 3; - // The server will retry to send the response to the client and give up after RESPONSE_CHANNEL_SEND_TIMEOUT. // This is to prevent the server from being occupied by a slow client. 
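The comment above is the rationale for the constant that follows. As a standalone illustration (not the service code itself), the pattern is a bounded channel send wrapped in a timeout, so one stalled consumer cannot hold the producer indefinitely:

```rust
use std::time::Duration;
use tokio::{sync::mpsc, time::timeout};

// Mirrors RESPONSE_CHANNEL_SEND_TIMEOUT below; the exact value is the service's choice.
const SEND_TIMEOUT: Duration = Duration::from_secs(120);

/// Returns true if the item was delivered, false if the receiver is gone or too slow.
async fn send_or_give_up<T>(tx: &mpsc::Sender<T>, item: T) -> bool {
    match timeout(SEND_TIMEOUT, tx.send(item)).await {
        Ok(Ok(())) => true,  // the receiver accepted the item in time
        Ok(Err(_)) => false, // the receiver was dropped; the stream is gone
        Err(_) => false,     // timed out; treat the client as too slow and stop
    }
}
```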
const RESPONSE_CHANNEL_SEND_TIMEOUT: Duration = Duration::from_secs(120); @@ -65,23 +65,24 @@ const REQUEST_HEADER_APTOS_API_KEY_NAME: &str = "x-aptos-api-key-name"; pub struct RawDataServerWrapper { pub redis_client: Arc, pub file_store_config: IndexerGrpcFileStoreConfig, - pub data_service_response_channel_size: Option, + pub data_service_response_channel_size: usize, } impl RawDataServerWrapper { pub fn new( - redis_address: String, + redis_address: RedisUrl, file_store_config: IndexerGrpcFileStoreConfig, - data_service_response_channel_size: Option, - ) -> Self { - Self { + data_service_response_channel_size: usize, + ) -> anyhow::Result { + Ok(Self { redis_client: Arc::new( - redis::Client::open(format!("redis://{}", redis_address)) - .expect("Create redis client failed."), + redis::Client::open(redis_address.0.clone()).with_context(|| { + format!("Failed to create redis client for {}", redis_address) + })?, ), file_store_config, data_service_response_channel_size, - } + }) } } @@ -123,10 +124,7 @@ impl RawData for RawDataServerWrapper { let transactions_count = request.transactions_count; // Response channel to stream the data to the client. - let (tx, rx) = channel( - self.data_service_response_channel_size - .unwrap_or(DEFAULT_MAX_RESPONSE_CHANNEL_SIZE), - ); + let (tx, rx) = channel(self.data_service_response_channel_size); let mut current_version = match &request.starting_version { Some(version) => *version, None => { diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs index e44896e3f001d..e24cc1c010459 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/lib.rs @@ -4,9 +4,9 @@ pub mod metrics; pub mod processor; -use anyhow::Result; +use anyhow::{Context, Result}; use aptos_indexer_grpc_server_framework::RunnableConfig; -use aptos_indexer_grpc_utils::config::IndexerGrpcFileStoreConfig; +use aptos_indexer_grpc_utils::{config::IndexerGrpcFileStoreConfig, types::RedisUrl}; use processor::Processor; use serde::{Deserialize, Serialize}; @@ -14,7 +14,7 @@ use serde::{Deserialize, Serialize}; #[serde(deny_unknown_fields)] pub struct IndexerGrpcFileStoreWorkerConfig { pub file_store_config: IndexerGrpcFileStoreConfig, - pub redis_main_instance_address: String, + pub redis_main_instance_address: RedisUrl, } #[async_trait::async_trait] @@ -23,12 +23,14 @@ impl RunnableConfig for IndexerGrpcFileStoreWorkerConfig { let mut processor = Processor::new( self.redis_main_instance_address.clone(), self.file_store_config.clone(), - ); - processor.run().await; - Ok(()) + ) + .await + .context("Failed to create processor for file store worker")?; + processor.run().await?; + Err(anyhow::anyhow!("File store processor exited unexpectedly")) } fn get_server_name(&self) -> String { - "idxfile".to_string() + "idxfilestore".to_string() } } diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/processor.rs b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/processor.rs index a66c9d19b0d8a..43504e40ed292 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-file-store/src/processor.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-file-store/src/processor.rs @@ -2,12 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 use crate::metrics::{LATEST_PROCESSED_VERSION, PROCESSED_VERSIONS_COUNT}; +use anyhow::{bail, Context, Result}; use aptos_indexer_grpc_utils::{ build_protobuf_encoded_transaction_wrappers, cache_operator::{CacheBatchGetStatus, CacheOperator}, 
config::IndexerGrpcFileStoreConfig, constants::BLOB_STORAGE_SIZE, file_store_operator::{FileStoreOperator, GcsFileStoreOperator, LocalFileStoreOperator}, + types::RedisUrl, EncodedTransactionWithVersion, }; use aptos_moving_average::MovingAverage; @@ -19,43 +21,40 @@ const AHEAD_OF_CACHE_SLEEP_DURATION_IN_MILLIS: u64 = 100; /// Processor tails the data in cache and stores the data in file store. pub struct Processor { - cache_operator: Option>, - file_store_processor: Option>, - cache_chain_id: Option, - redis_main_instance_address: String, - file_store_config: IndexerGrpcFileStoreConfig, + cache_operator: CacheOperator, + file_store_operator: Box, + cache_chain_id: u64, } impl Processor { - pub fn new( - redis_main_instance_address: String, + pub async fn new( + redis_main_instance_address: RedisUrl, file_store_config: IndexerGrpcFileStoreConfig, - ) -> Self { - Self { - cache_operator: None, - file_store_processor: None, - cache_chain_id: None, - redis_main_instance_address, - file_store_config, - } - } - - /// Init the processor, including creating the redis connection and file store operator. - async fn init(&mut self) { + ) -> Result { // Connection to redis is a hard dependency for file store processor. - let conn = redis::Client::open(format!("redis://{}", self.redis_main_instance_address)) - .expect("Create redis client failed.") + let conn = redis::Client::open(redis_main_instance_address.0.clone()) + .with_context(|| { + format!( + "Create redis client for {} failed", + redis_main_instance_address.0 + ) + })? .get_tokio_connection_manager() .await - .expect("Create redis connection failed."); + .with_context(|| { + format!( + "Create redis connection to {} failed.", + redis_main_instance_address.0 + ) + })?; let mut cache_operator = CacheOperator::new(conn); - let chain_id = cache_operator + let cache_chain_id = cache_operator .get_chain_id() .await - .expect("Get chain id failed."); + .context("Get chain id failed.")?; - let file_store_operator: Box = match &self.file_store_config { + let file_store_operator: Box = match &file_store_config { IndexerGrpcFileStoreConfig::GcsFileStore(gcs_file_store) => { Box::new(GcsFileStoreOperator::new( gcs_file_store.gcs_file_store_bucket_name.clone(), @@ -70,24 +69,23 @@ impl Processor { }; file_store_operator.verify_storage_bucket_existence().await; - self.cache_operator = Some(cache_operator); - self.file_store_processor = Some(file_store_operator); - self.cache_chain_id = Some(chain_id); + Ok(Self { + cache_operator, + file_store_operator, + cache_chain_id, + }) } // Starts the processing. - pub async fn run(&mut self) { - self.init().await; - let cache_chain_id = self.cache_chain_id.unwrap(); + pub async fn run(&mut self) -> Result<()> { + let cache_chain_id = self.cache_chain_id; - // If file store and cache chain id don't match, panic. + // If file store and cache chain id don't match, return an error. let metadata = self - .file_store_processor - .as_mut() - .unwrap() + .file_store_operator .create_default_file_store_metadata_if_absent(cache_chain_id) .await - .unwrap(); + .context("Metadata did not match.")?; // This implements a two-cursor approach: // * One curosr is to track the current cache version. @@ -104,18 +102,16 @@ impl Processor { // 0. Data verfiication. // File store version has to be a multiple of BLOB_STORAGE_SIZE. 
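To make the two-cursor description above concrete, a small hedged model (the chunk size here is illustrative, not the real constant): the cache cursor moves with every batch read from Redis, while the file store cursor only moves in whole BLOB_STORAGE_SIZE chunks once enough transactions have been buffered and uploaded, which is why the alignment check below must always hold.

```rust
// Illustrative chunk size; the real value comes from constants::BLOB_STORAGE_SIZE.
const CHUNK: u64 = 1_000;

struct Cursors {
    cache_version: u64,      // next version to read from the cache
    file_store_version: u64, // next version to persist; always a multiple of CHUNK
}

impl Cursors {
    /// Record `fetched` versions read from the cache, then flush whole chunks.
    fn advance(&mut self, fetched: u64, buffered: &mut u64) {
        self.cache_version += fetched;
        *buffered += fetched;
        while *buffered >= CHUNK {
            // In the processor this is where upload_transactions(...) runs for one chunk.
            self.file_store_version += CHUNK;
            *buffered -= CHUNK;
        }
        debug_assert_eq!(self.file_store_version % CHUNK, 0);
    }
}
```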
if current_file_store_version % BLOB_STORAGE_SIZE as u64 != 0 { - panic!("File store version is not a multiple of BLOB_STORAGE_SIZE."); + bail!("File store version is not a multiple of BLOB_STORAGE_SIZE."); } let batch_get_result = self .cache_operator - .as_mut() - .unwrap() .batch_get_encoded_proto_data(current_cache_version) .await; let batch_get_result = - fullnode_grpc_status_handling(batch_get_result, current_cache_version); + fullnode_grpc_status_handling(batch_get_result, current_cache_version)?; let current_transactions = match batch_get_result { Some(transactions) => transactions, @@ -147,12 +143,10 @@ impl Processor { let process_size = transactions_buffer.len() / BLOB_STORAGE_SIZE * BLOB_STORAGE_SIZE; let current_batch = transactions_buffer.drain(..process_size).collect(); - self.file_store_processor - .as_mut() - .unwrap() + self.file_store_operator .upload_transactions(cache_chain_id, current_batch) .await - .unwrap(); + .context("Uploading transactions to file store failed.")?; PROCESSED_VERSIONS_COUNT.inc_by(process_size as u64); tps_calculator.tick_now(process_size as u64); info!( @@ -169,19 +163,19 @@ impl Processor { fn fullnode_grpc_status_handling( fullnode_rpc_status: anyhow::Result, batch_start_version: u64, -) -> Option> { +) -> Result>> { match fullnode_rpc_status { - Ok(CacheBatchGetStatus::Ok(encoded_transactions)) => Some( + Ok(CacheBatchGetStatus::Ok(encoded_transactions)) => Ok(Some( build_protobuf_encoded_transaction_wrappers(encoded_transactions, batch_start_version), - ), - Ok(CacheBatchGetStatus::NotReady) => None, + )), + Ok(CacheBatchGetStatus::NotReady) => Ok(None), Ok(CacheBatchGetStatus::EvictedFromCache) => { - panic!( - "[indexer file]Cache evicted from cache. For file store worker, this is not expected." + bail!( + "[indexer file] Cache evicted from cache. For file store worker, this is not expected." 
); }, Err(err) => { - panic!("Batch get encoded proto data failed: {}", err); + bail!("Batch get encoded proto data failed: {}", err); }, } } @@ -195,25 +189,27 @@ mod tests { let fullnode_rpc_status: anyhow::Result = Ok(CacheBatchGetStatus::NotReady); let batch_start_version = 0; - assert!(fullnode_grpc_status_handling(fullnode_rpc_status, batch_start_version).is_none()); + assert!( + fullnode_grpc_status_handling(fullnode_rpc_status, batch_start_version) + .unwrap() + .is_none() + ); } #[test] - #[should_panic] fn verify_the_grpc_status_handling_evicted_from_cache() { let fullnode_rpc_status: anyhow::Result = Ok(CacheBatchGetStatus::EvictedFromCache); let batch_start_version = 0; - fullnode_grpc_status_handling(fullnode_rpc_status, batch_start_version); + assert!(fullnode_grpc_status_handling(fullnode_rpc_status, batch_start_version).is_err()); } #[test] - #[should_panic] fn verify_the_grpc_status_handling_error() { let fullnode_rpc_status: anyhow::Result = Err(anyhow::anyhow!("Error")); let batch_start_version = 0; - fullnode_grpc_status_handling(fullnode_rpc_status, batch_start_version); + assert!(fullnode_grpc_status_handling(fullnode_rpc_status, batch_start_version).is_err()); } #[test] @@ -228,7 +224,7 @@ mod tests { let fullnode_rpc_status: anyhow::Result = Ok(CacheBatchGetStatus::Ok(transactions)); let actual_transactions = - fullnode_grpc_status_handling(fullnode_rpc_status, batch_start_version); + fullnode_grpc_status_handling(fullnode_rpc_status, batch_start_version).unwrap(); assert!(actual_transactions.is_some()); let actual_transactions = actual_transactions.unwrap(); assert_eq!(actual_transactions, transactions_with_version); diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs new file mode 100644 index 0000000000000..ac941b3cf0a1b --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs @@ -0,0 +1,151 @@ +// Copyright © Aptos Foundation + +use crate::{stream_coordinator::IndexerStreamCoordinator, ServiceContext}; +use aptos_logger::{error, info}; +use aptos_moving_average::MovingAverage; +use aptos_protos::internal::fullnode::v1::{ + fullnode_data_server::FullnodeData, stream_status::StatusType, transactions_from_node_response, + GetTransactionsFromNodeRequest, StreamStatus, TransactionsFromNodeResponse, +}; +use futures::Stream; +use std::pin::Pin; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{Request, Response, Status}; + +pub struct FullnodeDataService { + pub service_context: ServiceContext, +} + +type FullnodeResponseStream = + Pin> + Send>>; + +// Default Values +pub const DEFAULT_NUM_RETRIES: usize = 3; +pub const RETRY_TIME_MILLIS: u64 = 100; +const TRANSACTION_CHANNEL_SIZE: usize = 35; +const DEFAULT_EMIT_SIZE: usize = 1000; + +#[tonic::async_trait] +impl FullnodeData for FullnodeDataService { + type GetTransactionsFromNodeStream = FullnodeResponseStream; + + /// This function is required by the GRPC tonic server. It basically handles the request. + /// Given we want to persist the stream for better performance, our approach is that when + /// we receive a request, we will return a stream. Then as we process transactions, we + /// wrap those into a TransactionsResponse that we then push into the stream. 
+ /// There are 2 types of TransactionsResponse: + /// Status - sends events back to the client, such as init stream and batch end + /// Transaction - sends encoded transactions lightly wrapped + async fn get_transactions_from_node( + &self, + req: Request, + ) -> Result, Status> { + // Gets configs for the stream, partly from the request and partly from the node config + let r = req.into_inner(); + let starting_version = r.starting_version.expect("Starting version must be set"); + let processor_task_count = self.service_context.processor_task_count; + let processor_batch_size = self.service_context.processor_batch_size; + let output_batch_size = self.service_context.output_batch_size; + + // Some node metadata + let context = self.service_context.context.clone(); + let ledger_chain_id = context.chain_id().id(); + + // Creates a channel to send the stream to the client + let (tx, rx) = mpsc::channel(TRANSACTION_CHANNEL_SIZE); + + // Creates a moving average to track tps + let mut ma = MovingAverage::new(10_000); + + // This is the main thread handling pushing to the stream + tokio::spawn(async move { + // Initialize the coordinator that tracks starting version and processes transactions + let mut coordinator = IndexerStreamCoordinator::new( + context, + starting_version, + processor_task_count, + processor_batch_size, + output_batch_size, + tx.clone(), + ); + // Sends init message (one time per request) to the client in the with chain id and starting version. Basically a handshake + let init_status = get_status(StatusType::Init, starting_version, None, ledger_chain_id); + match tx.send(Result::<_, Status>::Ok(init_status)).await { + Ok(_) => { + // TODO: Add request details later + info!("[indexer-grpc] Init connection"); + }, + Err(_) => { + panic!("[indexer-grpc] Unable to initialize stream"); + }, + } + let mut base: u64 = 0; + loop { + // Processes and sends batch of transactions to client + let results = coordinator.process_next_batch().await; + let max_version = match IndexerStreamCoordinator::get_max_batch_version(results) { + Ok(max_version) => max_version, + Err(e) => { + error!("[indexer-grpc] Error sending to stream: {}", e); + break; + }, + }; + // send end batch message (each batch) upon success of the entire batch + // client can use the start and end version to ensure that there are no gaps + // end loop if this message fails to send because otherwise the client can't validate + let batch_end_status = get_status( + StatusType::BatchEnd, + coordinator.current_version, + Some(max_version), + ledger_chain_id, + ); + match tx.send(Result::<_, Status>::Ok(batch_end_status)).await { + Ok(_) => { + // tps logging + let new_base: u64 = ma.sum() / (DEFAULT_EMIT_SIZE as u64); + ma.tick_now(max_version - coordinator.current_version + 1); + if base != new_base { + base = new_base; + + info!( + batch_start_version = coordinator.current_version, + batch_end_version = max_version, + versions_processed = ma.sum(), + tps = (ma.avg() * 1000.0) as u64, + "[indexer-grpc] Sent batch successfully" + ); + } + }, + Err(_) => { + aptos_logger::warn!("[indexer-grpc] Unable to send end batch status"); + break; + }, + } + coordinator.current_version = max_version + 1; + } + }); + let output_stream = ReceiverStream::new(rx); + Ok(Response::new( + Box::pin(output_stream) as Self::GetTransactionsFromNodeStream + )) + } +} + +pub fn get_status( + status_type: StatusType, + start_version: u64, + end_version: Option, + ledger_chain_id: u8, +) -> TransactionsFromNodeResponse { + TransactionsFromNodeResponse { 
+ response: Some(transactions_from_node_response::Response::Status( + StreamStatus { + r#type: status_type as i32, + start_version, + end_version, + }, + )), + chain_id: ledger_chain_id as u32, + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/lib.rs index 376a73feea7f5..5d636c354a704 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/lib.rs @@ -1,10 +1,23 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use aptos_api::context::Context; +use std::sync::Arc; + pub mod convert; pub mod counters; +pub mod fullnode_data_service; +pub mod localnet_data_service; pub mod runtime; pub mod stream_coordinator; +#[derive(Clone, Debug)] +pub struct ServiceContext { + pub context: Arc, + pub processor_task_count: u16, + pub processor_batch_size: u16, + pub output_batch_size: u16, +} + #[cfg(test)] pub(crate) mod tests; diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/localnet_data_service.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/localnet_data_service.rs new file mode 100644 index 0000000000000..8e72f82a8f53b --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/localnet_data_service.rs @@ -0,0 +1,119 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{stream_coordinator::IndexerStreamCoordinator, ServiceContext}; +use aptos_logger::error; +use aptos_protos::{ + indexer::v1::{raw_data_server::RawData, GetTransactionsRequest, TransactionsResponse}, + internal::fullnode::v1::transactions_from_node_response, +}; +use futures::Stream; +use std::pin::Pin; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{Request, Response, Status}; + +// Default Values +pub const DEFAULT_NUM_RETRIES: usize = 3; +pub const RETRY_TIME_MILLIS: u64 = 100; +const TRANSACTION_CHANNEL_SIZE: usize = 35; + +type TransactionResponseStream = + Pin> + Send>>; + +pub struct LocalnetDataService { + pub service_context: ServiceContext, +} + +/// External service on the fullnode is for testing/local development only. +/// Performance is not optimized, e.g., single-threaded. +/// NOTE: code is duplicated from fullnode_data_service.rs with some minor changes. +#[tonic::async_trait] +impl RawData for LocalnetDataService { + type GetTransactionsStream = TransactionResponseStream; + + async fn get_transactions( + &self, + req: Request, + ) -> Result, Status> { + // Some node metadata + let context = self.service_context.context.clone(); + let r = req.into_inner(); + let starting_version = r.starting_version.expect("Starting version must be set"); + let processor_batch_size = self.service_context.processor_batch_size; + let output_batch_size = self.service_context.output_batch_size; + let ledger_chain_id = context.chain_id().id(); + let transactions_count = r.transactions_count; + // Creates a channel to send the stream to the client + let (tx, mut rx) = mpsc::channel(TRANSACTION_CHANNEL_SIZE); + let (external_service_tx, external_service_rx) = mpsc::channel(TRANSACTION_CHANNEL_SIZE); + + tokio::spawn(async move { + // Initialize the coordinator that tracks starting version and processes transactions + let mut coordinator = IndexerStreamCoordinator::new( + context, + starting_version, + // Performance is not important for raw data, and to make sure data is in order, + // single thread is used. 
+ 1, + processor_batch_size, + output_batch_size, + tx.clone(), + ); + loop { + // Processes and sends batch of transactions to client + let results = coordinator.process_next_batch().await; + let max_version = match IndexerStreamCoordinator::get_max_batch_version(results) { + Ok(max_version) => max_version, + Err(e) => { + error!("[indexer-grpc] Error sending to stream: {}", e); + break; + }, + }; + coordinator.current_version = max_version + 1; + } + }); + tokio::spawn(async move { + let mut response_transactions_count = transactions_count; + while let Some(response) = rx.recv().await { + if let Some(count) = response_transactions_count.as_ref() { + if *count == 0 { + break; + } + } + + let response = response.map(|t| TransactionsResponse { + chain_id: Some(ledger_chain_id as u64), + transactions: match t.response.expect("Response must be set") { + transactions_from_node_response::Response::Data(transaction_output) => { + let mut transactions = transaction_output.transactions; + let current_transactions_count = transactions.len() as u64; + if let Some(count) = response_transactions_count.as_mut() { + transactions = + transactions.into_iter().take(*count as usize).collect(); + *count = count.saturating_sub(current_transactions_count); + } + transactions + }, + _ => panic!("Unexpected response type."), + }, + }); + match external_service_tx.send(response).await { + Ok(_) => {}, + Err(e) => { + aptos_logger::warn!( + "[indexer-grpc] Unable to send end batch status: {:?}", + e + ); + break; + }, + } + } + }); + + let output_stream = ReceiverStream::new(external_service_rx); + Ok(Response::new( + Box::pin(output_stream) as Self::GetTransactionsStream + )) + } +} diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/runtime.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/runtime.rs index 09b744cf96e25..3044ef9c3f363 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/runtime.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/runtime.rs @@ -1,42 +1,27 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::stream_coordinator::IndexerStreamCoordinator; +use crate::{ + fullnode_data_service::FullnodeDataService, localnet_data_service::LocalnetDataService, + ServiceContext, +}; use aptos_api::context::Context; use aptos_config::config::NodeConfig; -use aptos_logger::{error, info}; +use aptos_logger::info; use aptos_mempool::MempoolClientSender; -use aptos_moving_average::MovingAverage; -use aptos_protos::internal::fullnode::v1::{ - fullnode_data_server::{FullnodeData, FullnodeDataServer}, - stream_status::StatusType, - transactions_from_node_response, GetTransactionsFromNodeRequest, StreamStatus, - TransactionsFromNodeResponse, +use aptos_protos::{ + indexer::v1::raw_data_server::RawDataServer, + internal::fullnode::v1::fullnode_data_server::FullnodeDataServer, }; use aptos_storage_interface::DbReader; use aptos_types::chain_id::ChainId; -use futures::Stream; -use std::{net::ToSocketAddrs, pin::Pin, sync::Arc}; -use tokio::{runtime::Runtime, sync::mpsc}; -use tokio_stream::wrappers::ReceiverStream; -use tonic::{transport::Server, Request, Response, Status}; +use std::{net::ToSocketAddrs, sync::Arc}; +use tokio::runtime::Runtime; +use tonic::{codec::CompressionEncoding, transport::Server}; // Default Values pub const DEFAULT_NUM_RETRIES: usize = 3; pub const RETRY_TIME_MILLIS: u64 = 100; -const TRANSACTION_CHANNEL_SIZE: usize = 35; -const DEFAULT_EMIT_SIZE: usize = 1000; - -type ResponseStream = - Pin> + Send>>; - -// The GRPC server -pub 
struct FullnodeDataService { - pub context: Arc, - pub processor_task_count: u16, - pub processor_batch_size: u16, - pub output_batch_size: u16, -} /// Creates a runtime which creates a thread pool which sets up the grpc streaming service /// Returns corresponding Tokio runtime @@ -54,26 +39,40 @@ pub fn bootstrap( let node_config = config.clone(); - // We have defaults for these so they should all return something nonnull so unwrap is safe here - let processor_task_count = node_config.indexer_grpc.processor_task_count.unwrap(); - let processor_batch_size = node_config.indexer_grpc.processor_batch_size.unwrap(); - let output_batch_size = node_config.indexer_grpc.output_batch_size.unwrap(); - let address = node_config.indexer_grpc.address.clone().unwrap(); + let address = node_config.indexer_grpc.address; + let use_data_service_interface = node_config.indexer_grpc.use_data_service_interface; + let processor_task_count = node_config.indexer_grpc.processor_task_count; + let processor_batch_size = node_config.indexer_grpc.processor_batch_size; + let output_batch_size = node_config.indexer_grpc.output_batch_size; runtime.spawn(async move { let context = Arc::new(Context::new(chain_id, db, mp_sender, node_config)); - let server = FullnodeDataService { - context, + let service_context = ServiceContext { + context: context.clone(), processor_task_count, processor_batch_size, output_batch_size, }; - - Server::builder() + // If we are here, we know indexer grpc is enabled. + let server = FullnodeDataService { + service_context: service_context.clone(), + }; + let localnet_data_server = LocalnetDataService { service_context }; + let mut tonic_server = Server::builder() .http2_keepalive_interval(Some(std::time::Duration::from_secs(60))) - .http2_keepalive_timeout(Some(std::time::Duration::from_secs(5))) - .add_service(FullnodeDataServer::new(server)) - // Make port into a config + .http2_keepalive_timeout(Some(std::time::Duration::from_secs(5))); + + let router = match use_data_service_interface { + false => tonic_server.add_service(FullnodeDataServer::new(server)), + true => { + let svc = RawDataServer::new(localnet_data_server) + .send_compressed(CompressionEncoding::Gzip) + .accept_compressed(CompressionEncoding::Gzip); + tonic_server.add_service(svc) + }, + }; + // Make port into a config + router .serve(address.to_socket_addrs().unwrap().next().unwrap()) .await .unwrap(); @@ -81,127 +80,3 @@ pub fn bootstrap( }); Some(runtime) } - -#[tonic::async_trait] -impl FullnodeData for FullnodeDataService { - type GetTransactionsFromNodeStream = ResponseStream; - - /// This function is required by the GRPC tonic server. It basically handles the request. - /// Given we want to persist the stream for better performance, our approach is that when - /// we receive a request, we will return a stream. Then as we process transactions, we - /// wrap those into a TransactionsResponse that we then push into the stream. 
- /// There are 2 types of TransactionsResponse: - /// Status - sends events back to the client, such as init stream and batch end - /// Transaction - sends encoded transactions lightly wrapped - async fn get_transactions_from_node( - &self, - req: Request, - ) -> Result, Status> { - // Gets configs for the stream, partly from the request and partly from the node config - let r = req.into_inner(); - let starting_version = r.starting_version.expect("Starting version must be set"); - let processor_task_count = self.processor_task_count; - let processor_batch_size = self.processor_batch_size; - let output_batch_size = self.output_batch_size; - - // Some node metadata - let context = self.context.clone(); - let ledger_chain_id = context.chain_id().id(); - - // Creates a channel to send the stream to the client - let (tx, rx) = mpsc::channel(TRANSACTION_CHANNEL_SIZE); - - // Creates a moving average to track tps - let mut ma = MovingAverage::new(10_000); - - // This is the main thread handling pushing to the stream - tokio::spawn(async move { - // Initialize the coordinator that tracks starting version and processes transactions - let mut coordinator = IndexerStreamCoordinator::new( - context, - starting_version, - processor_task_count, - processor_batch_size, - output_batch_size, - tx.clone(), - ); - // Sends init message (one time per request) to the client in the with chain id and starting version. Basically a handshake - let init_status = get_status(StatusType::Init, starting_version, None, ledger_chain_id); - match tx.send(Result::<_, Status>::Ok(init_status)).await { - Ok(_) => { - // TODO: Add request details later - info!("[indexer-grpc] Init connection"); - }, - Err(_) => { - panic!("[indexer-grpc] Unable to initialize stream"); - }, - } - let mut base: u64 = 0; - loop { - // Processes and sends batch of transactions to client - let results = coordinator.process_next_batch().await; - let max_version = match IndexerStreamCoordinator::get_max_batch_version(results) { - Ok(max_version) => max_version, - Err(e) => { - error!("[indexer-grpc] Error sending to stream: {}", e); - break; - }, - }; - // send end batch message (each batch) upon success of the entire batch - // client can use the start and end version to ensure that there are no gaps - // end loop if this message fails to send because otherwise the client can't validate - let batch_end_status = get_status( - StatusType::BatchEnd, - coordinator.current_version, - Some(max_version), - ledger_chain_id, - ); - match tx.send(Result::<_, Status>::Ok(batch_end_status)).await { - Ok(_) => { - // tps logging - let new_base: u64 = ma.sum() / (DEFAULT_EMIT_SIZE as u64); - ma.tick_now(max_version - coordinator.current_version + 1); - if base != new_base { - base = new_base; - - info!( - batch_start_version = coordinator.current_version, - batch_end_version = max_version, - versions_processed = ma.sum(), - tps = (ma.avg() * 1000.0) as u64, - "[indexer-grpc] Sent batch successfully" - ); - } - }, - Err(_) => { - aptos_logger::warn!("[indexer-grpc] Unable to send end batch status"); - break; - }, - } - coordinator.current_version = max_version + 1; - } - }); - let output_stream = ReceiverStream::new(rx); - Ok(Response::new( - Box::pin(output_stream) as Self::GetTransactionsFromNodeStream - )) - } -} - -pub fn get_status( - status_type: StatusType, - start_version: u64, - end_version: Option, - ledger_chain_id: u8, -) -> TransactionsFromNodeResponse { - TransactionsFromNodeResponse { - response: 
Some(transactions_from_node_response::Response::Status( - StreamStatus { - r#type: status_type as i32, - start_version, - end_version, - }, - )), - chain_id: ledger_chain_id as u32, - } -} diff --git a/ecosystem/indexer-grpc/indexer-grpc-integration-tests/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-integration-tests/Cargo.toml index e3829549a7239..a411a9740e6e4 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-integration-tests/Cargo.toml +++ b/ecosystem/indexer-grpc/indexer-grpc-integration-tests/Cargo.toml @@ -27,6 +27,7 @@ futures = { workspace = true } futures-core = { workspace = true } futures-util = { workspace = true } itertools = { workspace = true } +once_cell = { workspace = true } prometheus = { workspace = true } prost = { workspace = true } redis = { workspace = true } diff --git a/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/tests/fullnode_tests.rs b/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/tests/fullnode_tests.rs index 48466edcf2bfd..ec2a88c23b0e4 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/tests/fullnode_tests.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-integration-tests/src/tests/fullnode_tests.rs @@ -11,19 +11,25 @@ use aptos_indexer_grpc_utils::{ config::{IndexerGrpcFileStoreConfig, LocalFileStore}, constants::BLOB_STORAGE_SIZE, file_store_operator::{FileStoreOperator, LocalFileStoreOperator}, + types::RedisUrl, }; use aptos_transaction_emitter_lib::{emit_transactions, ClusterArgs, CoinSourceArgs, EmitArgs}; use aptos_transaction_generator_lib::args::TransactionTypeArg; use aptos_types::chain_id::ChainId; +use once_cell::sync::Lazy; use regex::Regex; -use std::{fs::File, io::Write, path::PathBuf}; +use std::{fs::File, io::Write, path::PathBuf, str::FromStr}; use tempfile::TempDir; use tokio::task::JoinHandle; use tracing::info; +use url::Url; -static TESTNET_REST_API_URL: &str = "http://localhost:8080"; -static TESTNET_FULLNODE_GRPC_URL: &str = "localhost:50051"; -static REDIS_PRIMARY_URL: &str = "localhost:6379"; +static TESTNET_REST_API_URL: Lazy = + Lazy::new(|| Url::from_str("http://127.0.0.1:8080").unwrap()); +static TESTNET_FULLNODE_GRPC_URL: Lazy = + Lazy::new(|| Url::from_str("http://127.0.0.1:50051").unwrap()); +static REDIS_PRIMARY_URL: Lazy = + Lazy::new(|| RedisUrl::from_str("redis://127.0.0.1:6379").unwrap()); static MINT_KEY_FILE_NAME: &str = "mint.key"; @@ -51,7 +57,7 @@ async fn reset_redis() -> Result<()> { .output()?; } - let conn = redis::Client::open(format!("redis://{}", REDIS_PRIMARY_URL)) + let conn = redis::Client::open(REDIS_PRIMARY_URL.0.clone()) .expect("Create redis client failed.") .get_async_connection() .await @@ -109,9 +115,7 @@ async fn emit_transactions_for_test() -> Result<()> { let duration = 10; let target_tps = BLOB_STORAGE_SIZE / duration; let cluster_args = ClusterArgs { - targets: Some(vec![url::Url::parse(TESTNET_REST_API_URL) - .context("Cannot parse default fullnode url") - .unwrap()]), + targets: Some(vec![(*TESTNET_REST_API_URL).clone()]), targets_file: None, reuse_accounts: false, chain_id: ChainId::test(), @@ -206,7 +210,8 @@ async fn setup_test() { // We will then simulate chaos by using (1) docker exec (2) docker-compose scale = #[tokio::test] pub async fn verify_docker_compose_setup() { - reqwest::get(&format!("{}/v1", TESTNET_REST_API_URL)) + let url = format!("{}v1", *TESTNET_REST_API_URL); + reqwest::get(&url) .await .unwrap() .error_for_status() @@ -222,11 +227,11 @@ async fn test_cold_start_cache_worker_progress() { let tmp_dir = 
TempDir::new().expect("Could not create temp dir"); // start with a new file store each time let cache_worker_config = IndexerGrpcCacheWorkerConfig { - fullnode_grpc_address: TESTNET_FULLNODE_GRPC_URL.to_string(), + fullnode_grpc_address: (*TESTNET_FULLNODE_GRPC_URL).clone(), file_store_config: IndexerGrpcFileStoreConfig::LocalFileStore(LocalFileStore { local_file_store_path: tmp_dir.path().to_path_buf(), }), - redis_main_instance_address: REDIS_PRIMARY_URL.to_string(), + redis_main_instance_address: (*REDIS_PRIMARY_URL).clone(), }; let (_cache_worker_port, _cache_worker_handle) = @@ -234,7 +239,7 @@ async fn test_cold_start_cache_worker_progress() { .await .expect("Failed to start CacheWorker"); - let conn = redis::Client::open(format!("redis://{}", REDIS_PRIMARY_URL.to_string())) + let conn = redis::Client::open((*REDIS_PRIMARY_URL).0.clone()) .expect("Create redis client failed.") .get_async_connection() .await @@ -288,15 +293,15 @@ async fn test_cold_start_file_store_worker_progress() { let tmp_dir = TempDir::new().expect("Could not create temp dir"); // start with a new file store each time let cache_worker_config = IndexerGrpcCacheWorkerConfig { - fullnode_grpc_address: TESTNET_FULLNODE_GRPC_URL.to_string(), + fullnode_grpc_address: (*TESTNET_FULLNODE_GRPC_URL).clone(), file_store_config: IndexerGrpcFileStoreConfig::LocalFileStore(LocalFileStore { local_file_store_path: tmp_dir.path().to_path_buf(), }), - redis_main_instance_address: REDIS_PRIMARY_URL.to_string(), + redis_main_instance_address: (*REDIS_PRIMARY_URL).clone(), }; let file_store_worker_config = IndexerGrpcFileStoreWorkerConfig { - redis_main_instance_address: REDIS_PRIMARY_URL.to_string(), + redis_main_instance_address: (*REDIS_PRIMARY_URL).clone(), file_store_config: IndexerGrpcFileStoreConfig::LocalFileStore(LocalFileStore { local_file_store_path: tmp_dir.path().to_path_buf(), }), diff --git a/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs index 211cd6dd623c6..3594365add9b1 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-server-framework/src/lib.rs @@ -27,6 +27,9 @@ impl ServerArgs { setup_logging(); setup_panic_handler(); let config = load::>(&self.config_path)?; + config + .validate() + .context("Config did not pass validation")?; run_server_with_config(config).await } } @@ -75,6 +78,10 @@ impl RunnableConfig for GenericConfig where T: RunnableConfig, { + fn validate(&self) -> Result<()> { + self.server_config.validate() + } + async fn run(&self) -> Result<()> { self.server_config.run().await } @@ -87,7 +94,15 @@ where /// RunnableConfig is a trait that all services must implement for their configuration. #[async_trait::async_trait] pub trait RunnableConfig: DeserializeOwned + Send + Sync + 'static { + // Validate the config. + fn validate(&self) -> Result<()> { + Ok(()) + } + + // Run something based on the config. async fn run(&self) -> Result<()>; + + // Get the server name. fn get_server_name(&self) -> String; } @@ -109,7 +124,7 @@ pub struct CrashInfo { /// Invoke to ensure process exits on a thread panic. /// -/// Tokio's default behavior is to catch panics and ignore them. Invoking this function will +/// Tokio's default behavior is to catch panics and ignore them. Invoking this function will /// ensure that all subsequent thread panics (even Tokio threads) will report the /// details/backtrace and then exit. 
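Returning to the validate() hook added to RunnableConfig above: it runs once in ServerArgs::run before the service starts, so a bad config fails fast. A hedged sketch of a downstream implementation (the service name and fields here are illustrative, not an existing worker):

```rust
use anyhow::{bail, Result};
use aptos_indexer_grpc_server_framework::RunnableConfig;
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields)]
struct ExampleWorkerConfig {
    worker_count: usize,
}

#[async_trait::async_trait]
impl RunnableConfig for ExampleWorkerConfig {
    // Rejected before run() is ever called.
    fn validate(&self) -> Result<()> {
        if self.worker_count == 0 {
            bail!("worker_count must be at least 1");
        }
        Ok(())
    }

    async fn run(&self) -> Result<()> {
        // Long-running work would go here; callers treat returning as unexpected.
        Ok(())
    }

    fn get_server_name(&self) -> String {
        "exampleworker".to_string()
    }
}
```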
pub fn setup_panic_handler() { diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-utils/Cargo.toml index c1234f1dce6f0..b88da0f164ca4 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/Cargo.toml +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/Cargo.toml @@ -39,4 +39,5 @@ toml = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +url = { workspace = true } warp = { workspace = true } diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs index 418187d51c651..c0823695a2609 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/cache_operator.rs @@ -118,21 +118,21 @@ impl CacheOperator { } // Set up the cache if needed. - pub async fn cache_setup_if_needed(&mut self) -> bool { + pub async fn cache_setup_if_needed(&mut self) -> anyhow::Result { let version_inserted: bool = redis::cmd("SET") .arg(CACHE_KEY_LATEST_VERSION) .arg(CACHE_DEFAULT_LATEST_VERSION_NUMBER) .arg("NX") .query_async(&mut self.conn) .await - .expect("Redis latest_version check failed."); + .context("Redis latest_version check failed.")?; if version_inserted { tracing::info!( initialized_latest_version = CACHE_DEFAULT_LATEST_VERSION_NUMBER, "Cache latest version is initialized." ); } - version_inserted + Ok(version_inserted) } // Update the chain id in cache if missing; otherwise, verify the chain id. @@ -144,7 +144,7 @@ impl CacheOperator { .arg(chain_id) .invoke_async(&mut self.conn) .await - .expect("Redis chain id update/verification failed."); + .context("Redis chain id update/verification failed.")?; if result != 1 { anyhow::bail!("Chain id is not correct."); } @@ -255,11 +255,11 @@ impl CacheOperator { .arg(version) .invoke_async(&mut self.conn) .await - .expect("Redis latest version update failed.") + .context("Redis latest version update failed.")? { 2 => { tracing::error!(version=version, "Redis latest version update failed. The version is beyond the next expected version."); - panic!("version is not right."); + Err(anyhow::anyhow!("Version is not right.")) }, _ => Ok(()), } @@ -308,7 +308,7 @@ mod tests { let mut cache_operator: CacheOperator = CacheOperator::new(mock_connection); - assert!(cache_operator.cache_setup_if_needed().await); + assert!(cache_operator.cache_setup_if_needed().await.unwrap()); } #[tokio::test] @@ -324,7 +324,7 @@ mod tests { let mut cache_operator: CacheOperator = CacheOperator::new(mock_connection); - assert!(!cache_operator.cache_setup_if_needed().await); + assert!(!cache_operator.cache_setup_if_needed().await.unwrap()); } // Cache coverage status tests. #[tokio::test] diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/lib.rs index 2dc1516cdd034..26e97ad17462b 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/lib.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/lib.rs @@ -5,29 +5,31 @@ pub mod cache_operator; pub mod config; pub mod constants; pub mod file_store_operator; +pub mod types; use aptos_protos::{ internal::fullnode::v1::fullnode_data_client::FullnodeDataClient, transaction::v1::Transaction, util::timestamp::Timestamp, }; use prost::Message; +use url::Url; pub type GrpcClientType = FullnodeDataClient; /// Create a gRPC client with exponential backoff. 
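One more usage note, on the RedisUrl newtype introduced below in indexer-grpc-utils/src/types.rs: it rejects any scheme other than redis:// both when parsed and when deserialized from a config, so a mis-typed address fails at startup rather than at connection time. A small sketch of the expected behavior:

```rust
use std::str::FromStr;

use aptos_indexer_grpc_utils::types::RedisUrl;

fn redis_url_sketch() {
    // A redis:// URL parses and derefs to a plain url::Url.
    let url = RedisUrl::from_str("redis://127.0.0.1:6379").expect("valid redis URL");
    assert_eq!(url.scheme(), "redis");

    // Any other scheme is rejected; Deserialize applies the same check.
    assert!(RedisUrl::from_str("http://127.0.0.1:6379").is_err());
}
```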
-pub async fn create_grpc_client(address: String) -> GrpcClientType { +pub async fn create_grpc_client(address: Url) -> GrpcClientType { backoff::future::retry(backoff::ExponentialBackoff::default(), || async { - match FullnodeDataClient::connect(address.clone()).await { + match FullnodeDataClient::connect(address.to_string()).await { Ok(client) => { tracing::info!( - address = address.clone(), + address = address.to_string(), "[Indexer Cache] Connected to indexer gRPC server." ); Ok(client) }, Err(e) => { tracing::error!( - address = address.clone(), + address = address.to_string(), "[Indexer Cache] Failed to connect to indexer gRPC server: {}", e ); diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/types.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/types.rs new file mode 100644 index 0000000000000..6cf24da14754d --- /dev/null +++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/types.rs @@ -0,0 +1,62 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use serde::{Deserialize, Serialize}; +use std::{ + fmt::{Display, Formatter}, + ops::Deref, + str::FromStr, +}; +use url::Url; + +/// A URL that only allows the redis:// scheme. +#[derive(Clone, Debug, Eq, PartialEq, Serialize)] +pub struct RedisUrl(pub Url); + +impl FromStr for RedisUrl { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let url = Url::parse(s)?; + if url.scheme() != "redis" { + return Err(anyhow::anyhow!("Invalid scheme: {}", url.scheme())); + } + Ok(RedisUrl(url)) + } +} + +impl<'de> Deserialize<'de> for RedisUrl { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let url = Url::deserialize(deserializer)?; + if url.scheme() != "redis" { + return Err(serde::de::Error::custom(format!( + "Invalid scheme: {}", + url.scheme() + ))); + } + Ok(Self(url)) + } +} + +impl Deref for RedisUrl { + type Target = Url; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl From for Url { + fn from(redis_url: RedisUrl) -> Self { + redis_url.0 + } +} + +impl Display for RedisUrl { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/ecosystem/nft-metadata-crawler-parser/src/lib.rs b/ecosystem/nft-metadata-crawler-parser/src/lib.rs index 8873d32003e62..1bc1279c88c27 100644 --- a/ecosystem/nft-metadata-crawler-parser/src/lib.rs +++ b/ecosystem/nft-metadata-crawler-parser/src/lib.rs @@ -16,7 +16,7 @@ pub async fn get_uri_metadata(url: String) -> anyhow::Result<(String, u32)> { .timeout(Duration::from_secs(MAX_HEAD_REQUEST_RETRY_SECONDS)) .build() .context("Failed to build reqwest client")?; - let request = client.head(&url); + let request = client.head(url.trim()); let response = request.send().await?; let headers = response.headers(); diff --git a/ecosystem/nft-metadata-crawler-parser/src/utils/image_optimizer.rs b/ecosystem/nft-metadata-crawler-parser/src/utils/image_optimizer.rs index c64f4846bf09e..a1232c18a5e52 100644 --- a/ecosystem/nft-metadata-crawler-parser/src/utils/image_optimizer.rs +++ b/ecosystem/nft-metadata-crawler-parser/src/utils/image_optimizer.rs @@ -53,7 +53,7 @@ impl ImageOptimizer { .context("Failed to build reqwest client")?; let response = client - .get(&uri) + .get(uri.trim()) .send() .await .context("Failed to get image")?; diff --git a/ecosystem/nft-metadata-crawler-parser/src/utils/json_parser.rs b/ecosystem/nft-metadata-crawler-parser/src/utils/json_parser.rs index 3fdbef86b083d..fa5952fc74cc4 100644 --- 
a/ecosystem/nft-metadata-crawler-parser/src/utils/json_parser.rs +++ b/ecosystem/nft-metadata-crawler-parser/src/utils/json_parser.rs @@ -57,7 +57,7 @@ impl JSONParser { .context("Failed to build reqwest client")?; let response = client - .get(&uri) + .get(uri.trim()) .send() .await .context("Failed to get JSON")?; diff --git a/ecosystem/python/sdk/aptos_sdk/package_publisher.py b/ecosystem/python/sdk/aptos_sdk/package_publisher.py index 30dcada7dd2f2..c4367185af548 100644 --- a/ecosystem/python/sdk/aptos_sdk/package_publisher.py +++ b/ecosystem/python/sdk/aptos_sdk/package_publisher.py @@ -2,9 +2,10 @@ # SPDX-License-Identifier: Apache-2.0 import os -import tomllib from typing import List +import tomli + from .account import Account from .account_address import AccountAddress from .async_client import RestClient @@ -57,7 +58,7 @@ async def publish_package_in_path( large_package_address: AccountAddress = MODULE_ADDRESS, ) -> List[str]: with open(os.path.join(package_dir, "Move.toml"), "rb") as f: - data = tomllib.load(f) + data = tomli.load(f) package = data["package"]["name"] package_build_dir = os.path.join(package_dir, "build", package) diff --git a/ecosystem/python/sdk/poetry.lock b/ecosystem/python/sdk/poetry.lock index 00e903a9cfc30..2a8caba36ac6b 100644 --- a/ecosystem/python/sdk/poetry.lock +++ b/ecosystem/python/sdk/poetry.lock @@ -1,9 +1,10 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. [[package]] name = "anyio" version = "3.7.1" description = "High level compatibility layer for multiple asynchronous event loop implementations" +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -25,6 +26,7 @@ trio = ["trio (<0.22)"] name = "autoflake" version = "2.2.0" description = "Removes unused imports and unused variables" +category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -40,6 +42,7 @@ tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} name = "black" version = "23.7.0" description = "The uncompromising code formatter." +category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -86,6 +89,7 @@ uvloop = ["uvloop (>=0.15.2)"] name = "certifi" version = "2023.5.7" description = "Python package for providing Mozilla's CA Bundle." +category = "main" optional = false python-versions = ">=3.6" files = [ @@ -97,6 +101,7 @@ files = [ name = "cffi" version = "1.15.1" description = "Foreign Function Interface for Python calling C code." +category = "main" optional = false python-versions = "*" files = [ @@ -173,6 +178,7 @@ pycparser = "*" name = "click" version = "8.1.5" description = "Composable command line interface toolkit" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -187,6 +193,7 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." 
+category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -198,6 +205,7 @@ files = [ name = "coverage" version = "7.2.7" description = "Code coverage measurement for Python" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -270,6 +278,7 @@ toml = ["tomli"] name = "exceptiongroup" version = "1.1.2" description = "Backport of PEP 654 (exception groups)" +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -284,6 +293,7 @@ test = ["pytest (>=6)"] name = "flake8" version = "6.0.0" description = "the modular source code checker: pep8 pyflakes and co" +category = "dev" optional = false python-versions = ">=3.8.1" files = [ @@ -300,6 +310,7 @@ pyflakes = ">=3.0.0,<3.1.0" name = "h11" version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -311,6 +322,7 @@ files = [ name = "h2" version = "4.1.0" description = "HTTP/2 State-Machine based protocol implementation" +category = "main" optional = false python-versions = ">=3.6.1" files = [ @@ -326,6 +338,7 @@ hyperframe = ">=6.0,<7" name = "hpack" version = "4.0.0" description = "Pure-Python HPACK header compression" +category = "main" optional = false python-versions = ">=3.6.1" files = [ @@ -337,6 +350,7 @@ files = [ name = "httpcore" version = "0.17.3" description = "A minimal low-level HTTP client." +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -348,16 +362,17 @@ files = [ anyio = ">=3.0,<5.0" certifi = "*" h11 = ">=0.13,<0.15" -sniffio = "==1.*" +sniffio = ">=1.0.0,<2.0.0" [package.extras] http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] +socks = ["socksio (>=1.0.0,<2.0.0)"] [[package]] name = "httpx" version = "0.24.1" description = "The next generation HTTP client." +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -373,14 +388,15 @@ sniffio = "*" [package.extras] brotli = ["brotli", "brotlicffi"] -cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +cli = ["click (>=8.0.0,<9.0.0)", "pygments (>=2.0.0,<3.0.0)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] +socks = ["socksio (>=1.0.0,<2.0.0)"] [[package]] name = "hyperframe" version = "6.0.1" description = "HTTP/2 framing layer for Python" +category = "main" optional = false python-versions = ">=3.6.1" files = [ @@ -392,6 +408,7 @@ files = [ name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" +category = "main" optional = false python-versions = ">=3.5" files = [ @@ -403,6 +420,7 @@ files = [ name = "isort" version = "5.12.0" description = "A Python utility / library to sort Python imports." +category = "dev" optional = false python-versions = ">=3.8.0" files = [ @@ -420,6 +438,7 @@ requirements-deprecated-finder = ["pip-api", "pipreqs"] name = "mccabe" version = "0.7.0" description = "McCabe checker, plugin for flake8" +category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -431,6 +450,7 @@ files = [ name = "mypy" version = "1.4.1" description = "Optional static typing for Python" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -477,6 +497,7 @@ reports = ["lxml"] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
+category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -488,6 +509,7 @@ files = [ name = "packaging" version = "23.1" description = "Core utilities for Python packages" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -499,6 +521,7 @@ files = [ name = "pathspec" version = "0.11.1" description = "Utility library for gitignore style pattern matching of file paths." +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -510,6 +533,7 @@ files = [ name = "platformdirs" version = "3.9.1" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -525,6 +549,7 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.3.1)", "pytest- name = "pycodestyle" version = "2.10.0" description = "Python style guide checker" +category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -536,6 +561,7 @@ files = [ name = "pycparser" version = "2.21" description = "C parser in Python" +category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -547,6 +573,7 @@ files = [ name = "pyflakes" version = "3.0.1" description = "passive checker of Python programs" +category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -558,6 +585,7 @@ files = [ name = "pynacl" version = "1.5.0" description = "Python binding to the Networking and Cryptography (NaCl) library" +category = "main" optional = false python-versions = ">=3.6" files = [ @@ -584,6 +612,7 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] name = "sniffio" version = "1.3.0" description = "Sniff out which async library your code is running under" +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -595,6 +624,7 @@ files = [ name = "tomli" version = "2.0.1" description = "A lil' TOML parser" +category = "main" optional = false python-versions = ">=3.7" files = [ @@ -606,6 +636,7 @@ files = [ name = "typing-extensions" version = "4.7.1" description = "Backported and Experimental Type Hints for Python 3.7+" +category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -616,4 +647,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "63128608945aa6acdc8db89527b14f688debf7bf7bfc9b361b190ab3ca81a8b5" +content-hash = "2ba28b17fbb5236f274ce2a2c83ce72e6494252860424944ed87417f2dacf62a" diff --git a/ecosystem/python/sdk/pyproject.toml b/ecosystem/python/sdk/pyproject.toml index 8bc577532b699..3ddf41d06ac27 100644 --- a/ecosystem/python/sdk/pyproject.toml +++ b/ecosystem/python/sdk/pyproject.toml @@ -14,6 +14,7 @@ h2 = "^4.1.0" httpx = "^0.24.0" PyNaCl = "^1.5.0" python = ">=3.8.1,<4.0" +tomli = "^2.0.1" [tool.poetry.dev-dependencies] autoflake = "2.2.0" diff --git a/ecosystem/typescript/sdk/CHANGELOG.md b/ecosystem/typescript/sdk/CHANGELOG.md index d193404a9a55c..6767c34b0aa62 100644 --- a/ecosystem/typescript/sdk/CHANGELOG.md +++ b/ecosystem/typescript/sdk/CHANGELOG.md @@ -4,6 +4,11 @@ All notable changes to the Aptos Node SDK will be captured in this file. 
This ch ## Unreleased +## 1.20.0 (2023-09-22) + +- Add current objects queries support - `getAccountOwnedObjects` +- Add `burnObject` transaction support in `AptosToken` + ## 1.19.0 (2023-08-24) - Add fungible asset queries support - `getAccountCoinsData`, `getAccountCoinsDataCount` diff --git a/ecosystem/typescript/sdk/package.json b/ecosystem/typescript/sdk/package.json index 6a4f5c0749a3f..f8ac285513164 100644 --- a/ecosystem/typescript/sdk/package.json +++ b/ecosystem/typescript/sdk/package.json @@ -86,5 +86,5 @@ "typedoc": "^0.23.20", "typescript": "4.8.2" }, - "version": "1.19.0" + "version": "1.20.0" } diff --git a/ecosystem/typescript/sdk/src/indexer/generated/operations.ts b/ecosystem/typescript/sdk/src/indexer/generated/operations.ts index 161891ed2e2a9..de11a6c8ad69e 100644 --- a/ecosystem/typescript/sdk/src/indexer/generated/operations.ts +++ b/ecosystem/typescript/sdk/src/indexer/generated/operations.ts @@ -78,6 +78,16 @@ export type GetCollectionsWithOwnedTokensQueryVariables = Types.Exact<{ export type GetCollectionsWithOwnedTokensQuery = { __typename?: 'query_root', current_collection_ownership_v2_view: Array<{ __typename?: 'current_collection_ownership_v2_view', collection_id?: string | null, collection_name?: string | null, collection_uri?: string | null, creator_address?: string | null, distinct_tokens?: any | null, last_transaction_version?: any | null, owner_address?: string | null, single_token_uri?: string | null, current_collection?: { __typename?: 'current_collections_v2', collection_id: string, collection_name: string, creator_address: string, current_supply: any, description: string, last_transaction_timestamp: any, last_transaction_version: any, mutable_description?: boolean | null, max_supply?: any | null, mutable_uri?: boolean | null, table_handle_v1?: string | null, token_standard: string, total_minted_v2?: any | null, uri: string } | null }> }; +export type GetCurrentObjectsQueryVariables = Types.Exact<{ + where_condition?: Types.InputMaybe; + offset?: Types.InputMaybe; + limit?: Types.InputMaybe; + order_by?: Types.InputMaybe | Types.Current_Objects_Order_By>; +}>; + + +export type GetCurrentObjectsQuery = { __typename?: 'query_root', current_objects: Array<{ __typename?: 'current_objects', allow_ungated_transfer: boolean, state_key_hash: string, owner_address: string, object_address: string, last_transaction_version: any, last_guid_creation_num: any, is_deleted: boolean }> }; + export type GetDelegatedStakingActivitiesQueryVariables = Types.Exact<{ delegatorAddress?: Types.InputMaybe; poolAddress?: Types.InputMaybe; diff --git a/ecosystem/typescript/sdk/src/indexer/generated/queries.ts b/ecosystem/typescript/sdk/src/indexer/generated/queries.ts index b60c927468001..cb9037093c548 100644 --- a/ecosystem/typescript/sdk/src/indexer/generated/queries.ts +++ b/ecosystem/typescript/sdk/src/indexer/generated/queries.ts @@ -254,6 +254,24 @@ export const GetCollectionsWithOwnedTokens = ` } } `; +export const GetCurrentObjects = ` + query getCurrentObjects($where_condition: current_objects_bool_exp, $offset: Int, $limit: Int, $order_by: [current_objects_order_by!]) { + current_objects( + where: $where_condition + offset: $offset + limit: $limit + order_by: $order_by + ) { + allow_ungated_transfer + state_key_hash + owner_address + object_address + last_transaction_version + last_guid_creation_num + is_deleted + } +} + `; export const GetDelegatedStakingActivities = ` query getDelegatedStakingActivities($delegatorAddress: String, $poolAddress: String) { 
delegated_staking_activities( @@ -458,6 +476,9 @@ export function getSdk(client: GraphQLClient, withWrapper: SdkFunctionWrapper = getCollectionsWithOwnedTokens(variables: Types.GetCollectionsWithOwnedTokensQueryVariables, requestHeaders?: Dom.RequestInit["headers"]): Promise { return withWrapper((wrappedRequestHeaders) => client.request(GetCollectionsWithOwnedTokens, variables, {...requestHeaders, ...wrappedRequestHeaders}), 'getCollectionsWithOwnedTokens', 'query'); }, + getCurrentObjects(variables?: Types.GetCurrentObjectsQueryVariables, requestHeaders?: Dom.RequestInit["headers"]): Promise { + return withWrapper((wrappedRequestHeaders) => client.request(GetCurrentObjects, variables, {...requestHeaders, ...wrappedRequestHeaders}), 'getCurrentObjects', 'query'); + }, getDelegatedStakingActivities(variables?: Types.GetDelegatedStakingActivitiesQueryVariables, requestHeaders?: Dom.RequestInit["headers"]): Promise { return withWrapper((wrappedRequestHeaders) => client.request(GetDelegatedStakingActivities, variables, {...requestHeaders, ...wrappedRequestHeaders}), 'getDelegatedStakingActivities', 'query'); }, diff --git a/ecosystem/typescript/sdk/src/indexer/generated/types.ts b/ecosystem/typescript/sdk/src/indexer/generated/types.ts index be08b54c9cad2..a43992457c689 100644 --- a/ecosystem/typescript/sdk/src/indexer/generated/types.ts +++ b/ecosystem/typescript/sdk/src/indexer/generated/types.ts @@ -87,7 +87,7 @@ export type Account_Transactions = { /** An array relationship */ delegated_staking_activities: Array; /** An array relationship */ - fungible_asset_activities: Array; + fungible_asset_activities: Array; /** An array relationship */ token_activities: Array; /** An aggregate relationship */ @@ -132,11 +132,11 @@ export type Account_TransactionsDelegated_Staking_ActivitiesArgs = { /** columns and relationships of "account_transactions" */ export type Account_TransactionsFungible_Asset_ActivitiesArgs = { - distinct_on?: InputMaybe>; + distinct_on?: InputMaybe>; limit?: InputMaybe; offset?: InputMaybe; - order_by?: InputMaybe>; - where?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; }; @@ -223,7 +223,7 @@ export type Account_Transactions_Bool_Exp = { account_address?: InputMaybe; coin_activities?: InputMaybe; delegated_staking_activities?: InputMaybe; - fungible_asset_activities?: InputMaybe; + fungible_asset_activities?: InputMaybe; token_activities?: InputMaybe; token_activities_v2?: InputMaybe; transaction_version?: InputMaybe; @@ -248,7 +248,7 @@ export type Account_Transactions_Order_By = { account_address?: InputMaybe; coin_activities_aggregate?: InputMaybe; delegated_staking_activities_aggregate?: InputMaybe; - fungible_asset_activities_aggregate?: InputMaybe; + fungible_asset_activities_aggregate?: InputMaybe; token_activities_aggregate?: InputMaybe; token_activities_v2_aggregate?: InputMaybe; transaction_version?: InputMaybe; @@ -945,7 +945,7 @@ export type Coin_Activities = { activity_type: Scalars['String']; amount: Scalars['numeric']; /** An array relationship */ - aptos_names: Array; + aptos_names: Array; block_height: Scalars['bigint']; /** An object relationship */ coin_info?: Maybe; @@ -965,11 +965,11 @@ export type Coin_Activities = { /** columns and relationships of "coin_activities" */ export type Coin_ActivitiesAptos_NamesArgs = { - distinct_on?: InputMaybe>; + distinct_on?: InputMaybe>; limit?: InputMaybe; offset?: InputMaybe; - order_by?: InputMaybe>; - where?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; }; /** aggregated selection of 
"coin_activities" */ @@ -1045,7 +1045,7 @@ export type Coin_Activities_Bool_Exp = { _or?: InputMaybe>; activity_type?: InputMaybe; amount?: InputMaybe; - aptos_names?: InputMaybe; + aptos_names?: InputMaybe; block_height?: InputMaybe; coin_info?: InputMaybe; coin_type?: InputMaybe; @@ -1131,7 +1131,7 @@ export type Coin_Activities_Min_Order_By = { export type Coin_Activities_Order_By = { activity_type?: InputMaybe; amount?: InputMaybe; - aptos_names_aggregate?: InputMaybe; + aptos_names_aggregate?: InputMaybe; block_height?: InputMaybe; coin_info?: InputMaybe; coin_type?: InputMaybe; @@ -1699,9 +1699,11 @@ export type Current_Ans_Lookup = { all_token_ownerships_aggregate: Current_Token_Ownerships_Aggregate; domain: Scalars['String']; expiration_timestamp: Scalars['timestamp']; + is_deleted: Scalars['Boolean']; last_transaction_version: Scalars['bigint']; registered_address?: Maybe; subdomain: Scalars['String']; + token_name: Scalars['String']; }; @@ -1724,26 +1726,6 @@ export type Current_Ans_LookupAll_Token_Ownerships_AggregateArgs = { where?: InputMaybe; }; -/** order by aggregate values of table "current_ans_lookup" */ -export type Current_Ans_Lookup_Aggregate_Order_By = { - avg?: InputMaybe; - count?: InputMaybe; - max?: InputMaybe; - min?: InputMaybe; - stddev?: InputMaybe; - stddev_pop?: InputMaybe; - stddev_samp?: InputMaybe; - sum?: InputMaybe; - var_pop?: InputMaybe; - var_samp?: InputMaybe; - variance?: InputMaybe; -}; - -/** order by avg() on columns of table "current_ans_lookup" */ -export type Current_Ans_Lookup_Avg_Order_By = { - last_transaction_version?: InputMaybe; -}; - /** Boolean expression to filter rows from the table "current_ans_lookup". All fields are combined with a logical 'AND'. */ export type Current_Ans_Lookup_Bool_Exp = { _and?: InputMaybe>; @@ -1752,102 +1734,208 @@ export type Current_Ans_Lookup_Bool_Exp = { all_token_ownerships?: InputMaybe; domain?: InputMaybe; expiration_timestamp?: InputMaybe; + is_deleted?: InputMaybe; last_transaction_version?: InputMaybe; registered_address?: InputMaybe; subdomain?: InputMaybe; + token_name?: InputMaybe; }; -/** order by max() on columns of table "current_ans_lookup" */ -export type Current_Ans_Lookup_Max_Order_By = { +/** Ordering options when selecting data from "current_ans_lookup". 
*/ +export type Current_Ans_Lookup_Order_By = { + all_token_ownerships_aggregate?: InputMaybe; domain?: InputMaybe; expiration_timestamp?: InputMaybe; + is_deleted?: InputMaybe; last_transaction_version?: InputMaybe; registered_address?: InputMaybe; subdomain?: InputMaybe; + token_name?: InputMaybe; +}; + +/** select columns of table "current_ans_lookup" */ +export enum Current_Ans_Lookup_Select_Column { + /** column name */ + Domain = 'domain', + /** column name */ + ExpirationTimestamp = 'expiration_timestamp', + /** column name */ + IsDeleted = 'is_deleted', + /** column name */ + LastTransactionVersion = 'last_transaction_version', + /** column name */ + RegisteredAddress = 'registered_address', + /** column name */ + Subdomain = 'subdomain', + /** column name */ + TokenName = 'token_name' +} + +/** Streaming cursor of the table "current_ans_lookup" */ +export type Current_Ans_Lookup_Stream_Cursor_Input = { + /** Stream column input with initial value */ + initial_value: Current_Ans_Lookup_Stream_Cursor_Value_Input; + /** cursor ordering */ + ordering?: InputMaybe; +}; + +/** Initial value of the column from where the streaming should start */ +export type Current_Ans_Lookup_Stream_Cursor_Value_Input = { + domain?: InputMaybe; + expiration_timestamp?: InputMaybe; + is_deleted?: InputMaybe; + last_transaction_version?: InputMaybe; + registered_address?: InputMaybe; + subdomain?: InputMaybe; + token_name?: InputMaybe; +}; + +/** columns and relationships of "current_aptos_names" */ +export type Current_Aptos_Names = { + __typename?: 'current_aptos_names'; + domain?: Maybe; + expiration_timestamp?: Maybe; + is_primary?: Maybe; + last_transaction_version?: Maybe; + registered_address?: Maybe; + subdomain?: Maybe; + token_name?: Maybe; +}; + +/** order by aggregate values of table "current_aptos_names" */ +export type Current_Aptos_Names_Aggregate_Order_By = { + avg?: InputMaybe; + count?: InputMaybe; + max?: InputMaybe; + min?: InputMaybe; + stddev?: InputMaybe; + stddev_pop?: InputMaybe; + stddev_samp?: InputMaybe; + sum?: InputMaybe; + var_pop?: InputMaybe; + var_samp?: InputMaybe; + variance?: InputMaybe; +}; + +/** order by avg() on columns of table "current_aptos_names" */ +export type Current_Aptos_Names_Avg_Order_By = { + last_transaction_version?: InputMaybe; +}; + +/** Boolean expression to filter rows from the table "current_aptos_names". All fields are combined with a logical 'AND'. */ +export type Current_Aptos_Names_Bool_Exp = { + _and?: InputMaybe>; + _not?: InputMaybe; + _or?: InputMaybe>; + domain?: InputMaybe; + expiration_timestamp?: InputMaybe; + is_primary?: InputMaybe; + last_transaction_version?: InputMaybe; + registered_address?: InputMaybe; + subdomain?: InputMaybe; + token_name?: InputMaybe; }; -/** order by min() on columns of table "current_ans_lookup" */ -export type Current_Ans_Lookup_Min_Order_By = { +/** order by max() on columns of table "current_aptos_names" */ +export type Current_Aptos_Names_Max_Order_By = { domain?: InputMaybe; expiration_timestamp?: InputMaybe; last_transaction_version?: InputMaybe; registered_address?: InputMaybe; subdomain?: InputMaybe; + token_name?: InputMaybe; }; -/** Ordering options when selecting data from "current_ans_lookup". 
*/ -export type Current_Ans_Lookup_Order_By = { - all_token_ownerships_aggregate?: InputMaybe; +/** order by min() on columns of table "current_aptos_names" */ +export type Current_Aptos_Names_Min_Order_By = { domain?: InputMaybe; expiration_timestamp?: InputMaybe; last_transaction_version?: InputMaybe; registered_address?: InputMaybe; subdomain?: InputMaybe; + token_name?: InputMaybe; }; -/** select columns of table "current_ans_lookup" */ -export enum Current_Ans_Lookup_Select_Column { +/** Ordering options when selecting data from "current_aptos_names". */ +export type Current_Aptos_Names_Order_By = { + domain?: InputMaybe; + expiration_timestamp?: InputMaybe; + is_primary?: InputMaybe; + last_transaction_version?: InputMaybe; + registered_address?: InputMaybe; + subdomain?: InputMaybe; + token_name?: InputMaybe; +}; + +/** select columns of table "current_aptos_names" */ +export enum Current_Aptos_Names_Select_Column { /** column name */ Domain = 'domain', /** column name */ ExpirationTimestamp = 'expiration_timestamp', /** column name */ + IsPrimary = 'is_primary', + /** column name */ LastTransactionVersion = 'last_transaction_version', /** column name */ RegisteredAddress = 'registered_address', /** column name */ - Subdomain = 'subdomain' + Subdomain = 'subdomain', + /** column name */ + TokenName = 'token_name' } -/** order by stddev() on columns of table "current_ans_lookup" */ -export type Current_Ans_Lookup_Stddev_Order_By = { +/** order by stddev() on columns of table "current_aptos_names" */ +export type Current_Aptos_Names_Stddev_Order_By = { last_transaction_version?: InputMaybe; }; -/** order by stddev_pop() on columns of table "current_ans_lookup" */ -export type Current_Ans_Lookup_Stddev_Pop_Order_By = { +/** order by stddev_pop() on columns of table "current_aptos_names" */ +export type Current_Aptos_Names_Stddev_Pop_Order_By = { last_transaction_version?: InputMaybe; }; -/** order by stddev_samp() on columns of table "current_ans_lookup" */ -export type Current_Ans_Lookup_Stddev_Samp_Order_By = { +/** order by stddev_samp() on columns of table "current_aptos_names" */ +export type Current_Aptos_Names_Stddev_Samp_Order_By = { last_transaction_version?: InputMaybe; }; -/** Streaming cursor of the table "current_ans_lookup" */ -export type Current_Ans_Lookup_Stream_Cursor_Input = { +/** Streaming cursor of the table "current_aptos_names" */ +export type Current_Aptos_Names_Stream_Cursor_Input = { /** Stream column input with initial value */ - initial_value: Current_Ans_Lookup_Stream_Cursor_Value_Input; + initial_value: Current_Aptos_Names_Stream_Cursor_Value_Input; /** cursor ordering */ ordering?: InputMaybe; }; /** Initial value of the column from where the streaming should start */ -export type Current_Ans_Lookup_Stream_Cursor_Value_Input = { +export type Current_Aptos_Names_Stream_Cursor_Value_Input = { domain?: InputMaybe; expiration_timestamp?: InputMaybe; + is_primary?: InputMaybe; last_transaction_version?: InputMaybe; registered_address?: InputMaybe; subdomain?: InputMaybe; + token_name?: InputMaybe; }; -/** order by sum() on columns of table "current_ans_lookup" */ -export type Current_Ans_Lookup_Sum_Order_By = { +/** order by sum() on columns of table "current_aptos_names" */ +export type Current_Aptos_Names_Sum_Order_By = { last_transaction_version?: InputMaybe; }; -/** order by var_pop() on columns of table "current_ans_lookup" */ -export type Current_Ans_Lookup_Var_Pop_Order_By = { +/** order by var_pop() on columns of table "current_aptos_names" */ +export 
type Current_Aptos_Names_Var_Pop_Order_By = { last_transaction_version?: InputMaybe; }; -/** order by var_samp() on columns of table "current_ans_lookup" */ -export type Current_Ans_Lookup_Var_Samp_Order_By = { +/** order by var_samp() on columns of table "current_aptos_names" */ +export type Current_Aptos_Names_Var_Samp_Order_By = { last_transaction_version?: InputMaybe; }; -/** order by variance() on columns of table "current_ans_lookup" */ -export type Current_Ans_Lookup_Variance_Order_By = { +/** order by variance() on columns of table "current_aptos_names" */ +export type Current_Aptos_Names_Variance_Order_By = { last_transaction_version?: InputMaybe; }; @@ -2419,6 +2507,80 @@ export type Current_Delegated_Staking_Pool_Balances_Stream_Cursor_Value_Input = total_shares?: InputMaybe; }; +/** columns and relationships of "current_delegated_voter" */ +export type Current_Delegated_Voter = { + __typename?: 'current_delegated_voter'; + delegation_pool_address: Scalars['String']; + delegator_address: Scalars['String']; + last_transaction_timestamp: Scalars['timestamp']; + last_transaction_version: Scalars['bigint']; + pending_voter?: Maybe; + table_handle?: Maybe; + voter?: Maybe; +}; + +/** Boolean expression to filter rows from the table "current_delegated_voter". All fields are combined with a logical 'AND'. */ +export type Current_Delegated_Voter_Bool_Exp = { + _and?: InputMaybe>; + _not?: InputMaybe; + _or?: InputMaybe>; + delegation_pool_address?: InputMaybe; + delegator_address?: InputMaybe; + last_transaction_timestamp?: InputMaybe; + last_transaction_version?: InputMaybe; + pending_voter?: InputMaybe; + table_handle?: InputMaybe; + voter?: InputMaybe; +}; + +/** Ordering options when selecting data from "current_delegated_voter". */ +export type Current_Delegated_Voter_Order_By = { + delegation_pool_address?: InputMaybe; + delegator_address?: InputMaybe; + last_transaction_timestamp?: InputMaybe; + last_transaction_version?: InputMaybe; + pending_voter?: InputMaybe; + table_handle?: InputMaybe; + voter?: InputMaybe; +}; + +/** select columns of table "current_delegated_voter" */ +export enum Current_Delegated_Voter_Select_Column { + /** column name */ + DelegationPoolAddress = 'delegation_pool_address', + /** column name */ + DelegatorAddress = 'delegator_address', + /** column name */ + LastTransactionTimestamp = 'last_transaction_timestamp', + /** column name */ + LastTransactionVersion = 'last_transaction_version', + /** column name */ + PendingVoter = 'pending_voter', + /** column name */ + TableHandle = 'table_handle', + /** column name */ + Voter = 'voter' +} + +/** Streaming cursor of the table "current_delegated_voter" */ +export type Current_Delegated_Voter_Stream_Cursor_Input = { + /** Stream column input with initial value */ + initial_value: Current_Delegated_Voter_Stream_Cursor_Value_Input; + /** cursor ordering */ + ordering?: InputMaybe; +}; + +/** Initial value of the column from where the streaming should start */ +export type Current_Delegated_Voter_Stream_Cursor_Value_Input = { + delegation_pool_address?: InputMaybe; + delegator_address?: InputMaybe; + last_transaction_timestamp?: InputMaybe; + last_transaction_version?: InputMaybe; + pending_voter?: InputMaybe; + table_handle?: InputMaybe; + voter?: InputMaybe; +}; + /** columns and relationships of "current_delegator_balances" */ export type Current_Delegator_Balances = { __typename?: 'current_delegator_balances'; @@ -2781,7 +2943,7 @@ export type Current_Staking_Pool_Voter = { last_transaction_version: 
Scalars['bigint']; operator_address: Scalars['String']; /** An array relationship */ - operator_aptos_name: Array; + operator_aptos_name: Array; staking_pool_address: Scalars['String']; voter_address: Scalars['String']; }; @@ -2789,11 +2951,11 @@ export type Current_Staking_Pool_Voter = { /** columns and relationships of "current_staking_pool_voter" */ export type Current_Staking_Pool_VoterOperator_Aptos_NameArgs = { - distinct_on?: InputMaybe>; + distinct_on?: InputMaybe>; limit?: InputMaybe; offset?: InputMaybe; - order_by?: InputMaybe>; - where?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; }; /** Boolean expression to filter rows from the table "current_staking_pool_voter". All fields are combined with a logical 'AND'. */ @@ -2803,7 +2965,7 @@ export type Current_Staking_Pool_Voter_Bool_Exp = { _or?: InputMaybe>; last_transaction_version?: InputMaybe; operator_address?: InputMaybe; - operator_aptos_name?: InputMaybe; + operator_aptos_name?: InputMaybe; staking_pool_address?: InputMaybe; voter_address?: InputMaybe; }; @@ -2812,7 +2974,7 @@ export type Current_Staking_Pool_Voter_Bool_Exp = { export type Current_Staking_Pool_Voter_Order_By = { last_transaction_version?: InputMaybe; operator_address?: InputMaybe; - operator_aptos_name_aggregate?: InputMaybe; + operator_aptos_name_aggregate?: InputMaybe; staking_pool_address?: InputMaybe; voter_address?: InputMaybe; }; @@ -3102,6 +3264,10 @@ export type Current_Token_Datas_Stream_Cursor_Value_Input = { /** columns and relationships of "current_token_datas_v2" */ export type Current_Token_Datas_V2 = { __typename?: 'current_token_datas_v2'; + /** An object relationship */ + aptos_name?: Maybe; + /** An object relationship */ + cdn_token_uris?: Maybe; collection_id: Scalars['String']; /** An object relationship */ current_collection?: Maybe; @@ -3130,6 +3296,8 @@ export type Current_Token_Datas_V2_Bool_Exp = { _and?: InputMaybe>; _not?: InputMaybe; _or?: InputMaybe>; + aptos_name?: InputMaybe; + cdn_token_uris?: InputMaybe; collection_id?: InputMaybe; current_collection?: InputMaybe; description?: InputMaybe; @@ -3148,6 +3316,8 @@ export type Current_Token_Datas_V2_Bool_Exp = { /** Ordering options when selecting data from "current_token_datas_v2". */ export type Current_Token_Datas_V2_Order_By = { + aptos_name?: InputMaybe; + cdn_token_uris?: InputMaybe; collection_id?: InputMaybe; current_collection?: InputMaybe; description?: InputMaybe; @@ -3224,7 +3394,7 @@ export type Current_Token_Ownerships = { __typename?: 'current_token_ownerships'; amount: Scalars['numeric']; /** An object relationship */ - aptos_name?: Maybe; + aptos_name?: Maybe; collection_data_id_hash: Scalars['String']; collection_name: Scalars['String']; creator_address: Scalars['String']; @@ -3314,7 +3484,7 @@ export type Current_Token_Ownerships_Bool_Exp = { _not?: InputMaybe; _or?: InputMaybe>; amount?: InputMaybe; - aptos_name?: InputMaybe; + aptos_name?: InputMaybe; collection_data_id_hash?: InputMaybe; collection_name?: InputMaybe; creator_address?: InputMaybe; @@ -3395,7 +3565,7 @@ export type Current_Token_Ownerships_Min_Order_By = { /** Ordering options when selecting data from "current_token_ownerships". 
*/ export type Current_Token_Ownerships_Order_By = { amount?: InputMaybe; - aptos_name?: InputMaybe; + aptos_name?: InputMaybe; collection_data_id_hash?: InputMaybe; collection_name?: InputMaybe; creator_address?: InputMaybe; @@ -4468,7 +4638,7 @@ export type Fungible_Asset_Activities = { metadata?: Maybe; owner_address: Scalars['String']; /** An array relationship */ - owner_aptos_names: Array; + owner_aptos_names: Array; storage_id: Scalars['String']; token_standard: Scalars['String']; transaction_timestamp: Scalars['timestamp']; @@ -4479,11 +4649,34 @@ export type Fungible_Asset_Activities = { /** columns and relationships of "fungible_asset_activities" */ export type Fungible_Asset_ActivitiesOwner_Aptos_NamesArgs = { - distinct_on?: InputMaybe>; + distinct_on?: InputMaybe>; limit?: InputMaybe; offset?: InputMaybe; - order_by?: InputMaybe>; - where?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + +/** order by aggregate values of table "fungible_asset_activities" */ +export type Fungible_Asset_Activities_Aggregate_Order_By = { + avg?: InputMaybe; + count?: InputMaybe; + max?: InputMaybe; + min?: InputMaybe; + stddev?: InputMaybe; + stddev_pop?: InputMaybe; + stddev_samp?: InputMaybe; + sum?: InputMaybe; + var_pop?: InputMaybe; + var_samp?: InputMaybe; + variance?: InputMaybe; +}; + +/** order by avg() on columns of table "fungible_asset_activities" */ +export type Fungible_Asset_Activities_Avg_Order_By = { + amount?: InputMaybe; + block_height?: InputMaybe; + event_index?: InputMaybe; + transaction_version?: InputMaybe; }; /** Boolean expression to filter rows from the table "fungible_asset_activities". All fields are combined with a logical 'AND'. */ @@ -4502,7 +4695,7 @@ export type Fungible_Asset_Activities_Bool_Exp = { is_transaction_success?: InputMaybe; metadata?: InputMaybe; owner_address?: InputMaybe; - owner_aptos_names?: InputMaybe; + owner_aptos_names?: InputMaybe; storage_id?: InputMaybe; token_standard?: InputMaybe; transaction_timestamp?: InputMaybe; @@ -4510,6 +4703,38 @@ export type Fungible_Asset_Activities_Bool_Exp = { type?: InputMaybe; }; +/** order by max() on columns of table "fungible_asset_activities" */ +export type Fungible_Asset_Activities_Max_Order_By = { + amount?: InputMaybe; + asset_type?: InputMaybe; + block_height?: InputMaybe; + entry_function_id_str?: InputMaybe; + event_index?: InputMaybe; + gas_fee_payer_address?: InputMaybe; + owner_address?: InputMaybe; + storage_id?: InputMaybe; + token_standard?: InputMaybe; + transaction_timestamp?: InputMaybe; + transaction_version?: InputMaybe; + type?: InputMaybe; +}; + +/** order by min() on columns of table "fungible_asset_activities" */ +export type Fungible_Asset_Activities_Min_Order_By = { + amount?: InputMaybe; + asset_type?: InputMaybe; + block_height?: InputMaybe; + entry_function_id_str?: InputMaybe; + event_index?: InputMaybe; + gas_fee_payer_address?: InputMaybe; + owner_address?: InputMaybe; + storage_id?: InputMaybe; + token_standard?: InputMaybe; + transaction_timestamp?: InputMaybe; + transaction_version?: InputMaybe; + type?: InputMaybe; +}; + /** Ordering options when selecting data from "fungible_asset_activities". 
*/ export type Fungible_Asset_Activities_Order_By = { amount?: InputMaybe; @@ -4523,7 +4748,7 @@ export type Fungible_Asset_Activities_Order_By = { is_transaction_success?: InputMaybe; metadata?: InputMaybe; owner_address?: InputMaybe; - owner_aptos_names_aggregate?: InputMaybe; + owner_aptos_names_aggregate?: InputMaybe; storage_id?: InputMaybe; token_standard?: InputMaybe; transaction_timestamp?: InputMaybe; @@ -4565,6 +4790,30 @@ export enum Fungible_Asset_Activities_Select_Column { Type = 'type' } +/** order by stddev() on columns of table "fungible_asset_activities" */ +export type Fungible_Asset_Activities_Stddev_Order_By = { + amount?: InputMaybe; + block_height?: InputMaybe; + event_index?: InputMaybe; + transaction_version?: InputMaybe; +}; + +/** order by stddev_pop() on columns of table "fungible_asset_activities" */ +export type Fungible_Asset_Activities_Stddev_Pop_Order_By = { + amount?: InputMaybe; + block_height?: InputMaybe; + event_index?: InputMaybe; + transaction_version?: InputMaybe; +}; + +/** order by stddev_samp() on columns of table "fungible_asset_activities" */ +export type Fungible_Asset_Activities_Stddev_Samp_Order_By = { + amount?: InputMaybe; + block_height?: InputMaybe; + event_index?: InputMaybe; + transaction_version?: InputMaybe; +}; + /** Streaming cursor of the table "fungible_asset_activities" */ export type Fungible_Asset_Activities_Stream_Cursor_Input = { /** Stream column input with initial value */ @@ -4592,6 +4841,38 @@ export type Fungible_Asset_Activities_Stream_Cursor_Value_Input = { type?: InputMaybe; }; +/** order by sum() on columns of table "fungible_asset_activities" */ +export type Fungible_Asset_Activities_Sum_Order_By = { + amount?: InputMaybe; + block_height?: InputMaybe; + event_index?: InputMaybe; + transaction_version?: InputMaybe; +}; + +/** order by var_pop() on columns of table "fungible_asset_activities" */ +export type Fungible_Asset_Activities_Var_Pop_Order_By = { + amount?: InputMaybe; + block_height?: InputMaybe; + event_index?: InputMaybe; + transaction_version?: InputMaybe; +}; + +/** order by var_samp() on columns of table "fungible_asset_activities" */ +export type Fungible_Asset_Activities_Var_Samp_Order_By = { + amount?: InputMaybe; + block_height?: InputMaybe; + event_index?: InputMaybe; + transaction_version?: InputMaybe; +}; + +/** order by variance() on columns of table "fungible_asset_activities" */ +export type Fungible_Asset_Activities_Variance_Order_By = { + amount?: InputMaybe; + block_height?: InputMaybe; + event_index?: InputMaybe; + transaction_version?: InputMaybe; +}; + /** columns and relationships of "fungible_asset_metadata" */ export type Fungible_Asset_Metadata = { __typename?: 'fungible_asset_metadata'; @@ -5638,6 +5919,74 @@ export type Nft_Marketplace_V2_Nft_Marketplace_Activities_Stream_Cursor_Value_In transaction_version?: InputMaybe; }; +/** columns and relationships of "nft_metadata_crawler.parsed_asset_uris" */ +export type Nft_Metadata_Crawler_Parsed_Asset_Uris = { + __typename?: 'nft_metadata_crawler_parsed_asset_uris'; + asset_uri: Scalars['String']; + cdn_animation_uri?: Maybe; + cdn_image_uri?: Maybe; + cdn_json_uri?: Maybe; + raw_animation_uri?: Maybe; + raw_image_uri?: Maybe; +}; + +/** Boolean expression to filter rows from the table "nft_metadata_crawler.parsed_asset_uris". All fields are combined with a logical 'AND'. 
*/ +export type Nft_Metadata_Crawler_Parsed_Asset_Uris_Bool_Exp = { + _and?: InputMaybe>; + _not?: InputMaybe; + _or?: InputMaybe>; + asset_uri?: InputMaybe; + cdn_animation_uri?: InputMaybe; + cdn_image_uri?: InputMaybe; + cdn_json_uri?: InputMaybe; + raw_animation_uri?: InputMaybe; + raw_image_uri?: InputMaybe; +}; + +/** Ordering options when selecting data from "nft_metadata_crawler.parsed_asset_uris". */ +export type Nft_Metadata_Crawler_Parsed_Asset_Uris_Order_By = { + asset_uri?: InputMaybe; + cdn_animation_uri?: InputMaybe; + cdn_image_uri?: InputMaybe; + cdn_json_uri?: InputMaybe; + raw_animation_uri?: InputMaybe; + raw_image_uri?: InputMaybe; +}; + +/** select columns of table "nft_metadata_crawler.parsed_asset_uris" */ +export enum Nft_Metadata_Crawler_Parsed_Asset_Uris_Select_Column { + /** column name */ + AssetUri = 'asset_uri', + /** column name */ + CdnAnimationUri = 'cdn_animation_uri', + /** column name */ + CdnImageUri = 'cdn_image_uri', + /** column name */ + CdnJsonUri = 'cdn_json_uri', + /** column name */ + RawAnimationUri = 'raw_animation_uri', + /** column name */ + RawImageUri = 'raw_image_uri' +} + +/** Streaming cursor of the table "nft_metadata_crawler_parsed_asset_uris" */ +export type Nft_Metadata_Crawler_Parsed_Asset_Uris_Stream_Cursor_Input = { + /** Stream column input with initial value */ + initial_value: Nft_Metadata_Crawler_Parsed_Asset_Uris_Stream_Cursor_Value_Input; + /** cursor ordering */ + ordering?: InputMaybe; +}; + +/** Initial value of the column from where the streaming should start */ +export type Nft_Metadata_Crawler_Parsed_Asset_Uris_Stream_Cursor_Value_Input = { + asset_uri?: InputMaybe; + cdn_animation_uri?: InputMaybe; + cdn_image_uri?: InputMaybe; + cdn_json_uri?: InputMaybe; + raw_animation_uri?: InputMaybe; + raw_image_uri?: InputMaybe; +}; + /** columns and relationships of "num_active_delegator_per_pool" */ export type Num_Active_Delegator_Per_Pool = { __typename?: 'num_active_delegator_per_pool'; @@ -5999,6 +6348,8 @@ export type Query_Root = { current_ans_lookup: Array; /** fetch data from the table: "current_ans_lookup" using primary key columns */ current_ans_lookup_by_pk?: Maybe; + /** fetch data from the table: "current_aptos_names" */ + current_aptos_names: Array; /** fetch data from the table: "current_coin_balances" */ current_coin_balances: Array; /** fetch data from the table: "current_coin_balances" using primary key columns */ @@ -6019,6 +6370,10 @@ export type Query_Root = { current_delegated_staking_pool_balances: Array; /** fetch data from the table: "current_delegated_staking_pool_balances" using primary key columns */ current_delegated_staking_pool_balances_by_pk?: Maybe; + /** fetch data from the table: "current_delegated_voter" */ + current_delegated_voter: Array; + /** fetch data from the table: "current_delegated_voter" using primary key columns */ + current_delegated_voter_by_pk?: Maybe; /** fetch data from the table: "current_delegator_balances" */ current_delegator_balances: Array; /** fetch data from the table: "current_delegator_balances" using primary key columns */ @@ -6081,7 +6436,7 @@ export type Query_Root = { events: Array; /** fetch data from the table: "events" using primary key columns */ events_by_pk?: Maybe; - /** fetch data from the table: "fungible_asset_activities" */ + /** An array relationship */ fungible_asset_activities: Array; /** fetch data from the table: "fungible_asset_activities" using primary key columns */ fungible_asset_activities_by_pk?: Maybe; @@ -6121,6 +6476,10 @@ export 
type Query_Root = { nft_marketplace_v2_nft_marketplace_activities: Array; /** fetch data from the table: "nft_marketplace_v2.nft_marketplace_activities" using primary key columns */ nft_marketplace_v2_nft_marketplace_activities_by_pk?: Maybe; + /** fetch data from the table: "nft_metadata_crawler.parsed_asset_uris" */ + nft_metadata_crawler_parsed_asset_uris: Array; + /** fetch data from the table: "nft_metadata_crawler.parsed_asset_uris" using primary key columns */ + nft_metadata_crawler_parsed_asset_uris_by_pk?: Maybe; /** fetch data from the table: "num_active_delegator_per_pool" */ num_active_delegator_per_pool: Array; /** fetch data from the table: "processor_status" */ @@ -6356,6 +6715,15 @@ export type Query_RootCurrent_Ans_Lookup_By_PkArgs = { }; +export type Query_RootCurrent_Aptos_NamesArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + export type Query_RootCurrent_Coin_BalancesArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -6431,6 +6799,21 @@ export type Query_RootCurrent_Delegated_Staking_Pool_Balances_By_PkArgs = { }; +export type Query_RootCurrent_Delegated_VoterArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Query_RootCurrent_Delegated_Voter_By_PkArgs = { + delegation_pool_address: Scalars['String']; + delegator_address: Scalars['String']; +}; + + export type Query_RootCurrent_Delegator_BalancesArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -6823,6 +7206,20 @@ export type Query_RootNft_Marketplace_V2_Nft_Marketplace_Activities_By_PkArgs = }; +export type Query_RootNft_Metadata_Crawler_Parsed_Asset_UrisArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Query_RootNft_Metadata_Crawler_Parsed_Asset_Uris_By_PkArgs = { + asset_uri: Scalars['String']; +}; + + export type Query_RootNum_Active_Delegator_Per_PoolArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -7081,6 +7478,10 @@ export type Subscription_Root = { current_ans_lookup_by_pk?: Maybe; /** fetch data from the table in a streaming manner : "current_ans_lookup" */ current_ans_lookup_stream: Array; + /** fetch data from the table: "current_aptos_names" */ + current_aptos_names: Array; + /** fetch data from the table in a streaming manner : "current_aptos_names" */ + current_aptos_names_stream: Array; /** fetch data from the table: "current_coin_balances" */ current_coin_balances: Array; /** fetch data from the table: "current_coin_balances" using primary key columns */ @@ -7111,6 +7512,12 @@ export type Subscription_Root = { current_delegated_staking_pool_balances_by_pk?: Maybe; /** fetch data from the table in a streaming manner : "current_delegated_staking_pool_balances" */ current_delegated_staking_pool_balances_stream: Array; + /** fetch data from the table: "current_delegated_voter" */ + current_delegated_voter: Array; + /** fetch data from the table: "current_delegated_voter" using primary key columns */ + current_delegated_voter_by_pk?: Maybe; + /** fetch data from the table in a streaming manner : "current_delegated_voter" */ + current_delegated_voter_stream: Array; /** fetch data from the table: "current_delegator_balances" */ current_delegator_balances: Array; /** fetch data from the table: "current_delegator_balances" using primary key columns */ @@ -7201,7 +7608,7 @@ export type 
Subscription_Root = { events_by_pk?: Maybe; /** fetch data from the table in a streaming manner : "events" */ events_stream: Array; - /** fetch data from the table: "fungible_asset_activities" */ + /** An array relationship */ fungible_asset_activities: Array; /** fetch data from the table: "fungible_asset_activities" using primary key columns */ fungible_asset_activities_by_pk?: Maybe; @@ -7261,6 +7668,12 @@ export type Subscription_Root = { nft_marketplace_v2_nft_marketplace_activities_by_pk?: Maybe; /** fetch data from the table in a streaming manner : "nft_marketplace_v2.nft_marketplace_activities" */ nft_marketplace_v2_nft_marketplace_activities_stream: Array; + /** fetch data from the table: "nft_metadata_crawler.parsed_asset_uris" */ + nft_metadata_crawler_parsed_asset_uris: Array; + /** fetch data from the table: "nft_metadata_crawler.parsed_asset_uris" using primary key columns */ + nft_metadata_crawler_parsed_asset_uris_by_pk?: Maybe; + /** fetch data from the table in a streaming manner : "nft_metadata_crawler.parsed_asset_uris" */ + nft_metadata_crawler_parsed_asset_uris_stream: Array; /** fetch data from the table: "num_active_delegator_per_pool" */ num_active_delegator_per_pool: Array; /** fetch data from the table in a streaming manner : "num_active_delegator_per_pool" */ @@ -7595,6 +8008,22 @@ export type Subscription_RootCurrent_Ans_Lookup_StreamArgs = { }; +export type Subscription_RootCurrent_Aptos_NamesArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Subscription_RootCurrent_Aptos_Names_StreamArgs = { + batch_size: Scalars['Int']; + cursor: Array>; + where?: InputMaybe; +}; + + export type Subscription_RootCurrent_Coin_BalancesArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -7705,6 +8134,28 @@ export type Subscription_RootCurrent_Delegated_Staking_Pool_Balances_StreamArgs }; +export type Subscription_RootCurrent_Delegated_VoterArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Subscription_RootCurrent_Delegated_Voter_By_PkArgs = { + delegation_pool_address: Scalars['String']; + delegator_address: Scalars['String']; +}; + + +export type Subscription_RootCurrent_Delegated_Voter_StreamArgs = { + batch_size: Scalars['Int']; + cursor: Array>; + where?: InputMaybe; +}; + + export type Subscription_RootCurrent_Delegator_BalancesArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -8265,6 +8716,27 @@ export type Subscription_RootNft_Marketplace_V2_Nft_Marketplace_Activities_Strea }; +export type Subscription_RootNft_Metadata_Crawler_Parsed_Asset_UrisArgs = { + distinct_on?: InputMaybe>; + limit?: InputMaybe; + offset?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; +}; + + +export type Subscription_RootNft_Metadata_Crawler_Parsed_Asset_Uris_By_PkArgs = { + asset_uri: Scalars['String']; +}; + + +export type Subscription_RootNft_Metadata_Crawler_Parsed_Asset_Uris_StreamArgs = { + batch_size: Scalars['Int']; + cursor: Array>; + where?: InputMaybe; +}; + + export type Subscription_RootNum_Active_Delegator_Per_PoolArgs = { distinct_on?: InputMaybe>; limit?: InputMaybe; @@ -8690,9 +9162,9 @@ export type Timestamptz_Comparison_Exp = { export type Token_Activities = { __typename?: 'token_activities'; /** An array relationship */ - aptos_names_owner: Array; + aptos_names_owner: Array; /** An array relationship */ - aptos_names_to: Array; + aptos_names_to: 
Array; coin_amount?: Maybe; coin_type?: Maybe; collection_data_id_hash: Scalars['String']; @@ -8718,21 +9190,21 @@ export type Token_Activities = { /** columns and relationships of "token_activities" */ export type Token_ActivitiesAptos_Names_OwnerArgs = { - distinct_on?: InputMaybe>; + distinct_on?: InputMaybe>; limit?: InputMaybe; offset?: InputMaybe; - order_by?: InputMaybe>; - where?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; }; /** columns and relationships of "token_activities" */ export type Token_ActivitiesAptos_Names_ToArgs = { - distinct_on?: InputMaybe>; + distinct_on?: InputMaybe>; limit?: InputMaybe; offset?: InputMaybe; - order_by?: InputMaybe>; - where?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; }; /** aggregated selection of "token_activities" */ @@ -8808,8 +9280,8 @@ export type Token_Activities_Bool_Exp = { _and?: InputMaybe>; _not?: InputMaybe; _or?: InputMaybe>; - aptos_names_owner?: InputMaybe; - aptos_names_to?: InputMaybe; + aptos_names_owner?: InputMaybe; + aptos_names_to?: InputMaybe; coin_amount?: InputMaybe; coin_type?: InputMaybe; collection_data_id_hash?: InputMaybe; @@ -8923,8 +9395,8 @@ export type Token_Activities_Min_Order_By = { /** Ordering options when selecting data from "token_activities". */ export type Token_Activities_Order_By = { - aptos_names_owner_aggregate?: InputMaybe; - aptos_names_to_aggregate?: InputMaybe; + aptos_names_owner_aggregate?: InputMaybe; + aptos_names_to_aggregate?: InputMaybe; coin_amount?: InputMaybe; coin_type?: InputMaybe; collection_data_id_hash?: InputMaybe; @@ -9113,9 +9585,9 @@ export type Token_Activities_V2 = { __typename?: 'token_activities_v2'; after_value?: Maybe; /** An array relationship */ - aptos_names_from: Array; + aptos_names_from: Array; /** An array relationship */ - aptos_names_to: Array; + aptos_names_to: Array; before_value?: Maybe; /** An object relationship */ current_token_data?: Maybe; @@ -9137,21 +9609,21 @@ export type Token_Activities_V2 = { /** columns and relationships of "token_activities_v2" */ export type Token_Activities_V2Aptos_Names_FromArgs = { - distinct_on?: InputMaybe>; + distinct_on?: InputMaybe>; limit?: InputMaybe; offset?: InputMaybe; - order_by?: InputMaybe>; - where?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; }; /** columns and relationships of "token_activities_v2" */ export type Token_Activities_V2Aptos_Names_ToArgs = { - distinct_on?: InputMaybe>; + distinct_on?: InputMaybe>; limit?: InputMaybe; offset?: InputMaybe; - order_by?: InputMaybe>; - where?: InputMaybe; + order_by?: InputMaybe>; + where?: InputMaybe; }; /** aggregated selection of "token_activities_v2" */ @@ -9222,8 +9694,8 @@ export type Token_Activities_V2_Bool_Exp = { _not?: InputMaybe; _or?: InputMaybe>; after_value?: InputMaybe; - aptos_names_from?: InputMaybe; - aptos_names_to?: InputMaybe; + aptos_names_from?: InputMaybe; + aptos_names_to?: InputMaybe; before_value?: InputMaybe; current_token_data?: InputMaybe; entry_function_id_str?: InputMaybe; @@ -9318,8 +9790,8 @@ export type Token_Activities_V2_Min_Order_By = { /** Ordering options when selecting data from "token_activities_v2". 
*/ export type Token_Activities_V2_Order_By = { after_value?: InputMaybe; - aptos_names_from_aggregate?: InputMaybe; - aptos_names_to_aggregate?: InputMaybe; + aptos_names_from_aggregate?: InputMaybe; + aptos_names_to_aggregate?: InputMaybe; before_value?: InputMaybe; current_token_data?: InputMaybe; entry_function_id_str?: InputMaybe; diff --git a/ecosystem/typescript/sdk/src/indexer/queries/getCurrentObjects.graphql b/ecosystem/typescript/sdk/src/indexer/queries/getCurrentObjects.graphql new file mode 100644 index 0000000000000..f055082467e3a --- /dev/null +++ b/ecosystem/typescript/sdk/src/indexer/queries/getCurrentObjects.graphql @@ -0,0 +1,16 @@ +query getCurrentObjects( + $where_condition: current_objects_bool_exp + $offset: Int + $limit: Int + $order_by: [current_objects_order_by!] +) { + current_objects(where: $where_condition, offset: $offset, limit: $limit, order_by: $order_by) { + allow_ungated_transfer + state_key_hash + owner_address + object_address + last_transaction_version + last_guid_creation_num + is_deleted + } +} diff --git a/ecosystem/typescript/sdk/src/plugins/aptos_token.ts b/ecosystem/typescript/sdk/src/plugins/aptos_token.ts index cc744ca073738..487251f7206f3 100644 --- a/ecosystem/typescript/sdk/src/plugins/aptos_token.ts +++ b/ecosystem/typescript/sdk/src/plugins/aptos_token.ts @@ -549,4 +549,31 @@ export class AptosToken { ); return txnHash; } + + /** + * Burn an object by the object owner + * @param owner The object owner account + * @param objectId The object address + * @optional objectType. The object type, default to "0x1::object::ObjectCore" + * @returns The hash of the transaction submitted to the API + */ + async burnObject( + owner: AptosAccount, + objectId: MaybeHexString, + objectType?: string, + extraArgs?: OptionalTransactionArgs, + ): Promise { + const builder = new TransactionBuilderRemoteABI(this.provider, { + sender: owner.address(), + ...extraArgs, + }); + const rawTxn = await builder.build( + "0x1::object::burn", + [objectType || "0x1::object::ObjectCore"], + [HexString.ensure(objectId).hex()], + ); + const bcsTxn = AptosClient.generateBCSTransaction(owner, rawTxn); + const pendingTransaction = await this.provider.submitSignedBCSTransaction(bcsTxn); + return pendingTransaction.hash; + } } diff --git a/ecosystem/typescript/sdk/src/providers/indexer.ts b/ecosystem/typescript/sdk/src/providers/indexer.ts index 0063397b30169..6e0262de221f8 100644 --- a/ecosystem/typescript/sdk/src/providers/indexer.ts +++ b/ecosystem/typescript/sdk/src/providers/indexer.ts @@ -22,6 +22,7 @@ import { GetTokenCurrentOwnerDataQuery, GetOwnedTokensByTokenDataQuery, GetAccountCoinsDataCountQuery, + GetCurrentObjectsQuery, } from "../indexer/generated/operations"; import { GetAccountTokensCount, @@ -45,6 +46,7 @@ import { GetTokenCurrentOwnerData, GetOwnedTokensByTokenData, GetAccountCoinsDataCount, + GetCurrentObjects, } from "../indexer/generated/queries"; import { ClientConfig, post } from "../client"; import { ApiError } from "./aptos_client"; @@ -58,6 +60,7 @@ import { InputMaybe, Token_Activities_V2_Order_By, User_Transactions_Order_By, + Current_Objects_Order_By, } from "../indexer/generated/types"; /** @@ -871,4 +874,36 @@ export class IndexerClient { return this.queryIndexer(graphqlQuery); } + + /** + * Queries an account owned objects + * + * @param ownerAddress Owner address + * @returns GetCurrentObjectsQuery response type + */ + async getAccountOwnedObjects( + ownerAddress: MaybeHexString, + extraArgs?: { + options?: IndexerPaginationArgs; + orderBy?: 
IndexerSortBy[]; + }, + ): Promise { + const address = HexString.ensure(ownerAddress).hex(); + IndexerClient.validateAddress(address); + + const whereCondition: any = { + owner_address: { _eq: address }, + }; + + const graphqlQuery = { + query: GetCurrentObjects, + variables: { + where_condition: whereCondition, + offset: extraArgs?.options?.offset, + limit: extraArgs?.options?.limit, + order_by: extraArgs?.orderBy, + }, + }; + return this.queryIndexer(graphqlQuery); + } } diff --git a/ecosystem/typescript/sdk/src/tests/e2e/aptos_token.test.ts b/ecosystem/typescript/sdk/src/tests/e2e/aptos_token.test.ts index 88be58320893f..01807d7500b19 100644 --- a/ecosystem/typescript/sdk/src/tests/e2e/aptos_token.test.ts +++ b/ecosystem/typescript/sdk/src/tests/e2e/aptos_token.test.ts @@ -1,5 +1,5 @@ import { AptosAccount } from "../../account"; -import { UserTransaction } from "../../generated"; +import { UserTransaction, WriteResource, WriteSetChange_WriteResource } from "../../generated"; import { AptosToken } from "../../plugins"; import { Provider } from "../../providers"; import { PROVIDER_LOCAL_NETWORK_CONFIG, getFaucetClient, longTestTimeout } from "../unit/test_helper.test"; @@ -14,6 +14,7 @@ const bob = new AptosAccount(); const collectionName = "AliceCollection"; const tokenName = "Alice Token"; let tokenAddress = ""; +let collectionAddress = ""; describe("token objects", () => { beforeAll(async () => { @@ -25,10 +26,14 @@ describe("token objects", () => { test( "create collection", async () => { - await provider.waitForTransaction( + const txn = await provider.waitForTransactionWithResult( await aptosToken.createCollection(alice, "Alice's simple collection", collectionName, "https://aptos.dev", 5), { checkSuccess: true }, ); + const objectCore = (txn as UserTransaction).changes.find( + (change) => (change as WriteResource).data.type === "0x1::object::ObjectCore", + ); + collectionAddress = (objectCore as WriteSetChange_WriteResource).address; }, longTestTimeout, ); @@ -241,4 +246,12 @@ describe("token objects", () => { }, longTestTimeout, ); + + test( + "burn object", + async () => { + await provider.waitForTransaction(await aptosToken.burnObject(alice, collectionAddress), { checkSuccess: true }); + }, + longTestTimeout, + ); }); diff --git a/ecosystem/typescript/sdk/src/tests/e2e/indexer.test.ts b/ecosystem/typescript/sdk/src/tests/e2e/indexer.test.ts index d67eae3eb12f9..ea8651db8a859 100644 --- a/ecosystem/typescript/sdk/src/tests/e2e/indexer.test.ts +++ b/ecosystem/typescript/sdk/src/tests/e2e/indexer.test.ts @@ -129,6 +129,17 @@ describe("Indexer", () => { expect(ledgerInfo.ledger_infos[0].chain_id).toBeGreaterThan(1); }); + // OBJECTS // + + it( + "gets account owned objects data", + async () => { + const accountObjects = await indexerClient.getAccountOwnedObjects(alice.address().hex()); + expect(accountObjects.current_objects.length).toBe(2); + }, + longTestTimeout, + ); + // TOKENS // it( diff --git a/ecosystem/typescript/sdk/src/version.ts b/ecosystem/typescript/sdk/src/version.ts index e2fa9e6caa107..398a0537c82de 100644 --- a/ecosystem/typescript/sdk/src/version.ts +++ b/ecosystem/typescript/sdk/src/version.ts @@ -1,2 +1,2 @@ // hardcoded for now, we would want to have it injected dynamically -export const VERSION = "1.19.0"; +export const VERSION = "1.20.0"; diff --git a/ecosystem/typescript/sdk_v2/src/api/account.ts b/ecosystem/typescript/sdk_v2/src/api/account.ts index 2cc6209194758..cfb50d5b2cc3b 100644 --- a/ecosystem/typescript/sdk_v2/src/api/account.ts +++ 
b/ecosystem/typescript/sdk_v2/src/api/account.ts @@ -6,7 +6,7 @@ import { MoveResource, MoveResourceType, PaginationArgs, - Transaction, + TransactionResponse, HexInput, } from "../types"; import { getInfo, getModule, getModules, getResource, getResources, getTransactions } from "../internal/account"; @@ -33,7 +33,7 @@ export class Account { * } * ``` */ - async getInfo(args: { accountAddress: HexInput }): Promise { + async getAccountInfo(args: { accountAddress: HexInput }): Promise { const data = await getInfo({ aptosConfig: this.config, ...args }); return data; } @@ -48,7 +48,7 @@ export class Account { * @returns Account modules */ - async getModules(args: { + async getAccountModules(args: { accountAddress: HexInput; options?: PaginationArgs & LedgerVersion; }): Promise { @@ -72,7 +72,7 @@ export class Account { * } * ``` */ - async getModule(args: { + async getAccountModule(args: { accountAddress: HexInput; moduleName: string; options?: LedgerVersion; @@ -91,7 +91,10 @@ export class Account { * * @returns The account transactions */ - async getTransactions(args: { accountAddress: HexInput; options?: PaginationArgs }): Promise { + async getAccountTransactions(args: { + accountAddress: HexInput; + options?: PaginationArgs; + }): Promise { const transactions = await getTransactions({ aptosConfig: this.config, ...args }); return transactions; } @@ -105,7 +108,7 @@ export class Account { * @param accountAddress Aptos account address * @returns Account resources */ - async getResources(args: { + async getAccountResources(args: { accountAddress: HexInput; options?: PaginationArgs & LedgerVersion; }): Promise { @@ -129,7 +132,7 @@ export class Account { * } * ``` */ - async getResource(args: { + async getAccountResource(args: { accountAddress: HexInput; resourceType: MoveResourceType; options?: LedgerVersion; diff --git a/ecosystem/typescript/sdk_v2/src/api/aptos.ts b/ecosystem/typescript/sdk_v2/src/api/aptos.ts index ce8093e63eaba..e4dd1c31ba32a 100644 --- a/ecosystem/typescript/sdk_v2/src/api/aptos.ts +++ b/ecosystem/typescript/sdk_v2/src/api/aptos.ts @@ -28,3 +28,28 @@ export class Aptos { this.account = new Account(this.config); } } + +export interface Aptos extends Account {} + +/** +In TypeScript, we can’t inherit or extend from more than one class, +Mixins helps us to get around that by creating a partial classes +that we can combine to form a single class that contains all the methods and properties from the partial classes. +{@link https://www.typescriptlang.org/docs/handbook/mixins.html#alternative-pattern} + +Here, we combine any sub-class and the Aptos class. 
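A minimal sketch of what this enables, assuming an already-constructed Aptos instance (the constructor is not shown in this hunk) and an illustrative relative import path: once applyMixin(Aptos, Account, "account") below has run, every Account instance method such as getAccountInfo is also callable directly on an Aptos instance, and the call is forwarded to the wrapped Account sub-client.

import { Aptos } from "./api/aptos"; // ecosystem/typescript/sdk_v2/src/api/aptos.ts in this diff

async function printAccountInfo(aptos: Aptos) {
  // Both calls reach the same implementation: the mixin copies
  // Account.prototype.getAccountInfo onto Aptos.prototype and delegates
  // to this.account.getAccountInfo(...) internally.
  const viaMixin = await aptos.getAccountInfo({ accountAddress: "0x1" });
  const viaSubClient = await aptos.account.getAccountInfo({ accountAddress: "0x1" });
  console.log(viaMixin, viaSubClient);
}

This keeps Account (and any future sub-client) usable on its own while still exposing a flat method surface on the top-level Aptos class.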
+*/ +function applyMixin(targetClass: any, baseClass: any, baseClassProp: string) { + // Mixin instance methods + Object.getOwnPropertyNames(baseClass.prototype).forEach((propertyName) => { + const propertyDescriptor = Object.getOwnPropertyDescriptor(baseClass.prototype, propertyName); + if (!propertyDescriptor) return; + // eslint-disable-next-line func-names + propertyDescriptor.value = function (...args: any) { + return (this as any)[baseClassProp][propertyName](...args); + }; + Object.defineProperty(targetClass.prototype, propertyName, propertyDescriptor); + }); +} + +applyMixin(Aptos, Account, "account"); diff --git a/ecosystem/typescript/sdk_v2/src/bcs/consts.ts b/ecosystem/typescript/sdk_v2/src/bcs/consts.ts index 92957fd271f0d..52b6d9cad0554 100644 --- a/ecosystem/typescript/sdk_v2/src/bcs/consts.ts +++ b/ecosystem/typescript/sdk_v2/src/bcs/consts.ts @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -import { Uint128, Uint16, Uint32, Uint64, Uint8, Uint256 } from "./types"; +import { Uint128, Uint16, Uint32, Uint64, Uint8, Uint256 } from "../types"; // Upper bound values for uint8, uint16, uint64 and uint128 export const MAX_U8_NUMBER: Uint8 = 2 ** 8 - 1; diff --git a/ecosystem/typescript/sdk_v2/src/bcs/deserializer.ts b/ecosystem/typescript/sdk_v2/src/bcs/deserializer.ts index 72a38407c4ee6..84c18fdb4617a 100644 --- a/ecosystem/typescript/sdk_v2/src/bcs/deserializer.ts +++ b/ecosystem/typescript/sdk_v2/src/bcs/deserializer.ts @@ -3,10 +3,9 @@ /* eslint-disable no-bitwise */ import { MAX_U32_NUMBER } from "./consts"; -import { Uint128, Uint16, Uint256, Uint32, Uint64, Uint8 } from "./types"; +import { Uint128, Uint16, Uint256, Uint32, Uint64, Uint8 } from "../types"; -// The class must implement a static deserialize method. -interface Deserializable { +export interface Deserializable { deserialize(deserializer: Deserializer): T; } @@ -189,4 +188,73 @@ export class Deserializer { return Number(value); } + + /** + * This function deserializes a Deserializable value. The bytes must be loaded into the Serializer already. + * Note that it does not take in the value, it takes in the class type of the value that implements Serializable. + * + * The process of using this function is as follows: + * 1. Serialize the value of class type T using its `serialize` function. + * 2. Get the serialized bytes and pass them into the Deserializer constructor. + * 3. Call this function with your newly constructed Deserializer, as `deserializer.deserialize(ClassType)` + * + * @param cls The Deserializable class to deserialize the buffered bytes into. 
+ * + * @example + * // Define the MoveStruct class that implements the Deserializable interface + * class MoveStruct implements Serializable { + * constructor( + * public name: string, + * public description: string, + * public enabled: boolean, + * public vectorU8: Array, + * ) {} + * + * serialize(serializer: Serializer): void { + * serializer.serializeStr(this.name); + * serializer.serializeStr(this.description); + * serializer.serializeBool(this.enabled); + * serializer.serializeU32AsUleb128(this.vectorU8.length); + * this.vectorU8.forEach((n) => serializer.serializeU8(n)); + * } + * + * static deserialize(deserializer: Deserializer): MoveStruct { + * const name = deserializer.deserializeStr(); + * const description = deserializer.deserializeStr(); + * const enabled = deserializer.deserializeBool(); + * const length = deserializer.deserializeUleb128AsU32(); + * const vectorU8 = new Array(); + * for (let i = 0; i < length; i++) { + * vectorU8.push(deserializer.deserializeU8()); + * } + * return new MoveStruct(name, description, enabled, vectorU8); + * } + * } + * + * // Construct a MoveStruct + * const moveStruct = new MoveStruct("abc", "123", false, [1, 2, 3, 4]); + * + * // Serialize a MoveStruct instance. + * const serializer = new Serializer(); + * serializer.serialize(moveStruct); + * const moveStructBcsBytes = serializer.toUint8Array(); + * + * // Load the bytes into the Deserializer buffer + * const deserializer = new Deserializer(moveStructBcsBytes); + * + * // Deserialize the buffered bytes into an instance of MoveStruct + * const deserializedMoveStruct = deserializer.deserialize(MoveStruct); + * assert(deserializedMoveStruct.name === moveStruct.name); + * assert(deserializedMoveStruct.description === moveStruct.description); + * assert(deserializedMoveStruct.enabled === moveStruct.enabled); + * assert(deserializedMoveStruct.vectorU8.length === moveStruct.vectorU8.length); + * deserializeMoveStruct.vectorU8.forEach((n, i) => assert(n === moveStruct.vectorU8[i])); + * + * @returns the deserialized value of class type T + */ + deserialize(cls: Deserializable): T { + // NOTE: The `deserialize` method called by `cls` is defined in the `cls`'s + // Deserializable interface, not the one defined in this class. + return cls.deserialize(this); + } } diff --git a/ecosystem/typescript/sdk_v2/src/bcs/index.ts b/ecosystem/typescript/sdk_v2/src/bcs/index.ts index d3587124a19e3..7c43334e04ce1 100644 --- a/ecosystem/typescript/sdk_v2/src/bcs/index.ts +++ b/ecosystem/typescript/sdk_v2/src/bcs/index.ts @@ -1,6 +1,5 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -export * from "./types"; export * from "./serializer"; export * from "./deserializer"; diff --git a/ecosystem/typescript/sdk_v2/src/bcs/serializer.ts b/ecosystem/typescript/sdk_v2/src/bcs/serializer.ts index 57367ca2d75b1..9addad26fc127 100644 --- a/ecosystem/typescript/sdk_v2/src/bcs/serializer.ts +++ b/ecosystem/typescript/sdk_v2/src/bcs/serializer.ts @@ -10,7 +10,7 @@ import { MAX_U8_NUMBER, MAX_U256_BIG_INT, } from "./consts"; -import { AnyNumber, Uint16, Uint32, Uint8 } from "./types"; +import { AnyNumber, Uint16, Uint32, Uint8 } from "../types"; export interface Serializable { serialize(serializer: Serializer): void; @@ -227,6 +227,47 @@ export class Serializer { toUint8Array(): Uint8Array { return new Uint8Array(this.buffer).slice(0, this.offset); } + + /** + * Serializes a `Serializable` value, facilitating composable serialization. 
+ * + * @param value The Serializable value to serialize + * + * @example + * // Define the MoveStruct class that implements the Serializable interface + * class MoveStruct implements Serializable { + * constructor( + * public creatorAddress: AccountAddress, // where AccountAddress implements Serializable + * public collectionName: string, + * public tokenName: string + * ) {} + * + * serialize(serializer: Serializer): void { + * serializer.serialize(this.creatorAddress); // Composable serialization of another Serializable object + * serializer.serializeStr(this.collectionName); + * serializer.serializeStr(this.tokenName); + * } + * } + * + * // Construct a MoveStruct + * const moveStruct = new MoveStruct(new AccountAddress(...), "MyCollection", "TokenA"); + * + * // Serialize a string, a u64 number, and a MoveStruct instance. + * const serializer = new Serializer(); + * serializer.serializeStr("ExampleString"); + * serializer.serializeU64(12345678); + * serializer.serialize(moveStruct); + * + * // Get the bytes from the Serializer instance + * const serializedBytes = serializer.toUint8Array(); + * + * @returns the serializer instance + */ + serialize(value: T) { + // NOTE: The `serialize` method called by `value` is defined in `value`'s + // Serializable interface, not the one defined in this class. + value.serialize(this); + } } /** diff --git a/ecosystem/typescript/sdk_v2/src/bcs/types.ts b/ecosystem/typescript/sdk_v2/src/bcs/types.ts deleted file mode 100644 index 1a8fc7cd97c05..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/bcs/types.ts +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -export type Uint8 = number; -export type Uint16 = number; -export type Uint32 = number; -export type Uint64 = bigint; -export type Uint128 = bigint; -export type Uint256 = bigint; -export type AnyNumber = bigint | number; diff --git a/ecosystem/typescript/sdk_v2/src/internal/account.ts b/ecosystem/typescript/sdk_v2/src/internal/account.ts index a691d3f4fbf37..f51a6a0a8d49e 100644 --- a/ecosystem/typescript/sdk_v2/src/internal/account.ts +++ b/ecosystem/typescript/sdk_v2/src/internal/account.ts @@ -6,7 +6,7 @@ import { MoveResource, MoveResourceType, PaginationArgs, - Transaction, + TransactionResponse, HexInput, } from "../types"; import { get } from "../client"; @@ -77,9 +77,9 @@ export async function getTransactions(args: { aptosConfig: AptosConfig; accountAddress: HexInput; options?: PaginationArgs; -}): Promise { +}): Promise { const { aptosConfig, accountAddress, options } = args; - const data = await paginateWithCursor<{}, Transaction[]>({ + const data = await paginateWithCursor<{}, TransactionResponse[]>({ url: aptosConfig.getRequestUrl(AptosApiType.FULLNODE), endpoint: `accounts/${AccountAddress.fromHexInput({ input: accountAddress }).toString()}/transactions`, originMethod: "getTransactions", diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/index.ts b/ecosystem/typescript/sdk_v2/src/types/generated/index.ts deleted file mode 100644 index 3b2593aebb18d..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/index.ts +++ /dev/null @@ -1,103 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -export type { AccountData } from './models/AccountData'; -export type { AccountSignature } from './models/AccountSignature'; -export type { AccountSignature_Ed25519Signature } from './models/AccountSignature_Ed25519Signature'; -export type { AccountSignature_MultiEd25519Signature } from 
'./models/AccountSignature_MultiEd25519Signature'; -export type { Address } from './models/Address'; -export type { AptosError } from './models/AptosError'; -export { AptosErrorCode } from './models/AptosErrorCode'; -export type { Block } from './models/Block'; -export type { BlockMetadataTransaction } from './models/BlockMetadataTransaction'; -export type { DecodedTableData } from './models/DecodedTableData'; -export type { DeletedTableData } from './models/DeletedTableData'; -export type { DeleteModule } from './models/DeleteModule'; -export type { DeleteResource } from './models/DeleteResource'; -export type { DeleteTableItem } from './models/DeleteTableItem'; -export type { DirectWriteSet } from './models/DirectWriteSet'; -export type { Ed25519Signature } from './models/Ed25519Signature'; -export type { EncodeSubmissionRequest } from './models/EncodeSubmissionRequest'; -export type { EntryFunctionId } from './models/EntryFunctionId'; -export type { EntryFunctionPayload } from './models/EntryFunctionPayload'; -export type { Event } from './models/Event'; -export type { EventGuid } from './models/EventGuid'; -export type { FeePayerSignature } from './models/FeePayerSignature'; -export type { GasEstimation } from './models/GasEstimation'; -export type { GenesisPayload } from './models/GenesisPayload'; -export type { GenesisPayload_WriteSetPayload } from './models/GenesisPayload_WriteSetPayload'; -export type { GenesisTransaction } from './models/GenesisTransaction'; -export type { HashValue } from './models/HashValue'; -export type { HealthCheckSuccess } from './models/HealthCheckSuccess'; -export type { HexEncodedBytes } from './models/HexEncodedBytes'; -export type { IdentifierWrapper } from './models/IdentifierWrapper'; -export type { IndexResponse } from './models/IndexResponse'; -export type { ModuleBundlePayload } from './models/ModuleBundlePayload'; -export type { MoveAbility } from './models/MoveAbility'; -export type { MoveFunction } from './models/MoveFunction'; -export type { MoveFunctionGenericTypeParam } from './models/MoveFunctionGenericTypeParam'; -export { MoveFunctionVisibility } from './models/MoveFunctionVisibility'; -export type { MoveModule } from './models/MoveModule'; -export type { MoveModuleBytecode } from './models/MoveModuleBytecode'; -export type { MoveModuleId } from './models/MoveModuleId'; -export type { MoveResource } from './models/MoveResource'; -export type { MoveScriptBytecode } from './models/MoveScriptBytecode'; -export type { MoveStruct } from './models/MoveStruct'; -export type { MoveStructField } from './models/MoveStructField'; -export type { MoveStructGenericTypeParam } from './models/MoveStructGenericTypeParam'; -export type { MoveStructTag } from './models/MoveStructTag'; -export type { MoveStructValue } from './models/MoveStructValue'; -export type { MoveType } from './models/MoveType'; -export type { MoveValue } from './models/MoveValue'; -export type { MultiAgentSignature } from './models/MultiAgentSignature'; -export type { MultiEd25519Signature } from './models/MultiEd25519Signature'; -export type { MultisigPayload } from './models/MultisigPayload'; -export type { MultisigTransactionPayload } from './models/MultisigTransactionPayload'; -export type { PendingTransaction } from './models/PendingTransaction'; -export type { RawTableItemRequest } from './models/RawTableItemRequest'; -export { RoleType } from './models/RoleType'; -export type { ScriptPayload } from './models/ScriptPayload'; -export type { ScriptWriteSet } from 
'./models/ScriptWriteSet'; -export type { StateCheckpointTransaction } from './models/StateCheckpointTransaction'; -export type { StateKeyWrapper } from './models/StateKeyWrapper'; -export type { SubmitTransactionRequest } from './models/SubmitTransactionRequest'; -export type { TableItemRequest } from './models/TableItemRequest'; -export type { Transaction } from './models/Transaction'; -export type { Transaction_BlockMetadataTransaction } from './models/Transaction_BlockMetadataTransaction'; -export type { Transaction_GenesisTransaction } from './models/Transaction_GenesisTransaction'; -export type { Transaction_PendingTransaction } from './models/Transaction_PendingTransaction'; -export type { Transaction_StateCheckpointTransaction } from './models/Transaction_StateCheckpointTransaction'; -export type { Transaction_UserTransaction } from './models/Transaction_UserTransaction'; -export type { TransactionPayload } from './models/TransactionPayload'; -export type { TransactionPayload_EntryFunctionPayload } from './models/TransactionPayload_EntryFunctionPayload'; -export type { TransactionPayload_ModuleBundlePayload } from './models/TransactionPayload_ModuleBundlePayload'; -export type { TransactionPayload_MultisigPayload } from './models/TransactionPayload_MultisigPayload'; -export type { TransactionPayload_ScriptPayload } from './models/TransactionPayload_ScriptPayload'; -export type { TransactionsBatchSingleSubmissionFailure } from './models/TransactionsBatchSingleSubmissionFailure'; -export type { TransactionsBatchSubmissionResult } from './models/TransactionsBatchSubmissionResult'; -export type { TransactionSignature } from './models/TransactionSignature'; -export type { TransactionSignature_Ed25519Signature } from './models/TransactionSignature_Ed25519Signature'; -export type { TransactionSignature_FeePayerSignature } from './models/TransactionSignature_FeePayerSignature'; -export type { TransactionSignature_MultiAgentSignature } from './models/TransactionSignature_MultiAgentSignature'; -export type { TransactionSignature_MultiEd25519Signature } from './models/TransactionSignature_MultiEd25519Signature'; -export type { U128 } from './models/U128'; -export type { U256 } from './models/U256'; -export type { U64 } from './models/U64'; -export type { UserTransaction } from './models/UserTransaction'; -export type { VersionedEvent } from './models/VersionedEvent'; -export type { ViewRequest } from './models/ViewRequest'; -export type { WriteModule } from './models/WriteModule'; -export type { WriteResource } from './models/WriteResource'; -export type { WriteSet } from './models/WriteSet'; -export type { WriteSet_DirectWriteSet } from './models/WriteSet_DirectWriteSet'; -export type { WriteSet_ScriptWriteSet } from './models/WriteSet_ScriptWriteSet'; -export type { WriteSetChange } from './models/WriteSetChange'; -export type { WriteSetChange_DeleteModule } from './models/WriteSetChange_DeleteModule'; -export type { WriteSetChange_DeleteResource } from './models/WriteSetChange_DeleteResource'; -export type { WriteSetChange_DeleteTableItem } from './models/WriteSetChange_DeleteTableItem'; -export type { WriteSetChange_WriteModule } from './models/WriteSetChange_WriteModule'; -export type { WriteSetChange_WriteResource } from './models/WriteSetChange_WriteResource'; -export type { WriteSetChange_WriteTableItem } from './models/WriteSetChange_WriteTableItem'; -export type { WriteSetPayload } from './models/WriteSetPayload'; -export type { WriteTableItem } from './models/WriteTableItem'; diff 
--git a/ecosystem/typescript/sdk_v2/src/types/generated/models/AccountData.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/AccountData.ts deleted file mode 100644 index 8df40aa857e6d..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/AccountData.ts +++ /dev/null @@ -1,17 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { HexEncodedBytes } from './HexEncodedBytes'; -import type { U64 } from './U64'; - -/** - * Account data - * - * A simplified version of the onchain Account resource - */ -export type AccountData = { - sequence_number: U64; - authentication_key: HexEncodedBytes; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/AccountSignature.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/AccountSignature.ts deleted file mode 100644 index 576d1507de68d..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/AccountSignature.ts +++ /dev/null @@ -1,17 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { AccountSignature_Ed25519Signature } from './AccountSignature_Ed25519Signature'; -import type { AccountSignature_MultiEd25519Signature } from './AccountSignature_MultiEd25519Signature'; - -/** - * Account signature scheme - * - * The account signature scheme allows you to have two types of accounts: - * - * 1. A single Ed25519 key account, one private key - * 2. A k-of-n multi-Ed25519 key account, multiple private keys, such that k-of-n must sign a transaction. - */ -export type AccountSignature = (AccountSignature_Ed25519Signature | AccountSignature_MultiEd25519Signature); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/AccountSignature_Ed25519Signature.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/AccountSignature_Ed25519Signature.ts deleted file mode 100644 index 3d91da519364e..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/AccountSignature_Ed25519Signature.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Ed25519Signature } from './Ed25519Signature'; - -export type AccountSignature_Ed25519Signature = ({ - type: string; -} & Ed25519Signature); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/AccountSignature_MultiEd25519Signature.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/AccountSignature_MultiEd25519Signature.ts deleted file mode 100644 index 11acab2ebd410..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/AccountSignature_MultiEd25519Signature.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { MultiEd25519Signature } from './MultiEd25519Signature'; - -export type AccountSignature_MultiEd25519Signature = ({ - type: string; -} & MultiEd25519Signature); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/Address.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/Address.ts deleted file mode 100644 index 03c32f6b45d1e..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/Address.ts +++ /dev/null @@ -1,14 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * A hex encoded 32 byte Aptos account address. - * - * This is represented in a string as a 64 character hex string, sometimes - * shortened by stripping leading 0s, and adding a 0x. 
- * - * For example, address 0x0000000000000000000000000000000000000000000000000000000000000001 is represented as 0x1. - * - */ -export type Address = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/AptosError.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/AptosError.ts deleted file mode 100644 index ed24d55242bef..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/AptosError.ts +++ /dev/null @@ -1,22 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { AptosErrorCode } from './AptosErrorCode'; - -/** - * This is the generic struct we use for all API errors, it contains a string - * message and an Aptos API specific error code. - */ -export type AptosError = { - /** - * A message describing the error - */ - message: string; - error_code: AptosErrorCode; - /** - * A code providing VM error details when submitting transactions to the VM - */ - vm_error_code?: number; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/AptosErrorCode.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/AptosErrorCode.ts deleted file mode 100644 index a20bac5d55260..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/AptosErrorCode.ts +++ /dev/null @@ -1,31 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * These codes provide more granular error information beyond just the HTTP - * status code of the response. - */ -export enum AptosErrorCode { - ACCOUNT_NOT_FOUND = 'account_not_found', - RESOURCE_NOT_FOUND = 'resource_not_found', - MODULE_NOT_FOUND = 'module_not_found', - STRUCT_FIELD_NOT_FOUND = 'struct_field_not_found', - VERSION_NOT_FOUND = 'version_not_found', - TRANSACTION_NOT_FOUND = 'transaction_not_found', - TABLE_ITEM_NOT_FOUND = 'table_item_not_found', - BLOCK_NOT_FOUND = 'block_not_found', - STATE_VALUE_NOT_FOUND = 'state_value_not_found', - VERSION_PRUNED = 'version_pruned', - BLOCK_PRUNED = 'block_pruned', - INVALID_INPUT = 'invalid_input', - INVALID_TRANSACTION_UPDATE = 'invalid_transaction_update', - SEQUENCE_NUMBER_TOO_OLD = 'sequence_number_too_old', - VM_ERROR = 'vm_error', - HEALTH_CHECK_FAILED = 'health_check_failed', - MEMPOOL_IS_FULL = 'mempool_is_full', - INTERNAL_ERROR = 'internal_error', - WEB_FRAMEWORK_ERROR = 'web_framework_error', - BCS_NOT_SUPPORTED = 'bcs_not_supported', - API_DISABLED = 'api_disabled', -} diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/Block.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/Block.ts deleted file mode 100644 index e6c2a57e31af2..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/Block.ts +++ /dev/null @@ -1,26 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { HashValue } from './HashValue'; -import type { Transaction } from './Transaction'; -import type { U64 } from './U64'; - -/** - * A Block with or without transactions - * - * This contains the information about a transactions along with - * associated transactions if requested - */ -export type Block = { - block_height: U64; - block_hash: HashValue; - block_timestamp: U64; - first_version: U64; - last_version: U64; - /** - * The transactions in the block in sequential order - */ - transactions?: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/BlockMetadataTransaction.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/BlockMetadataTransaction.ts deleted 
file mode 100644 index 9ec5828deedd9..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/BlockMetadataTransaction.ts +++ /dev/null @@ -1,55 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { Event } from './Event'; -import type { HashValue } from './HashValue'; -import type { U64 } from './U64'; -import type { WriteSetChange } from './WriteSetChange'; - -/** - * A block metadata transaction - * - * This signifies the beginning of a block, and contains information - * about the specific block - */ -export type BlockMetadataTransaction = { - version: U64; - hash: HashValue; - state_change_hash: HashValue; - event_root_hash: HashValue; - state_checkpoint_hash?: HashValue; - gas_used: U64; - /** - * Whether the transaction was successful - */ - success: boolean; - /** - * The VM status of the transaction, can tell useful information in a failure - */ - vm_status: string; - accumulator_root_hash: HashValue; - /** - * Final state of resources changed by the transaction - */ - changes: Array; - id: HashValue; - epoch: U64; - round: U64; - /** - * The events emitted at the block creation - */ - events: Array; - /** - * Previous block votes - */ - previous_block_votes_bitvec: Array; - proposer: Address; - /** - * The indices of the proposers who failed to propose - */ - failed_proposer_indices: Array; - timestamp: U64; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/DecodedTableData.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/DecodedTableData.ts deleted file mode 100644 index 83571f4fe5287..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/DecodedTableData.ts +++ /dev/null @@ -1,26 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * Decoded table data - */ -export type DecodedTableData = { - /** - * Key of table in JSON - */ - key: any; - /** - * Type of key - */ - key_type: string; - /** - * Value of table in JSON - */ - value: any; - /** - * Type of value - */ - value_type: string; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/DeleteModule.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/DeleteModule.ts deleted file mode 100644 index 44d49ce0ca66a..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/DeleteModule.ts +++ /dev/null @@ -1,19 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { MoveModuleId } from './MoveModuleId'; - -/** - * Delete a module - */ -export type DeleteModule = { - address: Address; - /** - * State key hash - */ - state_key_hash: string; - module: MoveModuleId; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/DeleteResource.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/DeleteResource.ts deleted file mode 100644 index ff863a9491086..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/DeleteResource.ts +++ /dev/null @@ -1,19 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { MoveStructTag } from './MoveStructTag'; - -/** - * Delete a resource - */ -export type DeleteResource = { - address: Address; - /** - * State key hash - */ - state_key_hash: string; - resource: MoveStructTag; -}; - diff --git 
a/ecosystem/typescript/sdk_v2/src/types/generated/models/DeleteTableItem.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/DeleteTableItem.ts deleted file mode 100644 index c2655d4f5be06..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/DeleteTableItem.ts +++ /dev/null @@ -1,17 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { DeletedTableData } from './DeletedTableData'; -import type { HexEncodedBytes } from './HexEncodedBytes'; - -/** - * Delete a table item - */ -export type DeleteTableItem = { - state_key_hash: string; - handle: HexEncodedBytes; - key: HexEncodedBytes; - data?: DeletedTableData; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/DeletedTableData.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/DeletedTableData.ts deleted file mode 100644 index ccf4fd6e9b3cb..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/DeletedTableData.ts +++ /dev/null @@ -1,18 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * Deleted table data - */ -export type DeletedTableData = { - /** - * Deleted key - */ - key: any; - /** - * Deleted key type - */ - key_type: string; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/DirectWriteSet.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/DirectWriteSet.ts deleted file mode 100644 index 1c09731f04fe6..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/DirectWriteSet.ts +++ /dev/null @@ -1,12 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Event } from './Event'; -import type { WriteSetChange } from './WriteSetChange'; - -export type DirectWriteSet = { - changes: Array; - events: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/Ed25519Signature.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/Ed25519Signature.ts deleted file mode 100644 index 27686cec889c4..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/Ed25519Signature.ts +++ /dev/null @@ -1,14 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { HexEncodedBytes } from './HexEncodedBytes'; - -/** - * A single Ed25519 signature - */ -export type Ed25519Signature = { - public_key: HexEncodedBytes; - signature: HexEncodedBytes; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/EncodeSubmissionRequest.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/EncodeSubmissionRequest.ts deleted file mode 100644 index 52e6f903e6f00..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/EncodeSubmissionRequest.ts +++ /dev/null @@ -1,24 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { TransactionPayload } from './TransactionPayload'; -import type { U64 } from './U64'; - -/** - * Request to encode a submission - */ -export type EncodeSubmissionRequest = { - sender: Address; - sequence_number: U64; - max_gas_amount: U64; - gas_unit_price: U64; - expiration_timestamp_secs: U64; - payload: TransactionPayload; - /** - * Secondary signer accounts of the request for Multi-agent - */ - secondary_signers?: Array
; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/EntryFunctionId.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/EntryFunctionId.ts deleted file mode 100644 index f38310d0c2a1c..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/EntryFunctionId.ts +++ /dev/null @@ -1,13 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * Entry function id is string representation of a entry function defined on-chain. - * - * Format: `{address}::{module name}::{function name}` - * - * Both `module name` and `function name` are case-sensitive. - * - */ -export type EntryFunctionId = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/EntryFunctionPayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/EntryFunctionPayload.ts deleted file mode 100644 index 78a3ee67d866d..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/EntryFunctionPayload.ts +++ /dev/null @@ -1,22 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { EntryFunctionId } from './EntryFunctionId'; -import type { MoveType } from './MoveType'; - -/** - * Payload which runs a single entry function - */ -export type EntryFunctionPayload = { - function: EntryFunctionId; - /** - * Type arguments of the function - */ - type_arguments: Array; - /** - * Arguments of the function - */ - arguments: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/Event.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/Event.ts deleted file mode 100644 index a160a00cc0dde..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/Event.ts +++ /dev/null @@ -1,21 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { EventGuid } from './EventGuid'; -import type { MoveType } from './MoveType'; -import type { U64 } from './U64'; - -/** - * An event from a transaction - */ -export type Event = { - guid: EventGuid; - sequence_number: U64; - type: MoveType; - /** - * The JSON representation of the event - */ - data: any; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/EventGuid.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/EventGuid.ts deleted file mode 100644 index 928ad847f5797..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/EventGuid.ts +++ /dev/null @@ -1,12 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { U64 } from './U64'; - -export type EventGuid = { - creation_number: U64; - account_address: Address; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/FeePayerSignature.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/FeePayerSignature.ts deleted file mode 100644 index 4f01fa13ce8e8..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/FeePayerSignature.ts +++ /dev/null @@ -1,26 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { AccountSignature } from './AccountSignature'; -import type { Address } from './Address'; - -/** - * Fee payer signature for fee payer transactions - * - * This allows you to have transactions across multiple accounts and with a fee payer - */ -export type FeePayerSignature = { - sender: AccountSignature; - /** - * The other involved parties' addresses - */ - secondary_signer_addresses: Array
; - /** - * The associated signatures, in the same order as the secondary addresses - */ - secondary_signers: Array; - fee_payer_address: Address; - fee_payer_signer: AccountSignature; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/GasEstimation.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/GasEstimation.ts deleted file mode 100644 index f8ebd699311c9..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/GasEstimation.ts +++ /dev/null @@ -1,22 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * Struct holding the outputs of the estimate gas API - */ -export type GasEstimation = { - /** - * The deprioritized estimate for the gas unit price - */ - deprioritized_gas_estimate?: number; - /** - * The current estimate for the gas unit price - */ - gas_estimate: number; - /** - * The prioritized estimate for the gas unit price - */ - prioritized_gas_estimate?: number; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/GenesisPayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/GenesisPayload.ts deleted file mode 100644 index 66d653a68f279..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/GenesisPayload.ts +++ /dev/null @@ -1,11 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { GenesisPayload_WriteSetPayload } from './GenesisPayload_WriteSetPayload'; - -/** - * The writeset payload of the Genesis transaction - */ -export type GenesisPayload = GenesisPayload_WriteSetPayload; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/GenesisPayload_WriteSetPayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/GenesisPayload_WriteSetPayload.ts deleted file mode 100644 index a61275063a40a..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/GenesisPayload_WriteSetPayload.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { WriteSetPayload } from './WriteSetPayload'; - -export type GenesisPayload_WriteSetPayload = ({ - type: string; -} & WriteSetPayload); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/GenesisTransaction.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/GenesisTransaction.ts deleted file mode 100644 index 563a8be8fce78..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/GenesisTransaction.ts +++ /dev/null @@ -1,42 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Event } from './Event'; -import type { GenesisPayload } from './GenesisPayload'; -import type { HashValue } from './HashValue'; -import type { U64 } from './U64'; -import type { WriteSetChange } from './WriteSetChange'; - -/** - * The genesis transaction - * - * This only occurs at the genesis transaction (version 0) - */ -export type GenesisTransaction = { - version: U64; - hash: HashValue; - state_change_hash: HashValue; - event_root_hash: HashValue; - state_checkpoint_hash?: HashValue; - gas_used: U64; - /** - * Whether the transaction was successful - */ - success: boolean; - /** - * The VM status of the transaction, can tell useful information in a failure - */ - vm_status: string; - accumulator_root_hash: HashValue; - /** - * Final state of resources changed by the transaction - */ - changes: Array; - payload: GenesisPayload; - /** - * Events emitted during genesis - */ - events: Array; -}; - 
diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/HashValue.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/HashValue.ts deleted file mode 100644 index a296dd84c2a4b..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/HashValue.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -export type HashValue = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/HealthCheckSuccess.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/HealthCheckSuccess.ts deleted file mode 100644 index ff98f0ed77d30..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/HealthCheckSuccess.ts +++ /dev/null @@ -1,11 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * Representation of a successful healthcheck - */ -export type HealthCheckSuccess = { - message: string; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/HexEncodedBytes.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/HexEncodedBytes.ts deleted file mode 100644 index d4a21baf0ce47..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/HexEncodedBytes.ts +++ /dev/null @@ -1,12 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * All bytes (Vec) data is represented as hex-encoded string prefixed with `0x` and fulfilled with - * two hex digits per byte. - * - * Unlike the `Address` type, HexEncodedBytes will not trim any zeros. - * - */ -export type HexEncodedBytes = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/IdentifierWrapper.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/IdentifierWrapper.ts deleted file mode 100644 index 8bf5d9a71895a..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/IdentifierWrapper.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -export type IdentifierWrapper = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/IndexResponse.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/IndexResponse.ts deleted file mode 100644 index 1ca2519b1a62d..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/IndexResponse.ts +++ /dev/null @@ -1,30 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { RoleType } from './RoleType'; -import type { U64 } from './U64'; - -/** - * The struct holding all data returned to the client by the - * index endpoint (i.e., GET "/"). Only for responding in JSON - */ -export type IndexResponse = { - /** - * Chain ID of the current chain - */ - chain_id: number; - epoch: U64; - ledger_version: U64; - oldest_ledger_version: U64; - ledger_timestamp: U64; - node_role: RoleType; - oldest_block_height: U64; - block_height: U64; - /** - * Git hash of the build of the API endpoint. Can be used to determine the exact - * software version used by the API endpoint. 
- */ - git_hash?: string; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/ModuleBundlePayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/ModuleBundlePayload.ts deleted file mode 100644 index be34a1f42ecbd..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/ModuleBundlePayload.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { MoveModuleBytecode } from './MoveModuleBytecode'; - -export type ModuleBundlePayload = { - modules: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveAbility.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveAbility.ts deleted file mode 100644 index 311c5db9003a8..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveAbility.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -export type MoveAbility = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveFunction.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveFunction.ts deleted file mode 100644 index 4ce3ad729c26c..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveFunction.ts +++ /dev/null @@ -1,37 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { IdentifierWrapper } from './IdentifierWrapper'; -import type { MoveFunctionGenericTypeParam } from './MoveFunctionGenericTypeParam'; -import type { MoveFunctionVisibility } from './MoveFunctionVisibility'; -import type { MoveType } from './MoveType'; - -/** - * Move function - */ -export type MoveFunction = { - name: IdentifierWrapper; - visibility: MoveFunctionVisibility; - /** - * Whether the function can be called as an entry function directly in a transaction - */ - is_entry: boolean; - /** - * Whether the function is a view function or not - */ - is_view: boolean; - /** - * Generic type params associated with the Move function - */ - generic_type_params: Array; - /** - * Parameters associated with the move function - */ - params: Array; - /** - * Return type of the function - */ - return: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveFunctionGenericTypeParam.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveFunctionGenericTypeParam.ts deleted file mode 100644 index c0776fe5cab0c..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveFunctionGenericTypeParam.ts +++ /dev/null @@ -1,16 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { MoveAbility } from './MoveAbility'; - -/** - * Move function generic type param - */ -export type MoveFunctionGenericTypeParam = { - /** - * Move abilities tied to the generic type param and associated with the function that uses it - */ - constraints: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveFunctionVisibility.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveFunctionVisibility.ts deleted file mode 100644 index a1684d5c84713..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveFunctionVisibility.ts +++ /dev/null @@ -1,12 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * Move function visibility - */ -export enum MoveFunctionVisibility { - PRIVATE = 'private', - PUBLIC = 'public', - FRIEND = 'friend', -} diff --git 
a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveModule.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveModule.ts deleted file mode 100644 index f2d127ff28614..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveModule.ts +++ /dev/null @@ -1,30 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { IdentifierWrapper } from './IdentifierWrapper'; -import type { MoveFunction } from './MoveFunction'; -import type { MoveModuleId } from './MoveModuleId'; -import type { MoveStruct } from './MoveStruct'; - -/** - * A Move module - */ -export type MoveModule = { - address: Address; - name: IdentifierWrapper; - /** - * Friends of the module - */ - friends: Array; - /** - * Public functions of the module - */ - exposed_functions: Array; - /** - * Structs of the module - */ - structs: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveModuleBytecode.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveModuleBytecode.ts deleted file mode 100644 index 9348d105310a5..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveModuleBytecode.ts +++ /dev/null @@ -1,15 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { HexEncodedBytes } from './HexEncodedBytes'; -import type { MoveModule } from './MoveModule'; - -/** - * Move module bytecode along with it's ABI - */ -export type MoveModuleBytecode = { - bytecode: HexEncodedBytes; - abi?: MoveModule; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveModuleId.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveModuleId.ts deleted file mode 100644 index 5ad74349b690b..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveModuleId.ts +++ /dev/null @@ -1,15 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * Move module id is a string representation of Move module. - * - * Format: `{address}::{module name}` - * - * `address` should be hex-encoded 32 byte account address that is prefixed with `0x`. - * - * Module name is case-sensitive. 
- * - */ -export type MoveModuleId = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveResource.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveResource.ts deleted file mode 100644 index 544bfa47be053..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveResource.ts +++ /dev/null @@ -1,15 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { MoveStructTag } from './MoveStructTag'; -import type { MoveStructValue } from './MoveStructValue'; - -/** - * A parsed Move resource - */ -export type MoveResource = { - type: MoveStructTag; - data: MoveStructValue; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveScriptBytecode.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveScriptBytecode.ts deleted file mode 100644 index 109a6cf6bfcf0..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveScriptBytecode.ts +++ /dev/null @@ -1,15 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { HexEncodedBytes } from './HexEncodedBytes'; -import type { MoveFunction } from './MoveFunction'; - -/** - * Move script bytecode - */ -export type MoveScriptBytecode = { - bytecode: HexEncodedBytes; - abi?: MoveFunction; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStruct.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStruct.ts deleted file mode 100644 index 74dd1d8766e6f..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStruct.ts +++ /dev/null @@ -1,32 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { IdentifierWrapper } from './IdentifierWrapper'; -import type { MoveAbility } from './MoveAbility'; -import type { MoveStructField } from './MoveStructField'; -import type { MoveStructGenericTypeParam } from './MoveStructGenericTypeParam'; - -/** - * A move struct - */ -export type MoveStruct = { - name: IdentifierWrapper; - /** - * Whether the struct is a native struct of Move - */ - is_native: boolean; - /** - * Abilities associated with the struct - */ - abilities: Array; - /** - * Generic types associated with the struct - */ - generic_type_params: Array; - /** - * Fields associated with the struct - */ - fields: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStructField.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStructField.ts deleted file mode 100644 index 3f20b0bfbb72b..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStructField.ts +++ /dev/null @@ -1,15 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { IdentifierWrapper } from './IdentifierWrapper'; -import type { MoveType } from './MoveType'; - -/** - * Move struct field - */ -export type MoveStructField = { - name: IdentifierWrapper; - type: MoveType; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStructGenericTypeParam.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStructGenericTypeParam.ts deleted file mode 100644 index 5ff3317db916d..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStructGenericTypeParam.ts +++ /dev/null @@ -1,16 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { MoveAbility } from './MoveAbility'; - -/** - * Move generic type param - */ 
-export type MoveStructGenericTypeParam = { - /** - * Move abilities tied to the generic type param and associated with the type that uses it - */ - constraints: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStructTag.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStructTag.ts deleted file mode 100644 index 254235ddd4b0a..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStructTag.ts +++ /dev/null @@ -1,24 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * String representation of a MoveStructTag (on-chain Move struct type). This exists so you - * can specify MoveStructTags as path / query parameters, e.g. for get_events_by_event_handle. - * - * It is a combination of: - * 1. `move_module_address`, `module_name` and `struct_name`, all joined by `::` - * 2. `struct generic type parameters` joined by `, ` - * - * Examples: - * * `0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>` - * * `0x1::account::Account` - * - * Note: - * 1. Empty chars should be ignored when comparing 2 struct tag ids. - * 2. When used in an URL path, should be encoded by url-encoding (AKA percent-encoding). - * - * See [doc](https://aptos.dev/concepts/accounts) for more details. - * - */ -export type MoveStructTag = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStructValue.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStructValue.ts deleted file mode 100644 index 11d4da45739bc..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveStructValue.ts +++ /dev/null @@ -1,49 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * This is a JSON representation of some data within an account resource. More specifically, - * it is a map of strings to arbitrary JSON values / objects, where the keys are top level - * fields within the given resource. - * - * To clarify, you might query for 0x1::account::Account and see the example data. - * - * Move `bool` type value is serialized into `boolean`. - * - * Move `u8`, `u16` and `u32` type value is serialized into `integer`. - * - * Move `u64`, `u128` and `u256` type value is serialized into `string`. - * - * Move `address` type value (32 byte Aptos account address) is serialized into a HexEncodedBytes string. - * For example: - * - `0x1` - * - `0x1668f6be25668c1a17cd8caf6b8d2f25` - * - * Move `vector` type value is serialized into `array`, except `vector` which is serialized into a - * HexEncodedBytes string with `0x` prefix. - * For example: - * - `vector{255, 255}` => `["255", "255"]` - * - `vector{255, 255}` => `0xffff` - * - * Move `struct` type value is serialized into `object` that looks like this (except some Move stdlib types, see the following section): - * ```json - * { - * field1_name: field1_value, - * field2_name: field2_value, - * ...... - * } - * ``` - * - * For example: - * `{ "created": "0xa550c18", "role_id": "0" }` - * - * **Special serialization for Move stdlib types**: - * - [0x1::string::String](https://github.com/aptos-labs/aptos-core/blob/main/language/move-stdlib/docs/ascii.md) - * is serialized into `string`. For example, struct value `0x1::string::String{bytes: b"Hello World!"}` - * is serialized as `"Hello World!"` in JSON. 
- * - */ - export type MoveStructValue = { - }; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveType.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveType.ts deleted file mode 100644 index 226183770b27c..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveType.ts +++ /dev/null @@ -1,34 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * String representation of an on-chain Move type tag that is exposed in transaction payload. - * Values: - * - bool - * - u8 - * - u16 - * - u32 - * - u64 - * - u128 - * - u256 - * - address - * - signer - * - vector: `vector<{non-reference MoveTypeId}>` - * - struct: `{address}::{module_name}::{struct_name}::<{generic types}>` - * - * Vector type value examples: - * - `vector` - * - `vector>` - * - `vector<0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>>` - * - * Struct type value examples: - * - `0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin> - * - `0x1::account::Account` - * - * Note: - * 1. Empty chars should be ignored when comparing 2 struct tag ids. - * 2. When used in an URL path, should be encoded by url-encoding (AKA percent-encoding). - * - */ -export type MoveType = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveValue.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveValue.ts deleted file mode 100644 index 899490be8b4da..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MoveValue.ts +++ /dev/null @@ -1,16 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { HexEncodedBytes } from './HexEncodedBytes'; -import type { MoveStructValue } from './MoveStructValue'; -import type { U128 } from './U128'; -import type { U256 } from './U256'; -import type { U64 } from './U64'; - -/** - * An enum of the possible Move value types - */ -export type MoveValue = (number | U64 | U128 | U256 | boolean | Address | Array | HexEncodedBytes | MoveStructValue | string); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MultiAgentSignature.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MultiAgentSignature.ts deleted file mode 100644 index f07b81734f79e..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MultiAgentSignature.ts +++ /dev/null @@ -1,24 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { AccountSignature } from './AccountSignature'; -import type { Address } from './Address'; - -/** - * Multi agent signature for multi agent transactions - * - * This allows you to have transactions across multiple accounts - */ -export type MultiAgentSignature = { - sender: AccountSignature; - /** - * The other involved parties' addresses - */ - secondary_signer_addresses: Array
; - /** - * The associated signatures, in the same order as the secondary addresses - */ - secondary_signers: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MultiEd25519Signature.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MultiEd25519Signature.ts deleted file mode 100644 index 29809a29c6c05..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MultiEd25519Signature.ts +++ /dev/null @@ -1,27 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { HexEncodedBytes } from './HexEncodedBytes'; - -/** - * A Ed25519 multi-sig signature - * - * This allows k-of-n signing for a transaction - */ -export type MultiEd25519Signature = { - /** - * The public keys for the Ed25519 signature - */ - public_keys: Array; - /** - * Signature associated with the public keys in the same order - */ - signatures: Array; - /** - * The number of signatures required for a successful transaction - */ - threshold: number; - bitmap: HexEncodedBytes; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MultisigPayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MultisigPayload.ts deleted file mode 100644 index 9100a74b3359f..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MultisigPayload.ts +++ /dev/null @@ -1,16 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { MultisigTransactionPayload } from './MultisigTransactionPayload'; - -/** - * A multisig transaction that allows an owner of a multisig account to execute a pre-approved - * transaction as the multisig account. - */ -export type MultisigPayload = { - multisig_address: Address; - transaction_payload?: MultisigTransactionPayload; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/MultisigTransactionPayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/MultisigTransactionPayload.ts deleted file mode 100644 index d3191b538308b..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/MultisigTransactionPayload.ts +++ /dev/null @@ -1,8 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { EntryFunctionPayload } from './EntryFunctionPayload'; - -export type MultisigTransactionPayload = EntryFunctionPayload; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/PendingTransaction.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/PendingTransaction.ts deleted file mode 100644 index 2b4ad87fd6b8d..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/PendingTransaction.ts +++ /dev/null @@ -1,24 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { HashValue } from './HashValue'; -import type { TransactionPayload } from './TransactionPayload'; -import type { TransactionSignature } from './TransactionSignature'; -import type { U64 } from './U64'; - -/** - * A transaction waiting in mempool - */ -export type PendingTransaction = { - hash: HashValue; - sender: Address; - sequence_number: U64; - max_gas_amount: U64; - gas_unit_price: U64; - expiration_timestamp_secs: U64; - payload: TransactionPayload; - signature?: TransactionSignature; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/RawTableItemRequest.ts 
b/ecosystem/typescript/sdk_v2/src/types/generated/models/RawTableItemRequest.ts deleted file mode 100644 index e8fc0f8eccd1d..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/RawTableItemRequest.ts +++ /dev/null @@ -1,13 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { HexEncodedBytes } from './HexEncodedBytes'; - -/** - * Table Item request for the GetTableItemRaw API - */ -export type RawTableItemRequest = { - key: HexEncodedBytes; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/RoleType.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/RoleType.ts deleted file mode 100644 index f0b5c207d63bf..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/RoleType.ts +++ /dev/null @@ -1,8 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -export enum RoleType { - VALIDATOR = 'validator', - FULL_NODE = 'full_node', -} diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/ScriptPayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/ScriptPayload.ts deleted file mode 100644 index 52cb0a0574352..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/ScriptPayload.ts +++ /dev/null @@ -1,22 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { MoveScriptBytecode } from './MoveScriptBytecode'; -import type { MoveType } from './MoveType'; - -/** - * Payload which runs a script that can run multiple functions - */ -export type ScriptPayload = { - code: MoveScriptBytecode; - /** - * Type arguments of the function - */ - type_arguments: Array; - /** - * Arguments of the function - */ - arguments: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/ScriptWriteSet.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/ScriptWriteSet.ts deleted file mode 100644 index 469b780799c7a..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/ScriptWriteSet.ts +++ /dev/null @@ -1,12 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { ScriptPayload } from './ScriptPayload'; - -export type ScriptWriteSet = { - execute_as: Address; - script: ScriptPayload; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/StateCheckpointTransaction.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/StateCheckpointTransaction.ts deleted file mode 100644 index bfe5b5dbcd246..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/StateCheckpointTransaction.ts +++ /dev/null @@ -1,34 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { HashValue } from './HashValue'; -import type { U64 } from './U64'; -import type { WriteSetChange } from './WriteSetChange'; - -/** - * A state checkpoint transaction - */ -export type StateCheckpointTransaction = { - version: U64; - hash: HashValue; - state_change_hash: HashValue; - event_root_hash: HashValue; - state_checkpoint_hash?: HashValue; - gas_used: U64; - /** - * Whether the transaction was successful - */ - success: boolean; - /** - * The VM status of the transaction, can tell useful information in a failure - */ - vm_status: string; - accumulator_root_hash: HashValue; - /** - * Final state of resources changed by the transaction - */ - changes: Array; - timestamp: U64; -}; - diff --git 
a/ecosystem/typescript/sdk_v2/src/types/generated/models/StateKeyWrapper.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/StateKeyWrapper.ts deleted file mode 100644 index d3939a43e6a77..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/StateKeyWrapper.ts +++ /dev/null @@ -1,9 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * Representation of a StateKey as a hex string. This is used for cursor based pagination. - * - */ -export type StateKeyWrapper = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/SubmitTransactionRequest.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/SubmitTransactionRequest.ts deleted file mode 100644 index b850d3f548e2d..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/SubmitTransactionRequest.ts +++ /dev/null @@ -1,24 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { TransactionPayload } from './TransactionPayload'; -import type { TransactionSignature } from './TransactionSignature'; -import type { U64 } from './U64'; - -/** - * A request to submit a transaction - * - * This requires a transaction and a signature of it - */ -export type SubmitTransactionRequest = { - sender: Address; - sequence_number: U64; - max_gas_amount: U64; - gas_unit_price: U64; - expiration_timestamp_secs: U64; - payload: TransactionPayload; - signature: TransactionSignature; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TableItemRequest.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/TableItemRequest.ts deleted file mode 100644 index 18b44286d5165..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/TableItemRequest.ts +++ /dev/null @@ -1,18 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { MoveType } from './MoveType'; - -/** - * Table Item request for the GetTableItem API - */ -export type TableItemRequest = { - key_type: MoveType; - value_type: MoveType; - /** - * The value of the table item's key - */ - key: any; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction.ts deleted file mode 100644 index 0d6bd99e8daa9..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction.ts +++ /dev/null @@ -1,15 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Transaction_BlockMetadataTransaction } from './Transaction_BlockMetadataTransaction'; -import type { Transaction_GenesisTransaction } from './Transaction_GenesisTransaction'; -import type { Transaction_PendingTransaction } from './Transaction_PendingTransaction'; -import type { Transaction_StateCheckpointTransaction } from './Transaction_StateCheckpointTransaction'; -import type { Transaction_UserTransaction } from './Transaction_UserTransaction'; - -/** - * Enum of the different types of transactions in Aptos - */ -export type Transaction = (Transaction_PendingTransaction | Transaction_UserTransaction | Transaction_GenesisTransaction | Transaction_BlockMetadataTransaction | Transaction_StateCheckpointTransaction); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload.ts deleted file mode 100644 index 
0115a29749beb..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload.ts +++ /dev/null @@ -1,14 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { TransactionPayload_EntryFunctionPayload } from './TransactionPayload_EntryFunctionPayload'; -import type { TransactionPayload_ModuleBundlePayload } from './TransactionPayload_ModuleBundlePayload'; -import type { TransactionPayload_MultisigPayload } from './TransactionPayload_MultisigPayload'; -import type { TransactionPayload_ScriptPayload } from './TransactionPayload_ScriptPayload'; - -/** - * An enum of the possible transaction payloads - */ -export type TransactionPayload = (TransactionPayload_EntryFunctionPayload | TransactionPayload_ScriptPayload | TransactionPayload_ModuleBundlePayload | TransactionPayload_MultisigPayload); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload_EntryFunctionPayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload_EntryFunctionPayload.ts deleted file mode 100644 index d44e5d89a654a..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload_EntryFunctionPayload.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { EntryFunctionPayload } from './EntryFunctionPayload'; - -export type TransactionPayload_EntryFunctionPayload = ({ - type: string; -} & EntryFunctionPayload); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload_ModuleBundlePayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload_ModuleBundlePayload.ts deleted file mode 100644 index 0193cb9d97dc5..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload_ModuleBundlePayload.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { ModuleBundlePayload } from './ModuleBundlePayload'; - -export type TransactionPayload_ModuleBundlePayload = ({ - type: string; -} & ModuleBundlePayload); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload_MultisigPayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload_MultisigPayload.ts deleted file mode 100644 index 01023dd71e6e9..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload_MultisigPayload.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { MultisigPayload } from './MultisigPayload'; - -export type TransactionPayload_MultisigPayload = ({ - type: string; -} & MultisigPayload); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload_ScriptPayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload_ScriptPayload.ts deleted file mode 100644 index 9e7feb973092b..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionPayload_ScriptPayload.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { ScriptPayload } from './ScriptPayload'; - -export type TransactionPayload_ScriptPayload = ({ - type: string; -} & ScriptPayload); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature.ts 
b/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature.ts deleted file mode 100644 index 0ddc7dcc48ba2..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature.ts +++ /dev/null @@ -1,14 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { TransactionSignature_Ed25519Signature } from './TransactionSignature_Ed25519Signature'; -import type { TransactionSignature_FeePayerSignature } from './TransactionSignature_FeePayerSignature'; -import type { TransactionSignature_MultiAgentSignature } from './TransactionSignature_MultiAgentSignature'; -import type { TransactionSignature_MultiEd25519Signature } from './TransactionSignature_MultiEd25519Signature'; - -/** - * An enum representing the different transaction signatures available - */ -export type TransactionSignature = (TransactionSignature_Ed25519Signature | TransactionSignature_MultiEd25519Signature | TransactionSignature_MultiAgentSignature | TransactionSignature_FeePayerSignature); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature_Ed25519Signature.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature_Ed25519Signature.ts deleted file mode 100644 index 0667d1e8bb140..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature_Ed25519Signature.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Ed25519Signature } from './Ed25519Signature'; - -export type TransactionSignature_Ed25519Signature = ({ - type: string; -} & Ed25519Signature); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature_FeePayerSignature.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature_FeePayerSignature.ts deleted file mode 100644 index c16bc6bd71d09..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature_FeePayerSignature.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { FeePayerSignature } from './FeePayerSignature'; - -export type TransactionSignature_FeePayerSignature = ({ - type: string; -} & FeePayerSignature); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature_MultiAgentSignature.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature_MultiAgentSignature.ts deleted file mode 100644 index e74d911d9fb63..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature_MultiAgentSignature.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { MultiAgentSignature } from './MultiAgentSignature'; - -export type TransactionSignature_MultiAgentSignature = ({ - type: string; -} & MultiAgentSignature); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature_MultiEd25519Signature.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature_MultiEd25519Signature.ts deleted file mode 100644 index 1f6dc58ada495..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionSignature_MultiEd25519Signature.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { MultiEd25519Signature } from './MultiEd25519Signature'; - 
-export type TransactionSignature_MultiEd25519Signature = ({ - type: string; -} & MultiEd25519Signature); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_BlockMetadataTransaction.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_BlockMetadataTransaction.ts deleted file mode 100644 index 82067d7479ccf..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_BlockMetadataTransaction.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { BlockMetadataTransaction } from './BlockMetadataTransaction'; - -export type Transaction_BlockMetadataTransaction = ({ - type: string; -} & BlockMetadataTransaction); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_GenesisTransaction.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_GenesisTransaction.ts deleted file mode 100644 index 3bb44fd02883b..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_GenesisTransaction.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { GenesisTransaction } from './GenesisTransaction'; - -export type Transaction_GenesisTransaction = ({ - type: string; -} & GenesisTransaction); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_PendingTransaction.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_PendingTransaction.ts deleted file mode 100644 index c593c76424f81..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_PendingTransaction.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { PendingTransaction } from './PendingTransaction'; - -export type Transaction_PendingTransaction = ({ - type: string; -} & PendingTransaction); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_StateCheckpointTransaction.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_StateCheckpointTransaction.ts deleted file mode 100644 index 90eeda7001b0f..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_StateCheckpointTransaction.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { StateCheckpointTransaction } from './StateCheckpointTransaction'; - -export type Transaction_StateCheckpointTransaction = ({ - type: string; -} & StateCheckpointTransaction); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_UserTransaction.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_UserTransaction.ts deleted file mode 100644 index 8feeed7c2233c..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/Transaction_UserTransaction.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { UserTransaction } from './UserTransaction'; - -export type Transaction_UserTransaction = ({ - type: string; -} & UserTransaction); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionsBatchSingleSubmissionFailure.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionsBatchSingleSubmissionFailure.ts deleted file mode 100644 index 98f81af611b51..0000000000000 --- 
a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionsBatchSingleSubmissionFailure.ts +++ /dev/null @@ -1,17 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { AptosError } from './AptosError'; - -/** - * Information telling which batch submission transactions failed - */ -export type TransactionsBatchSingleSubmissionFailure = { - error: AptosError; - /** - * The index of which transaction failed, same as submission order - */ - transaction_index: number; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionsBatchSubmissionResult.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionsBatchSubmissionResult.ts deleted file mode 100644 index 43c5d62f8dfba..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/TransactionsBatchSubmissionResult.ts +++ /dev/null @@ -1,18 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { TransactionsBatchSingleSubmissionFailure } from './TransactionsBatchSingleSubmissionFailure'; - -/** - * Batch transaction submission result - * - * Tells which transactions failed - */ -export type TransactionsBatchSubmissionResult = { - /** - * Summary of the failed transactions - */ - transaction_failures: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/U128.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/U128.ts deleted file mode 100644 index d391d4afb9c24..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/U128.ts +++ /dev/null @@ -1,12 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * A string containing a 128-bit unsigned integer. - * - * We represent u128 values as a string to ensure compatibility with languages such - * as JavaScript that do not parse u128s in JSON natively. - * - */ -export type U128 = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/U256.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/U256.ts deleted file mode 100644 index 092d56d8eb4be..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/U256.ts +++ /dev/null @@ -1,12 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * A string containing a 256-bit unsigned integer. - * - * We represent u256 values as a string to ensure compatibility with languages such - * as JavaScript that do not parse u256s in JSON natively. - * - */ -export type U256 = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/U64.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/U64.ts deleted file mode 100644 index f8f542c609edb..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/U64.ts +++ /dev/null @@ -1,12 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -/** - * A string containing a 64-bit unsigned integer. - * - * We represent u64 values as a string to ensure compatibility with languages such - * as JavaScript that do not parse u64s in JSON natively. 
- * - */ -export type U64 = string; diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/UserTransaction.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/UserTransaction.ts deleted file mode 100644 index 4aa4e67a37ce3..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/UserTransaction.ts +++ /dev/null @@ -1,49 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { Event } from './Event'; -import type { HashValue } from './HashValue'; -import type { TransactionPayload } from './TransactionPayload'; -import type { TransactionSignature } from './TransactionSignature'; -import type { U64 } from './U64'; -import type { WriteSetChange } from './WriteSetChange'; - -/** - * A transaction submitted by a user to change the state of the blockchain - */ -export type UserTransaction = { - version: U64; - hash: HashValue; - state_change_hash: HashValue; - event_root_hash: HashValue; - state_checkpoint_hash?: HashValue; - gas_used: U64; - /** - * Whether the transaction was successful - */ - success: boolean; - /** - * The VM status of the transaction, can tell useful information in a failure - */ - vm_status: string; - accumulator_root_hash: HashValue; - /** - * Final state of resources changed by the transaction - */ - changes: Array; - sender: Address; - sequence_number: U64; - max_gas_amount: U64; - gas_unit_price: U64; - expiration_timestamp_secs: U64; - payload: TransactionPayload; - signature?: TransactionSignature; - /** - * Events generated by the transaction - */ - events: Array; - timestamp: U64; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/VersionedEvent.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/VersionedEvent.ts deleted file mode 100644 index c9934c820268f..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/VersionedEvent.ts +++ /dev/null @@ -1,22 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { EventGuid } from './EventGuid'; -import type { MoveType } from './MoveType'; -import type { U64 } from './U64'; - -/** - * An event from a transaction with a version - */ -export type VersionedEvent = { - version: U64; - guid: EventGuid; - sequence_number: U64; - type: MoveType; - /** - * The JSON representation of the event - */ - data: any; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/ViewRequest.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/ViewRequest.ts deleted file mode 100644 index 5fa124b8ef5fa..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/ViewRequest.ts +++ /dev/null @@ -1,22 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { EntryFunctionId } from './EntryFunctionId'; -import type { MoveType } from './MoveType'; - -/** - * View request for the Move View Function API - */ -export type ViewRequest = { - function: EntryFunctionId; - /** - * Type arguments of the function - */ - type_arguments: Array; - /** - * Arguments of the function - */ - arguments: Array; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteModule.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteModule.ts deleted file mode 100644 index b032cc9136124..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteModule.ts +++ /dev/null @@ -1,19 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable 
*/ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { MoveModuleBytecode } from './MoveModuleBytecode'; - -/** - * Write a new module or update an existing one - */ -export type WriteModule = { - address: Address; - /** - * State key hash - */ - state_key_hash: string; - data: MoveModuleBytecode; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteResource.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteResource.ts deleted file mode 100644 index 2bccf200e8ff8..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteResource.ts +++ /dev/null @@ -1,19 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { Address } from './Address'; -import type { MoveResource } from './MoveResource'; - -/** - * Write a resource or update an existing one - */ -export type WriteResource = { - address: Address; - /** - * State key hash - */ - state_key_hash: string; - data: MoveResource; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSet.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSet.ts deleted file mode 100644 index ea06d3e6355aa..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSet.ts +++ /dev/null @@ -1,12 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { WriteSet_DirectWriteSet } from './WriteSet_DirectWriteSet'; -import type { WriteSet_ScriptWriteSet } from './WriteSet_ScriptWriteSet'; - -/** - * The associated writeset with a payload - */ -export type WriteSet = (WriteSet_ScriptWriteSet | WriteSet_DirectWriteSet); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange.ts deleted file mode 100644 index dfcc2cb630beb..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange.ts +++ /dev/null @@ -1,16 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { WriteSetChange_DeleteModule } from './WriteSetChange_DeleteModule'; -import type { WriteSetChange_DeleteResource } from './WriteSetChange_DeleteResource'; -import type { WriteSetChange_DeleteTableItem } from './WriteSetChange_DeleteTableItem'; -import type { WriteSetChange_WriteModule } from './WriteSetChange_WriteModule'; -import type { WriteSetChange_WriteResource } from './WriteSetChange_WriteResource'; -import type { WriteSetChange_WriteTableItem } from './WriteSetChange_WriteTableItem'; - -/** - * A final state change of a transaction on a resource or module - */ -export type WriteSetChange = (WriteSetChange_DeleteModule | WriteSetChange_DeleteResource | WriteSetChange_DeleteTableItem | WriteSetChange_WriteModule | WriteSetChange_WriteResource | WriteSetChange_WriteTableItem); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_DeleteModule.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_DeleteModule.ts deleted file mode 100644 index 3c4252969f572..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_DeleteModule.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { DeleteModule } from './DeleteModule'; - -export type WriteSetChange_DeleteModule = ({ - type: string; -} & DeleteModule); - diff --git 
a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_DeleteResource.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_DeleteResource.ts deleted file mode 100644 index bfb9924b661a3..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_DeleteResource.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { DeleteResource } from './DeleteResource'; - -export type WriteSetChange_DeleteResource = ({ - type: string; -} & DeleteResource); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_DeleteTableItem.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_DeleteTableItem.ts deleted file mode 100644 index d82340222522d..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_DeleteTableItem.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { DeleteTableItem } from './DeleteTableItem'; - -export type WriteSetChange_DeleteTableItem = ({ - type: string; -} & DeleteTableItem); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_WriteModule.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_WriteModule.ts deleted file mode 100644 index 924d279b8c26f..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_WriteModule.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { WriteModule } from './WriteModule'; - -export type WriteSetChange_WriteModule = ({ - type: string; -} & WriteModule); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_WriteResource.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_WriteResource.ts deleted file mode 100644 index e7d4f6be56188..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_WriteResource.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { WriteResource } from './WriteResource'; - -export type WriteSetChange_WriteResource = ({ - type: string; -} & WriteResource); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_WriteTableItem.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_WriteTableItem.ts deleted file mode 100644 index 087c9765a7e62..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetChange_WriteTableItem.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { WriteTableItem } from './WriteTableItem'; - -export type WriteSetChange_WriteTableItem = ({ - type: string; -} & WriteTableItem); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetPayload.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetPayload.ts deleted file mode 100644 index c15ea62f22d9c..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSetPayload.ts +++ /dev/null @@ -1,13 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { WriteSet } from './WriteSet'; - -/** - * A writeset payload, used only for genesis - */ -export type WriteSetPayload = { - write_set: WriteSet; -}; - diff --git 
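The generated `({ type: string } & X)` wrapper models deleted above are superseded by the flat `WriteSetChange` union added to `types/index.ts` later in this diff. A minimal sketch of how that flattened union can be narrowed is below; the `"write_resource"` discriminator string follows the Aptos REST API convention and the `"./types"` import path is only illustrative, as neither is spelled out in this change.

```ts
// Sketch (assumptions noted above): narrowing the hand-written WriteSetChange
// union from the new types/index.ts by its `type` discriminator field.
import type { WriteSetChange, WriteSetChangeWriteResource } from "./types";

function pickWrittenResources(changes: Array<WriteSetChange>): Array<WriteSetChangeWriteResource> {
  // `type` is declared as a plain `string` on every variant, so an explicit
  // user-defined type guard is needed for TypeScript to narrow the union.
  return changes.filter(
    (change): change is WriteSetChangeWriteResource => change.type === "write_resource",
  );
}
```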
a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSet_DirectWriteSet.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSet_DirectWriteSet.ts deleted file mode 100644 index 552e98d4833a5..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSet_DirectWriteSet.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { DirectWriteSet } from './DirectWriteSet'; - -export type WriteSet_DirectWriteSet = ({ - type: string; -} & DirectWriteSet); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSet_ScriptWriteSet.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSet_ScriptWriteSet.ts deleted file mode 100644 index e7304a8a8431d..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteSet_ScriptWriteSet.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { ScriptWriteSet } from './ScriptWriteSet'; - -export type WriteSet_ScriptWriteSet = ({ - type: string; -} & ScriptWriteSet); - diff --git a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteTableItem.ts b/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteTableItem.ts deleted file mode 100644 index 6fbaa2b02a3b2..0000000000000 --- a/ecosystem/typescript/sdk_v2/src/types/generated/models/WriteTableItem.ts +++ /dev/null @@ -1,18 +0,0 @@ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ - -import type { DecodedTableData } from './DecodedTableData'; -import type { HexEncodedBytes } from './HexEncodedBytes'; - -/** - * Change set to write a table item - */ -export type WriteTableItem = { - state_key_hash: string; - handle: HexEncodedBytes; - key: HexEncodedBytes; - value: HexEncodedBytes; - data?: DecodedTableData; -}; - diff --git a/ecosystem/typescript/sdk_v2/src/types/index.ts b/ecosystem/typescript/sdk_v2/src/types/index.ts index 48e9ab7bdea01..f2e86a38ac06e 100644 --- a/ecosystem/typescript/sdk_v2/src/types/index.ts +++ b/ecosystem/typescript/sdk_v2/src/types/index.ts @@ -1,11 +1,26 @@ import { Network } from "../utils/api-endpoints"; -import { MoveFunction, MoveStruct } from "./generated"; -export { Transaction } from "./generated"; +/** + * Hex data as input to a function + */ +export type HexInput = string | Uint8Array; +/** + * BCS types + */ +export type Uint8 = number; +export type Uint16 = number; +export type Uint32 = number; +export type Uint64 = bigint; +export type Uint128 = bigint; +export type Uint256 = bigint; export type AnyNumber = number | bigint; -export type HexInput = string | Uint8Array; +/** + * Set of configuration options that can be provided when initializing the SDK. + * The purpose of these options is to configure various aspects of the SDK's + * behavior and interaction with the Aptos network + */ export type AptosSettings = { readonly network: Network; @@ -29,6 +44,10 @@ export interface PaginationArgs { limit?: number; } +/** + * QUERY TYPES + */ + /** * A configuration object we can pass with the request to the server. * @@ -66,14 +85,22 @@ export type AptosRequest = { overrides?: ClientConfig; }; +/** + * Specifies ledger version of transactions. 
By default latest version will be used + */ +export type LedgerVersion = { + ledgerVersion?: AnyNumber; +}; + +/** + * RESPONSE TYPES + */ + export type MoveResource = { type: MoveResourceType; data: {}; }; -export type MoveResourceType = `${string}::${string}::${string}`; -export type MoveModuleId = `${string}::${string}`; - export type AccountData = { sequence_number: string; authentication_key: string; @@ -84,6 +111,455 @@ export type MoveModuleBytecode = { abi?: MoveModule; }; +/** + * TRANSACTION TYPES + */ + +export type TransactionResponse = + | PendingTransactionResponse + | UserTransactionResponse + | GenesisTransactionResponse + | BlockMetadataTransactionResponse + | StateCheckpointTransactionResponse; + +export type PendingTransactionResponse = { + type: string; + hash: string; + sender: string; + sequence_number: string; + max_gas_amount: string; + gas_unit_price: string; + expiration_timestamp_secs: string; + payload: TransactionPayload; + signature?: TransactionSignature; +}; + +export type UserTransactionResponse = { + type: string; + version: string; + hash: string; + state_change_hash: string; + event_root_hash: string; + state_checkpoint_hash?: string; + gas_used: string; + /** + * Whether the transaction was successful + */ + success: boolean; + /** + * The VM status of the transaction, can tell useful information in a failure + */ + vm_status: string; + accumulator_root_hash: string; + /** + * Final state of resources changed by the transaction + */ + changes: Array; + sender: string; + sequence_number: string; + max_gas_amount: string; + gas_unit_price: string; + expiration_timestamp_secs: string; + payload: TransactionPayload; + signature?: TransactionSignature; + /** + * Events generated by the transaction + */ + events: Array; + timestamp: string; +}; + +export type GenesisTransactionResponse = { + type: string; + version: string; + hash: string; + state_change_hash: string; + event_root_hash: string; + state_checkpoint_hash?: string; + gas_used: string; + /** + * Whether the transaction was successful + */ + success: boolean; + /** + * The VM status of the transaction, can tell useful information in a failure + */ + vm_status: string; + accumulator_root_hash: string; + /** + * Final state of resources changed by the transaction + */ + changes: Array; + payload: GenesisPayload; + /** + * Events emitted during genesis + */ + events: Array; +}; + +export type BlockMetadataTransactionResponse = { + type: string; + version: string; + hash: string; + state_change_hash: string; + event_root_hash: string; + state_checkpoint_hash?: string; + gas_used: string; + /** + * Whether the transaction was successful + */ + success: boolean; + /** + * The VM status of the transaction, can tell useful information in a failure + */ + vm_status: string; + accumulator_root_hash: string; + /** + * Final state of resources changed by the transaction + */ + changes: Array; + id: string; + epoch: string; + round: string; + /** + * The events emitted at the block creation + */ + events: Array; + /** + * Previous block votes + */ + previous_block_votes_bitvec: Array; + proposer: string; + /** + * The indices of the proposers who failed to propose + */ + failed_proposer_indices: Array; + timestamp: string; +}; + +export type StateCheckpointTransactionResponse = { + type: string; + version: string; + hash: string; + state_change_hash: string; + event_root_hash: string; + state_checkpoint_hash?: string; + gas_used: string; + /** + * Whether the transaction was successful + */ + success: boolean; 
+ /** + * The VM status of the transaction, can tell useful information in a failure + */ + vm_status: string; + accumulator_root_hash: string; + /** + * Final state of resources changed by the transaction + */ + changes: Array; + timestamp: string; +}; + +/** + * WRITESET CHANGE TYPES + */ + +export type WriteSetChange = + | WriteSetChangeDeleteModule + | WriteSetChangeDeleteResource + | WriteSetChangeDeleteTableItem + | WriteSetChangeWriteModule + | WriteSetChangeWriteResource + | WriteSetChangeWriteTableItem; + +export type WriteSetChangeDeleteModule = { + type: string; + address: string; + /** + * State key hash + */ + state_key_hash: string; + module: MoveModuleId; +}; + +export type WriteSetChangeDeleteResource = { + type: string; + address: string; + state_key_hash: string; + resource: string; +}; + +export type WriteSetChangeDeleteTableItem = { + type: string; + state_key_hash: string; + handle: string; + key: string; + data?: DeletedTableData; +}; + +export type WriteSetChangeWriteModule = { + type: string; + address: string; + state_key_hash: string; + data: MoveModuleBytecode; +}; + +export type WriteSetChangeWriteResource = { + type: string; + address: string; + state_key_hash: string; + data: MoveResource; +}; + +export type WriteSetChangeWriteTableItem = { + type: string; + state_key_hash: string; + handle: string; + key: string; + value: string; + data?: DecodedTableData; +}; + +export type DecodedTableData = { + /** + * Key of table in JSON + */ + key: any; + /** + * Type of key + */ + key_type: string; + /** + * Value of table in JSON + */ + value: any; + /** + * Type of value + */ + value_type: string; +}; + +/** + * Deleted table data + */ +export type DeletedTableData = { + /** + * Deleted key + */ + key: any; + /** + * Deleted key type + */ + key_type: string; +}; + +export type TransactionPayload = EntryFunctionPayload | ScriptPayload | MultisigPayload; + +export type EntryFunctionPayload = { + type: string; + function: MoveResourceType; + /** + * Type arguments of the function + */ + type_arguments: Array; + /** + * Arguments of the function + */ + arguments: Array; +}; + +export type ScriptPayload = { + type: string; + code: MoveScriptBytecode; + /** + * Type arguments of the function + */ + type_arguments: Array; + /** + * Arguments of the function + */ + arguments: Array; +}; + +export type MultisigPayload = { + type: string; + multisig_address: string; + transaction_payload?: EntryFunctionPayload; +}; + +export type GenesisPayload = { + type: string; + write_set: WriteSet; +}; + +/** + * Move script bytecode + */ +export type MoveScriptBytecode = { + bytecode: string; + abi?: MoveFunction; +}; + +export type TransactionSignature = + | TransactionEd25519Signature + | TransactionMultiEd25519Signature + | TransactionMultiAgentSignature + | TransactioneePayerSignature; + +export type TransactionEd25519Signature = { + type: string; + public_key: string; + signature: string; +}; + +export type TransactionMultiEd25519Signature = { + type: string; + /** + * The public keys for the Ed25519 signature + */ + public_keys: Array; + /** + * Signature associated with the public keys in the same order + */ + signatures: Array; + /** + * The number of signatures required for a successful transaction + */ + threshold: number; + bitmap: string; +}; + +export type TransactionMultiAgentSignature = { + type: string; + sender: AccountSignature; + /** + * The other involved parties' addresses + */ + secondary_signer_addresses: Array; + /** + * The associated signatures, in the same 
order as the secondary addresses + */ + secondary_signers: Array; +}; + +export type TransactioneePayerSignature = { + type: string; + sender: AccountSignature; + /** + * The other involved parties' addresses + */ + secondary_signer_addresses: Array; + /** + * The associated signatures, in the same order as the secondary addresses + */ + secondary_signers: Array; + fee_payer_address: string; + fee_payer_signer: AccountSignature; +}; + +export type AccountSignature = AccountEd25519Signature | AccountMultiEd25519Signature; + +export type AccountEd25519Signature = { + type: string; + public_key: string; + signature: string; +}; + +export type AccountMultiEd25519Signature = { + type: string; + /** + * The public keys for the Ed25519 signature + */ + public_keys: Array; + /** + * Signature associated with the public keys in the same order + */ + signatures: Array; + /** + * The number of signatures required for a successful transaction + */ + threshold: number; + bitmap: string; +}; + +export type WriteSet = ScriptWriteSet | DirectWriteSet; + +export type ScriptWriteSet = { + type: string; + execute_as: string; + script: ScriptPayload; +}; + +export type DirectWriteSet = { + type: string; + changes: Array; + events: Array; +}; + +export type EventGuid = { + creation_number: string; + account_address: string; +}; + +export type Event = { + guid: EventGuid; + sequence_number: string; + type: string; + /** + * The JSON representation of the event + */ + data: any; +}; + +/** + * String representation of a on-chain Move struct type. + */ +export type MoveResourceType = `${string}::${string}::${string}`; + +/** + * Move module id is a string representation of Move module. + * Module name is case-sensitive. + */ +export type MoveModuleId = `${string}::${string}`; + +/** + * Move function visibility + */ +export enum MoveFunctionVisibility { + PRIVATE = "private", + PUBLIC = "public", + FRIEND = "friend", +} + +/** + * Move function ability + */ +export enum MoveAbility { + STORE = "store", + DROP = "drop", + KEY = "key", + COPY = "copy", +} + +/** + * Move abilities tied to the generic type param and associated with the function that uses it + */ +export type MoveFunctionGenericTypeParam = { + constraints: Array; +}; + +/** + * Move struct field + */ +export type MoveStructField = { + name: string; + type: string; +}; + +/** + * A Move module + */ export type MoveModule = { address: string; name: string; @@ -101,6 +577,53 @@ export type MoveModule = { structs: Array; }; -export type LedgerVersion = { - ledgerVersion?: AnyNumber; +/** + * A move struct + */ +export type MoveStruct = { + name: string; + /** + * Whether the struct is a native struct of Move + */ + is_native: boolean; + /** + * Abilities associated with the struct + */ + abilities: Array; + /** + * Generic types associated with the struct + */ + generic_type_params: Array; + /** + * Fields associated with the struct + */ + fields: Array; +}; + +/** + * Move function + */ +export type MoveFunction = { + name: string; + visibility: MoveFunctionVisibility; + /** + * Whether the function can be called as an entry function directly in a transaction + */ + is_entry: boolean; + /** + * Whether the function is a view function or not + */ + is_view: boolean; + /** + * Generic type params associated with the Move function + */ + generic_type_params: Array; + /** + * Parameters associated with the move function + */ + params: Array; + /** + * Return type of the function + */ + return: Array; }; diff --git 
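The hand-written response types above replace the deleted generated models with one discriminated union, `TransactionResponse`. A minimal consumer-side sketch of narrowing that union is shown below; the `"user_transaction"` literal is assumed from the Aptos REST API convention (the diff only declares `type` as `string`), and the `"./types"` import path is illustrative.

```ts
// Sketch: telling a committed user transaction apart from the other
// TransactionResponse variants via the shared `type` field.
import type { TransactionResponse, UserTransactionResponse } from "./types";

function isUserTransaction(txn: TransactionResponse): txn is UserTransactionResponse {
  // Assumed discriminator value; `type` is typed as a plain string in this diff.
  return txn.type === "user_transaction";
}

function summarize(txn: TransactionResponse): string {
  if (isUserTransaction(txn)) {
    // Committed user transactions expose version, gas usage and VM status.
    return `v${txn.version}: success=${txn.success}, gas_used=${txn.gas_used}`;
  }
  // Every other variant still carries `type` and `hash`.
  return `${txn.type} ${txn.hash}`;
}
```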
a/ecosystem/typescript/sdk_v2/tests/e2e/api/account.test.ts b/ecosystem/typescript/sdk_v2/tests/e2e/api/account.test.ts index e1a90509a673d..83ebd6b4bc18a 100644 --- a/ecosystem/typescript/sdk_v2/tests/e2e/api/account.test.ts +++ b/ecosystem/typescript/sdk_v2/tests/e2e/api/account.test.ts @@ -10,7 +10,7 @@ describe("account api", () => { const aptos = new Aptos(config); expect( async () => - await aptos.account.getInfo({ + await aptos.getAccountInfo({ accountAddress: "ca843279e3427144cead5e4d5999a3d0ca843279e3427144cead5e4d5999a3d0", }), ).rejects.toThrow(); @@ -19,7 +19,7 @@ describe("account api", () => { test("it throws when invalid account address", () => { const config = new AptosConfig({ network: Network.LOCAL }); const aptos = new Aptos(config); - expect(async () => await aptos.account.getInfo({ accountAddress: "0x123" })).rejects.toThrow(); + expect(async () => await aptos.getAccountInfo({ accountAddress: "0x123" })).rejects.toThrow(); }); }); @@ -27,7 +27,7 @@ describe("account api", () => { test("it fetches account data", async () => { const config = new AptosConfig({ network: Network.LOCAL }); const aptos = new Aptos(config); - const data = await aptos.account.getInfo({ + const data = await aptos.getAccountInfo({ accountAddress: "0x1", }); expect(data).toHaveProperty("sequence_number"); @@ -39,7 +39,7 @@ describe("account api", () => { test("it fetches account modules", async () => { const config = new AptosConfig({ network: Network.LOCAL }); const aptos = new Aptos(config); - const data = await aptos.account.getModules({ + const data = await aptos.getAccountModules({ accountAddress: "0x1", }); expect(data.length).toBeGreaterThan(0); @@ -48,7 +48,7 @@ describe("account api", () => { test("it fetches account module", async () => { const config = new AptosConfig({ network: Network.LOCAL }); const aptos = new Aptos(config); - const data = await aptos.account.getModule({ + const data = await aptos.getAccountModule({ accountAddress: "0x1", moduleName: "coin", }); @@ -58,7 +58,7 @@ describe("account api", () => { test("it fetches account resources", async () => { const config = new AptosConfig({ network: Network.LOCAL }); const aptos = new Aptos(config); - const data = await aptos.account.getResources({ + const data = await aptos.getAccountResources({ accountAddress: "0x1", }); expect(data.length).toBeGreaterThan(0); @@ -67,7 +67,7 @@ describe("account api", () => { test("it fetches account resource", async () => { const config = new AptosConfig({ network: Network.LOCAL }); const aptos = new Aptos(config); - const data = await aptos.account.getResource({ + const data = await aptos.getAccountResource({ accountAddress: "0x1", resourceType: "0x1::account::Account", }); @@ -79,7 +79,7 @@ describe("account api", () => { test("it fetches account data", async () => { const config = new AptosConfig({ network: Network.LOCAL }); const aptos = new Aptos(config); - const data = await aptos.account.getInfo({ + const data = await aptos.getAccountInfo({ accountAddress: new Uint8Array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ]), @@ -93,7 +93,7 @@ describe("account api", () => { test("it fetches account modules", async () => { const config = new AptosConfig({ network: Network.LOCAL }); const aptos = new Aptos(config); - const data = await aptos.account.getModules({ + const data = await aptos.getAccountModules({ accountAddress: new Uint8Array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ]), @@ 
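The test updates in this hunk reflect a flattened client surface: account queries move from a nested namespace (`aptos.account.getInfo`) onto the `Aptos` client itself (`aptos.getAccountInfo`). A condensed usage sketch follows; the `"../../src"` import path and the use of `Network.LOCAL` mirror the test file and are assumptions rather than part of this hunk.

```ts
// Sketch of the renamed account getters exercised by the tests above.
import { Aptos, AptosConfig, Network } from "../../src";

async function inspectFramework(): Promise<void> {
  const aptos = new Aptos(new AptosConfig({ network: Network.LOCAL }));
  // Queries now hang directly off the client instead of aptos.account.*
  const info = await aptos.getAccountInfo({ accountAddress: "0x1" });
  const modules = await aptos.getAccountModules({ accountAddress: "0x1" });
  console.log(info.sequence_number, modules.length);
}
```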
-104,7 +104,7 @@ describe("account api", () => { test("it fetches account module", async () => { const config = new AptosConfig({ network: Network.LOCAL }); const aptos = new Aptos(config); - const data = await aptos.account.getModule({ + const data = await aptos.getAccountModule({ accountAddress: new Uint8Array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ]), @@ -116,7 +116,7 @@ describe("account api", () => { test("it fetches account resources", async () => { const config = new AptosConfig({ network: Network.LOCAL }); const aptos = new Aptos(config); - const data = await aptos.account.getResources({ + const data = await aptos.getAccountResources({ accountAddress: new Uint8Array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ]), @@ -127,7 +127,7 @@ describe("account api", () => { test("it fetches account resource", async () => { const config = new AptosConfig({ network: Network.LOCAL }); const aptos = new Aptos(config); - const data = await aptos.account.getResource({ + const data = await aptos.getAccountResource({ accountAddress: "0x1", resourceType: "0x1::account::Account", }); diff --git a/ecosystem/typescript/sdk_v2/tests/unit/deserializer.test.ts b/ecosystem/typescript/sdk_v2/tests/unit/deserializer.test.ts index b421fdf93b2d7..2a13b0379c4a0 100644 --- a/ecosystem/typescript/sdk_v2/tests/unit/deserializer.test.ts +++ b/ecosystem/typescript/sdk_v2/tests/unit/deserializer.test.ts @@ -1,8 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -import { Serializer } from "../../src/bcs"; -import { Deserializer } from "../../src/bcs/deserializer"; +import { Serializable, Serializer, Deserializer, Deserializable } from "../../src/bcs"; describe("BCS Deserializer", () => { it("deserializes a non-empty string", () => { @@ -130,4 +129,187 @@ describe("BCS Deserializer", () => { deserializer.deserializeStr(); }).toThrow("Reached to the end of buffer"); }); + + it("deserializes a single deserializable class", () => { + // Define the MoveStruct class that implements the Deserializable interface + class MoveStruct implements Serializable { + constructor( + public name: string, + public description: string, + public enabled: boolean, + public vectorU8: Array, + ) {} + + serialize(serializer: Serializer): void { + serializer.serializeStr(this.name); + serializer.serializeStr(this.description); + serializer.serializeBool(this.enabled); + serializer.serializeU32AsUleb128(this.vectorU8.length); + this.vectorU8.forEach((n) => serializer.serializeU8(n)); + } + + static deserialize(deserializer: Deserializer): MoveStruct { + const name = deserializer.deserializeStr(); + const description = deserializer.deserializeStr(); + const enabled = deserializer.deserializeBool(); + const length = deserializer.deserializeUleb128AsU32(); + const vectorU8 = new Array(); + for (let i = 0; i < length; i++) { + vectorU8.push(deserializer.deserializeU8()); + } + return new MoveStruct(name, description, enabled, vectorU8); + } + } + // Construct a MoveStruct + const moveStruct = new MoveStruct("abc", "123", false, [1, 2, 3, 4]); + // Serialize a MoveStruct instance. 
+ const serializer = new Serializer(); + serializer.serialize(moveStruct); + const moveStructBcsBytes = serializer.toUint8Array(); + // Load the bytes into the Deserializer buffer + const deserializer = new Deserializer(moveStructBcsBytes); + // Deserialize the buffered bytes into an instance of MoveStruct + const deserializedMoveStruct = deserializer.deserialize(MoveStruct); + expect(deserializedMoveStruct.name).toEqual(moveStruct.name); + expect(deserializedMoveStruct.description).toEqual(moveStruct.description); + expect(deserializedMoveStruct.enabled).toEqual(moveStruct.enabled); + expect(deserializedMoveStruct.vectorU8).toEqual(moveStruct.vectorU8); + }); + + it("deserializes and composes an abstract Deserializable class instance from composed deserialize calls", () => { + abstract class MoveStruct { + abstract serialize(serializer: Serializer): void; + + static deserialize(deserializer: Deserializer): MoveStruct { + const index = deserializer.deserializeUleb128AsU32(); + switch (index) { + case 0: + return MoveStructA.load(deserializer); + case 1: + return MoveStructB.load(deserializer); + default: + throw new Error("Invalid variant index"); + } + } + } + + class MoveStructs implements Serializable { + constructor(public moveStruct1: MoveStruct, public moveStruct2: MoveStruct) {} + + serialize(serializer: Serializer): void { + serializer.serialize(this.moveStruct1); + serializer.serialize(this.moveStruct2); + } + + // deserialize two MoveStructs, potentially either MoveStructA or MoveStructB + static deserialize(deserializer: Deserializer): MoveStructs { + return new MoveStructs(MoveStruct.deserialize(deserializer), MoveStruct.deserialize(deserializer)); + } + } + + class MoveStructA implements Serializable { + constructor( + public name: string, + public description: string, + public enabled: boolean, + public vectorU8: Array, + ) {} + + serialize(serializer: Serializer): void { + // enum variant index for the abstract MoveStruct class + serializer.serializeU32AsUleb128(0); + serializer.serializeStr(this.name); + serializer.serializeStr(this.description); + serializer.serializeBool(this.enabled); + serializer.serializeU32AsUleb128(this.vectorU8.length); + this.vectorU8.forEach((n) => serializer.serializeU8(n)); + } + + static load(deserializer: Deserializer): MoveStructA { + const name = deserializer.deserializeStr(); + const description = deserializer.deserializeStr(); + const enabled = deserializer.deserializeBool(); + const length = deserializer.deserializeUleb128AsU32(); + const vectorU8 = new Array(); + for (let i = 0; i < length; i++) { + vectorU8.push(deserializer.deserializeU8()); + } + return new MoveStructA(name, description, enabled, vectorU8); + } + } + class MoveStructB implements Serializable { + constructor( + public moveStructA: MoveStructA, + public name: string, + public description: string, + public vectorU8: Array, + ) {} + + serialize(serializer: Serializer): void { + // enum variant index for the abstract MoveStruct class + serializer.serializeU32AsUleb128(1); + serializer.serialize(this.moveStructA); + serializer.serializeStr(this.name); + serializer.serializeStr(this.description); + serializer.serializeU32AsUleb128(this.vectorU8.length); + this.vectorU8.forEach((n) => serializer.serializeU8(n)); + } + + static load(deserializer: Deserializer): MoveStructB { + // note we cannot use MoveStructA.load here because we need to pop off the variant index first + const moveStructA = MoveStruct.deserialize(deserializer) as MoveStructA; + const name = 
deserializer.deserializeStr(); + const description = deserializer.deserializeStr(); + const length = deserializer.deserializeUleb128AsU32(); + const vectorU8 = new Array(); + for (let i = 0; i < length; i++) { + vectorU8.push(deserializer.deserializeU8()); + } + return new MoveStructB(moveStructA, name, description, vectorU8); + } + } + + // in a real e2e flow, we might get a stream of BCS-serialized bytes that we deserialize, + // say as a wallet in a dapp, we need to deserialize the payload and read its inner fields. + // The payload could be of multiple types, so we need to first deserialize the variant index + // and then deserialize the payload based on the variant index. + // + // The abstract MoveStruct class is used to demonstrate this process. + + // Construct a MoveStructA and a MoveStructB, which consists of a MoveStructA inside it + const moveStructA = new MoveStructA("abc", "123", false, [1, 2, 3, 4]); + const moveStructAInsideB = new MoveStructA("def", "456", true, [5, 6, 7, 8]); + const moveStructB = new MoveStructB(moveStructAInsideB, "ghi", "789", [9, 10, 11, 12]); + + // say for some reason we serialize two MoveStructs into a single byte array + // and we want to deserialize them back into two MoveStruct instances later + const serializer = new Serializer(); + serializer.serialize(moveStructA); + serializer.serialize(moveStructB); + const serializedBytes = serializer.toUint8Array(); + + // We receive the serializedBytes somewhere else, and + // load the bytes into the Deserializer buffer + const deserializer = new Deserializer(serializedBytes); + // we extract each one, and typecast them because we are expecting MoveStructA and then MoveStructB + const deserializedMoveStructA = deserializer.deserialize(MoveStruct) as MoveStructA; + const deserializedMoveStructB = deserializer.deserialize(MoveStruct) as MoveStructB; + + // This is the MoveStructA by itself + expect(deserializedMoveStructA.name).toEqual("abc"); + expect(deserializedMoveStructA.description).toEqual("123"); + expect(deserializedMoveStructA.enabled).toEqual(false); + expect(deserializedMoveStructA.vectorU8).toEqual([1, 2, 3, 4]); + + // This is the MoveStructB by itself + // Which consists of a MoveStructA and some other fields + expect(deserializedMoveStructB.moveStructA.name).toEqual("def"); + expect(deserializedMoveStructB.moveStructA.description).toEqual("456"); + expect(deserializedMoveStructB.moveStructA.enabled).toEqual(true); + expect(deserializedMoveStructB.moveStructA.vectorU8).toEqual([5, 6, 7, 8]); + + expect(deserializedMoveStructB.name).toEqual("ghi"); + expect(deserializedMoveStructB.description).toEqual("789"); + expect(deserializedMoveStructB.vectorU8).toEqual([9, 10, 11, 12]); + }); }); diff --git a/ecosystem/typescript/sdk_v2/tests/unit/serializer.test.ts b/ecosystem/typescript/sdk_v2/tests/unit/serializer.test.ts index 3277128def6fc..b8b1b0793e181 100644 --- a/ecosystem/typescript/sdk_v2/tests/unit/serializer.test.ts +++ b/ecosystem/typescript/sdk_v2/tests/unit/serializer.test.ts @@ -223,4 +223,53 @@ describe("BCS Serializer", () => { const serializer = new Serializer(-1); }).toThrow(); }); + + it("serializes multiple Serializable values", () => { + class MoveStructA implements Serializable { + constructor( + public name: string, + public description: string, + public enabled: boolean, + public vectorU8: Array, + ) {} + + serialize(serializer: Serializer): void { + serializer.serializeStr(this.name); + serializer.serializeStr(this.description); + serializer.serializeBool(this.enabled); + 
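+      // BCS encodes a vector as a ULEB128 length prefix followed by its elements,
+      // so write the length first and then each u8 in order.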
serializer.serializeU32AsUleb128(this.vectorU8.length); + this.vectorU8.forEach((n) => serializer.serializeU8(n)); + } + } + class MoveStructB implements Serializable { + constructor( + public moveStructA: MoveStructA, + public name: string, + public description: string, + public vectorU8: Array, + ) {} + + serialize(serializer: Serializer): void { + serializer.serialize(this.moveStructA); + serializer.serializeStr(this.name); + serializer.serializeStr(this.description); + serializer.serializeU32AsUleb128(this.vectorU8.length); + this.vectorU8.forEach((n) => serializer.serializeU8(n)); + } + } + + const moveStructA = new MoveStructA("abc", "123", false, [1, 2, 3, 4]); + const moveStructB = new MoveStructB(moveStructA, "def", "456", [5, 6, 7, 8]); + + const serializer = new Serializer(); + serializer.serialize(moveStructB); + const serializedBytes = serializer.toUint8Array(); + + expect(serializedBytes).toEqual( + new Uint8Array([ + 3, 0x61, 0x62, 0x63, 3, 0x31, 0x32, 0x33, 0x00, 4, 0x01, 0x02, 0x03, 0x04, 3, 0x64, 0x65, 0x66, 3, 0x34, 0x35, + 0x36, 4, 0x05, 0x06, 0x07, 0x08, + ]), + ); + }); }); diff --git a/execution/executor-benchmark/src/db_access.rs b/execution/executor-benchmark/src/db_access.rs index 1ea3fefd316fe..4b0e7ceb08907 100644 --- a/execution/executor-benchmark/src/db_access.rs +++ b/execution/executor-benchmark/src/db_access.rs @@ -6,29 +6,15 @@ use aptos_state_view::StateView; use aptos_storage_interface::state_view::DbStateView; use aptos_types::{ access_path::AccessPath, account_address::AccountAddress, state_store::state_key::StateKey, + write_set::TOTAL_SUPPLY_STATE_KEY, }; use move_core_types::{ identifier::Identifier, language_storage::{StructTag, TypeTag}, }; -use once_cell::sync::Lazy; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use std::str::FromStr; -// Note: in case this changes in the future, it doesn't have to be a constant, and can be read from -// genesis directly if necessary. -pub static TOTAL_SUPPLY_STATE_KEY: Lazy = Lazy::new(|| { - StateKey::table_item( - "1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca" - .parse() - .unwrap(), - vec![ - 6, 25, 220, 41, 160, 170, 200, 250, 20, 103, 20, 5, 142, 141, 214, 210, 208, 243, 189, - 245, 246, 51, 25, 7, 191, 145, 243, 172, 216, 30, 105, 53, - ], - ) -}); - #[derive(Debug, Default, Deserialize, Serialize)] pub struct CoinStore { pub coin: u64, @@ -138,7 +124,7 @@ impl DbAccessUtil { ) -> Result> { let value = state_view .get_state_value_bytes(state_key)? 
- .map(move |value| bcs::from_bytes(value.as_slice())); + .map(move |value| bcs::from_bytes(&value)); value.transpose().map_err(anyhow::Error::msg) } diff --git a/execution/executor-benchmark/src/native_executor.rs b/execution/executor-benchmark/src/native_executor.rs index 4726970386e05..824182c12eb75 100644 --- a/execution/executor-benchmark/src/native_executor.rs +++ b/execution/executor-benchmark/src/native_executor.rs @@ -117,11 +117,11 @@ impl NativeExecutor { let write_set = vec![ ( sender_account_key, - WriteOp::Modification(bcs::to_bytes(&sender_account)?), + WriteOp::Modification(bcs::to_bytes(&sender_account)?.into()), ), ( sender_coin_store_key, - WriteOp::Modification(bcs::to_bytes(&sender_coin_store)?), + WriteOp::Modification(bcs::to_bytes(&sender_coin_store)?.into()), ), // ( // TOTAL_SUPPLY_STATE_KEY.clone(), @@ -179,7 +179,7 @@ impl NativeExecutor { write_set.push(( recipient_coin_store_key, - WriteOp::Modification(bcs::to_bytes(&recipient_coin_store)?), + WriteOp::Modification(bcs::to_bytes(&recipient_coin_store)?.into()), )); } } else { @@ -215,11 +215,11 @@ impl NativeExecutor { write_set.push(( recipient_account_key, - WriteOp::Creation(bcs::to_bytes(&recipient_account)?), + WriteOp::Creation(bcs::to_bytes(&recipient_account)?.into()), )); write_set.push(( recipient_coin_store_key, - WriteOp::Creation(bcs::to_bytes(&recipient_coin_store)?), + WriteOp::Creation(bcs::to_bytes(&recipient_coin_store)?.into()), )); } diff --git a/execution/executor-service/Cargo.toml b/execution/executor-service/Cargo.toml index b00234d9621d5..90872f811f28a 100644 --- a/execution/executor-service/Cargo.toml +++ b/execution/executor-service/Cargo.toml @@ -25,10 +25,11 @@ aptos-secure-net = { workspace = true } aptos-state-view = { workspace = true } aptos-storage-interface = { workspace = true } aptos-types = { workspace = true } -aptos-vm = { workspace = true, features = ["testing"] } +aptos-vm = { workspace = true } bcs = { workspace = true } clap = { workspace = true } crossbeam-channel = { workspace = true } +dashmap = { workspace = true } itertools = { workspace = true } num_cpus = { workspace = true } rand = { workspace = true } diff --git a/execution/executor-service/src/lib.rs b/execution/executor-service/src/lib.rs index 031f7ab265efe..ac659215dc6dc 100644 --- a/execution/executor-service/src/lib.rs +++ b/execution/executor-service/src/lib.rs @@ -1,8 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use aptos_state_view::in_memory_state_view::InMemoryStateView; use aptos_types::{ - block_executor::partitioner::SubBlocksForShard, + block_executor::partitioner::{ShardId, SubBlocksForShard}, + state_store::{state_key::StateKey, state_value::StateValue}, transaction::{analyzed_transaction::AnalyzedTransaction, TransactionOutput}, vm_status::VMStatus, }; @@ -14,6 +14,8 @@ mod remote_cordinator_client; mod remote_cross_shard_client; mod remote_executor_client; pub mod remote_executor_service; +mod remote_state_view; +mod remote_state_view_service; #[cfg(test)] mod test_utils; #[cfg(test)] @@ -40,30 +42,43 @@ pub enum RemoteExecutionRequest { #[derive(Clone, Debug, Deserialize, Serialize)] pub struct ExecuteBlockCommand { pub(crate) sub_blocks: SubBlocksForShard, - // Currently we only support the state view backed by in-memory hashmap, which means that - // the controller needs to pre-read all the KV pairs from the storage and pass them to the - // executor service. 
In the future, we will support other types of state view, e.g., the - // state view backed by remote storage service, which will allow the executor service to read the KV pairs - // directly from the storage. - pub(crate) state_view: InMemoryStateView, pub(crate) concurrency_level: usize, pub(crate) maybe_block_gas_limit: Option, } impl ExecuteBlockCommand { - pub fn into( - self, - ) -> ( - SubBlocksForShard, - InMemoryStateView, - usize, - Option, - ) { + pub fn into(self) -> (SubBlocksForShard, usize, Option) { ( self.sub_blocks, - self.state_view, self.concurrency_level, self.maybe_block_gas_limit, ) } } + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct RemoteKVRequest { + pub(crate) shard_id: ShardId, + pub(crate) keys: Vec, +} + +impl RemoteKVRequest { + pub fn new(shard_id: ShardId, keys: Vec) -> Self { + Self { shard_id, keys } + } + + pub fn into(self) -> (ShardId, Vec) { + (self.shard_id, self.keys) + } +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct RemoteKVResponse { + pub(crate) inner: Vec<(StateKey, Option)>, +} + +impl RemoteKVResponse { + pub fn new(inner: Vec<(StateKey, Option)>) -> Self { + Self { inner } + } +} diff --git a/execution/executor-service/src/remote_cordinator_client.rs b/execution/executor-service/src/remote_cordinator_client.rs index 8b7e9a4045b46..4dc1311dffcf1 100644 --- a/execution/executor-service/src/remote_cordinator_client.rs +++ b/execution/executor-service/src/remote_cordinator_client.rs @@ -1,18 +1,23 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{RemoteExecutionRequest, RemoteExecutionResult}; +use crate::{ + remote_state_view::RemoteStateViewClient, ExecuteBlockCommand, RemoteExecutionRequest, + RemoteExecutionResult, +}; use aptos_secure_net::network_controller::{Message, NetworkController}; -use aptos_state_view::in_memory_state_view::InMemoryStateView; use aptos_types::{ - block_executor::partitioner::ShardId, transaction::TransactionOutput, vm_status::VMStatus, + block_executor::partitioner::ShardId, state_store::state_key::StateKey, + transaction::TransactionOutput, vm_status::VMStatus, }; use aptos_vm::sharded_block_executor::{ coordinator_client::CoordinatorClient, ExecutorShardCommand, }; use crossbeam_channel::{Receiver, Sender}; +use rayon::prelude::*; use std::{net::SocketAddr, sync::Arc}; pub struct RemoteCoordinatorClient { + state_view_client: Arc, command_rx: Receiver, result_tx: Sender, } @@ -29,22 +34,57 @@ impl RemoteCoordinatorClient { let result_tx = controller.create_outbound_channel(coordinator_address, execute_result_type); + let state_view_client = + RemoteStateViewClient::new(shard_id, controller, coordinator_address); + Self { + state_view_client: Arc::new(state_view_client), command_rx, result_tx, } } + + // Extract all the state keys from the execute block command. It is possible that there are duplicate state keys. + // We are not de-duplicating them here to avoid the overhead of deduplication. The state view server will deduplicate + // the state keys. 
+ fn extract_state_keys(command: &ExecuteBlockCommand) -> Vec { + command + .sub_blocks + .sub_block_iter() + .flat_map(|sub_block| { + sub_block + .transactions + .par_iter() + .map(|txn| { + let mut state_keys = vec![]; + for storage_location in txn + .txn() + .read_hints() + .iter() + .chain(txn.txn().write_hints().iter()) + { + state_keys.push(storage_location.state_key().clone()); + } + state_keys + }) + .flatten() + .collect::>() + }) + .collect::>() + } } -impl CoordinatorClient for RemoteCoordinatorClient { - fn receive_execute_command(&self) -> ExecutorShardCommand { +impl CoordinatorClient for RemoteCoordinatorClient { + fn receive_execute_command(&self) -> ExecutorShardCommand { let message = self.command_rx.recv().unwrap(); let request: RemoteExecutionRequest = bcs::from_bytes(&message.data).unwrap(); match request { RemoteExecutionRequest::ExecuteBlock(command) => { - let (sub_blocks, state_view, concurrency, gas_limit) = command.into(); + let state_keys = Self::extract_state_keys(&command); + self.state_view_client.init_for_block(state_keys); + let (sub_blocks, concurrency, gas_limit) = command.into(); ExecutorShardCommand::ExecuteSubBlocks( - Arc::new(state_view), + self.state_view_client.clone(), sub_blocks, concurrency, gas_limit, diff --git a/execution/executor-service/src/remote_executor_client.rs b/execution/executor-service/src/remote_executor_client.rs index 143f10960f4cb..72daa68c7568e 100644 --- a/execution/executor-service/src/remote_executor_client.rs +++ b/execution/executor-service/src/remote_executor_client.rs @@ -1,7 +1,10 @@ // Copyright © Aptos Foundation // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{ExecuteBlockCommand, RemoteExecutionRequest, RemoteExecutionResult}; +use crate::{ + remote_state_view_service::RemoteStateViewService, ExecuteBlockCommand, RemoteExecutionRequest, + RemoteExecutionResult, +}; use aptos_logger::trace; use aptos_secure_net::network_controller::{Message, NetworkController}; use aptos_state_view::StateView; @@ -13,12 +16,13 @@ use aptos_vm::sharded_block_executor::executor_client::{ExecutorClient, ShardedE use crossbeam_channel::{Receiver, Sender}; use std::{ net::SocketAddr, - ops::Deref, sync::{Arc, Mutex}, + thread, }; #[allow(dead_code)] pub struct RemoteExecutorClient { + state_view_service: Arc>, // Channels to send execute block commands to the executor shards. command_txs: Arc>>>, // Channels to receive execution results from the executor shards. @@ -27,6 +31,7 @@ pub struct RemoteExecutorClient { thread_pool: Arc, phantom: std::marker::PhantomData, + _join_handle: Option>, } #[allow(dead_code)] @@ -55,7 +60,23 @@ impl RemoteExecutorClient { (command_tx, result_rx) }) .unzip(); + + let state_view_service = Arc::new(RemoteStateViewService::new( + controller, + remote_shard_addresses, + None, + )); + + let state_view_service_clone = state_view_service.clone(); + + let join_handle = thread::Builder::new() + .name("remote-state_view-service".to_string()) + .spawn(move || state_view_service_clone.start()) + .unwrap(); + Self { + state_view_service, + _join_handle: Some(join_handle), command_txs: Arc::new(command_txs), result_rxs, thread_pool, @@ -87,34 +108,26 @@ impl ExecutorClient for RemoteExecutorC concurrency_level_per_shard: usize, maybe_block_gas_limit: Option, ) -> Result { - self.thread_pool.scope(|s| { - let (block, global_txns) = transactions.into(); - assert!( - global_txns.is_empty(), - "Global transactions are not supported yet in remote execution mode." 
- ); - for (shard_id, sub_blocks) in block.into_iter().enumerate() { - let state_view = state_view.clone(); - let senders = self.command_txs.clone(); - s.spawn(move |_| { - let execution_request = - RemoteExecutionRequest::ExecuteBlock(ExecuteBlockCommand { - sub_blocks, - // TODO(skedia): Instead of serializing the entire state view, we should - // serialize only the state values needed for the shard. - state_view: S::as_in_memory_state_view(state_view.deref()), - concurrency_level: concurrency_level_per_shard, - maybe_block_gas_limit, - }); + trace!("RemoteExecutorClient Sending block to shards"); + self.state_view_service.set_state_view(state_view); + let (sub_blocks, global_txns) = transactions.into(); + if !global_txns.is_empty() { + panic!("Global transactions are not supported yet"); + } + for (shard_id, sub_blocks) in sub_blocks.into_iter().enumerate() { + let senders = self.command_txs.clone(); + let execution_request = RemoteExecutionRequest::ExecuteBlock(ExecuteBlockCommand { + sub_blocks, + concurrency_level: concurrency_level_per_shard, + maybe_block_gas_limit, + }); - senders[shard_id] - .lock() - .unwrap() - .send(Message::new(bcs::to_bytes(&execution_request).unwrap())) - .unwrap() - }); - } - }); + senders[shard_id] + .lock() + .unwrap() + .send(Message::new(bcs::to_bytes(&execution_request).unwrap())) + .unwrap(); + } let execution_results = self.get_output_from_shards()?; diff --git a/execution/executor-service/src/remote_executor_service.rs b/execution/executor-service/src/remote_executor_service.rs index 781defb0996fb..0b9dd8f539bc3 100644 --- a/execution/executor-service/src/remote_executor_service.rs +++ b/execution/executor-service/src/remote_executor_service.rs @@ -3,10 +3,9 @@ use crate::{ remote_cordinator_client::RemoteCoordinatorClient, - remote_cross_shard_client::RemoteCrossShardClient, + remote_cross_shard_client::RemoteCrossShardClient, remote_state_view::RemoteStateViewClient, }; use aptos_secure_net::network_controller::NetworkController; -use aptos_state_view::in_memory_state_view::InMemoryStateView; use aptos_types::block_executor::partitioner::ShardId; use aptos_vm::sharded_block_executor::sharded_executor_service::ShardedExecutorService; use std::{net::SocketAddr, sync::Arc}; @@ -15,7 +14,7 @@ use std::{net::SocketAddr, sync::Arc}; /// the remote executor client and executes the block locally and returns the result. 
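// High-level flow of remote block execution with the remote state view:
// 1. The coordinator's RemoteExecutorClient runs a RemoteStateViewService on its own thread
//    and sends each shard an ExecuteBlockCommand that carries only the sub-blocks, the
//    concurrency level and the optional block gas limit (no serialized state view).
// 2. Each shard's RemoteCoordinatorClient extracts the state keys from the transactions'
//    read/write hints and calls RemoteStateViewClient::init_for_block to pre-fetch them.
// 3. The RemoteStateViewClient batches the keys into RemoteKVRequest messages; the
//    coordinator's RemoteStateViewService answers with RemoteKVResponse messages, which a
//    per-shard RemoteStateValueReceiver thread uses to populate the local RemoteStateView.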
pub struct ExecutorService { controller: NetworkController, - executor_service: Arc>, + executor_service: Arc>, } impl ExecutorService { diff --git a/execution/executor-service/src/remote_state_view.rs b/execution/executor-service/src/remote_state_view.rs new file mode 100644 index 0000000000000..e6d6aaf64db93 --- /dev/null +++ b/execution/executor-service/src/remote_state_view.rs @@ -0,0 +1,218 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 +use crate::{RemoteKVRequest, RemoteKVResponse}; +use aptos_secure_net::network_controller::{Message, NetworkController}; +use aptos_types::state_store::state_key::StateKey; +use aptos_vm::sharded_block_executor::remote_state_value::RemoteStateValue; +use crossbeam_channel::{Receiver, Sender}; +use std::{ + net::SocketAddr, + sync::{Arc, RwLock}, + thread, +}; + +extern crate itertools; +use anyhow::Result; +use aptos_logger::trace; +use aptos_state_view::TStateView; +use aptos_types::{ + block_executor::partitioner::ShardId, + state_store::{state_storage_usage::StateStorageUsage, state_value::StateValue}, +}; +use dashmap::DashMap; + +pub static REMOTE_STATE_KEY_BATCH_SIZE: usize = 50000; + +pub struct RemoteStateView { + state_values: DashMap, +} + +impl RemoteStateView { + pub fn new() -> Self { + Self { + state_values: DashMap::new(), + } + } + + pub fn has_state_key(&self, state_key: &StateKey) -> bool { + self.state_values.contains_key(state_key) + } + + pub fn set_state_value(&self, state_key: &StateKey, state_value: Option) { + self.state_values + .get(state_key) + .unwrap() + .set_value(state_value); + } + + pub fn insert_state_key(&self, state_key: StateKey) { + self.state_values + .entry(state_key) + .or_insert(RemoteStateValue::waiting()); + } + + pub fn get_state_value(&self, state_key: &StateKey) -> Result> { + if let Some(value) = self.state_values.get(state_key) { + let value_clone = value.clone(); + // It is possible that the value is not ready yet and the get_value call blocks. In that + // case we explicitly drop the value to relinquish the read lock on the value. Cloning the + // value should be in expensive as this is just cloning the underlying Arc. 
+ drop(value); + return Ok(value_clone.get_value()); + } + Ok(None) + } +} + +pub struct RemoteStateViewClient { + shard_id: ShardId, + kv_tx: Arc>, + state_view: Arc>, + thread_pool: Arc, + _join_handle: Option>, +} + +impl RemoteStateViewClient { + pub fn new( + shard_id: ShardId, + controller: &mut NetworkController, + coordinator_address: SocketAddr, + ) -> Self { + let thread_pool = Arc::new( + rayon::ThreadPoolBuilder::new() + .thread_name(move |index| format!("remote-state-view-shard-{}-{}", shard_id, index)) + .num_threads(num_cpus::get()) + .build() + .unwrap(), + ); + let kv_request_type = "remote_kv_request"; + let kv_response_type = "remote_kv_response"; + let result_rx = controller.create_inbound_channel(kv_response_type.to_string()); + let command_tx = + controller.create_outbound_channel(coordinator_address, kv_request_type.to_string()); + let state_view = Arc::new(RwLock::new(RemoteStateView::new())); + let state_value_receiver = RemoteStateValueReceiver::new( + shard_id, + state_view.clone(), + result_rx, + thread_pool.clone(), + ); + + let join_handle = thread::Builder::new() + .name(format!("remote-kv-receiver-{}", shard_id)) + .spawn(move || state_value_receiver.start()) + .unwrap(); + + Self { + shard_id, + kv_tx: Arc::new(command_tx), + state_view, + thread_pool, + _join_handle: Some(join_handle), + } + } + + pub fn init_for_block(&self, state_keys: Vec) { + *self.state_view.write().unwrap() = RemoteStateView::new(); + self.pre_fetch_state_values(state_keys); + } + + fn pre_fetch_state_values(&self, state_keys: Vec) { + state_keys + .chunks(REMOTE_STATE_KEY_BATCH_SIZE) + .map(|state_keys_chunk| state_keys_chunk.to_vec()) + .for_each(|state_keys| { + let sender = self.kv_tx.clone(); + let shard_id = self.shard_id; + self.thread_pool.spawn(move || { + Self::send_state_value_request(shard_id, sender, state_keys); + }); + }); + state_keys.into_iter().for_each(|state_key| { + self.state_view.read().unwrap().insert_state_key(state_key); + }); + } + + fn send_state_value_request( + shard_id: ShardId, + sender: Arc>, + state_keys: Vec, + ) { + let request = RemoteKVRequest::new(shard_id, state_keys); + let request_message = bcs::to_bytes(&request).unwrap(); + sender.send(Message::new(request_message)).unwrap(); + } +} + +impl TStateView for RemoteStateViewClient { + type Key = StateKey; + + fn get_state_value(&self, state_key: &StateKey) -> Result> { + let state_view_reader = self.state_view.read().unwrap(); + if state_view_reader.has_state_key(state_key) { + // If the key is already in the cache then we return it. + return state_view_reader.get_state_value(state_key); + } + // If the value is not already in the cache then we pre-fetch it and wait for it to arrive. 
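+        // The value arrives asynchronously: the RemoteStateValueReceiver thread applies the
+        // coordinator's RemoteKVResponse via set_state_value, and RemoteStateValue::get_value
+        // blocks until that happens, so this lookup returns once the key has been served.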
+ self.pre_fetch_state_values(vec![state_key.clone()]); + state_view_reader.get_state_value(state_key) + } + + fn get_usage(&self) -> Result { + unimplemented!("get_usage is not implemented for RemoteStateView") + } +} + +struct RemoteStateValueReceiver { + shard_id: ShardId, + state_view: Arc>, + kv_rx: Receiver, + thread_pool: Arc, +} + +impl RemoteStateValueReceiver { + fn new( + shard_id: ShardId, + state_view: Arc>, + kv_rx: Receiver, + thread_pool: Arc, + ) -> Self { + Self { + shard_id, + state_view, + kv_rx, + thread_pool, + } + } + + fn start(&self) { + loop { + let message = self.kv_rx.recv().unwrap(); + let state_view = self.state_view.clone(); + let shard_id = self.shard_id; + self.thread_pool.spawn(move || { + Self::handle_message(shard_id, message, state_view); + }); + } + } + + fn handle_message( + shard_id: ShardId, + message: Message, + state_view: Arc>, + ) { + let response: RemoteKVResponse = bcs::from_bytes(&message.data).unwrap(); + let state_view_lock = state_view.read().unwrap(); + trace!( + "Received state values for shard {} with size {}", + shard_id, + response.inner.len() + ); + response + .inner + .into_iter() + .for_each(|(state_key, state_value)| { + state_view_lock.set_state_value(&state_key, state_value); + }); + } +} diff --git a/execution/executor-service/src/remote_state_view_service.rs b/execution/executor-service/src/remote_state_view_service.rs new file mode 100644 index 0000000000000..8f155ab3064b1 --- /dev/null +++ b/execution/executor-service/src/remote_state_view_service.rs @@ -0,0 +1,105 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 +use crate::{RemoteKVRequest, RemoteKVResponse}; +use aptos_secure_net::network_controller::{Message, NetworkController}; +use crossbeam_channel::{Receiver, Sender}; +use std::{ + net::SocketAddr, + sync::{Arc, RwLock}, +}; + +extern crate itertools; +use aptos_logger::trace; +use aptos_state_view::{StateView, TStateView}; +use itertools::Itertools; + +pub struct RemoteStateViewService { + kv_rx: Receiver, + kv_tx: Arc>>, + thread_pool: Arc, + state_view: Arc>>>, +} + +impl RemoteStateViewService { + pub fn new( + controller: &mut NetworkController, + remote_shard_addresses: Vec, + num_threads: Option, + ) -> Self { + let num_threads = num_threads.unwrap_or_else(num_cpus::get); + let thread_pool = Arc::new( + rayon::ThreadPoolBuilder::new() + .num_threads(num_threads) + .build() + .unwrap(), + ); + let kv_request_type = "remote_kv_request"; + let kv_response_type = "remote_kv_response"; + let result_rx = controller.create_inbound_channel(kv_request_type.to_string()); + let command_txs = remote_shard_addresses + .iter() + .map(|address| { + controller.create_outbound_channel(*address, kv_response_type.to_string()) + }) + .collect_vec(); + Self { + kv_rx: result_rx, + kv_tx: Arc::new(command_txs), + thread_pool, + state_view: Arc::new(RwLock::new(None)), + } + } + + pub fn set_state_view(&self, state_view: Arc) { + let mut state_view_lock = self.state_view.write().unwrap(); + *state_view_lock = Some(state_view); + } + + pub fn start(&self) { + loop { + let message = self.kv_rx.recv().unwrap(); + let state_view = self.state_view.clone(); + let kv_txs = self.kv_tx.clone(); + self.thread_pool.spawn(move || { + Self::handle_message(message, state_view, kv_txs); + }); + } + } + + pub fn handle_message( + message: Message, + state_view: Arc>>>, + kv_tx: Arc>>, + ) { + let req: RemoteKVRequest = bcs::from_bytes(&message.data).unwrap(); + let (shard_id, state_keys) = req.into(); + trace!( + "remote 
state view service - received request for shard {} with {} keys", + shard_id, + state_keys.len() + ); + let resp = state_keys + .into_iter() + .map(|state_key| { + let state_value = state_view + .read() + .unwrap() + .as_ref() + .unwrap() + .get_state_value(&state_key) + .unwrap(); + (state_key, state_value) + }) + .collect_vec(); + let len = resp.len(); + let resp = RemoteKVResponse::new(resp); + let resp = bcs::to_bytes(&resp).unwrap(); + trace!( + "remote state view service - sending response for shard {} with {} keys", + shard_id, + len + ); + let message = Message::new(resp); + kv_tx[shard_id].send(message).unwrap(); + } +} diff --git a/execution/executor-service/src/test_utils.rs b/execution/executor-service/src/test_utils.rs index 47e4c3c40fdde..377b9c90fc3ca 100644 --- a/execution/executor-service/src/test_utils.rs +++ b/execution/executor-service/src/test_utils.rs @@ -95,7 +95,7 @@ pub fn compare_txn_outputs( pub fn test_sharded_block_executor_no_conflict>( sharded_block_executor: ShardedBlockExecutor, ) { - let num_txns = 400; + let num_txns = 10; let num_shards = sharded_block_executor.num_shards(); let mut executor = FakeExecutor::from_head_genesis(); let mut transactions = Vec::new(); diff --git a/execution/executor-service/src/tests.rs b/execution/executor-service/src/tests.rs index 1542cfc036d9f..861604c3fafca 100644 --- a/execution/executor-service/src/tests.rs +++ b/execution/executor-service/src/tests.rs @@ -55,6 +55,7 @@ pub fn create_thread_remote_executor_shards( } #[test] +#[ignore] fn test_sharded_block_executor_no_conflict() { let num_shards = 8; let (mut controller, executor_client, _executor_services) = diff --git a/execution/executor-test-helpers/src/integration_test_impl.rs b/execution/executor-test-helpers/src/integration_test_impl.rs index 3d84a2e6521b3..5844062ac6ffe 100644 --- a/execution/executor-test-helpers/src/integration_test_impl.rs +++ b/execution/executor-test-helpers/src/integration_test_impl.rs @@ -5,6 +5,7 @@ use crate::{bootstrap_genesis, gen_block_id, gen_ledger_info_with_sigs}; use anyhow::{anyhow, ensure, Result}; use aptos_cached_packages::aptos_stdlib; +use aptos_config::config::DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD; use aptos_consensus_types::block::Block; use aptos_db::AptosDB; use aptos_executor::block_executor::BlockExecutor; @@ -542,7 +543,12 @@ pub fn create_db_and_executor>( Waypoint, ) { let (db, dbrw) = force_sharding - .then(|| DbReaderWriter::wrap(AptosDB::new_for_test_with_sharding(&path))) + .then(|| { + DbReaderWriter::wrap(AptosDB::new_for_test_with_sharding( + &path, + DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD, + )) + }) .unwrap_or_else(|| DbReaderWriter::wrap(AptosDB::new_for_test(&path))); let waypoint = bootstrap_genesis::(&dbrw, genesis).unwrap(); let executor = BlockExecutor::new(dbrw.clone()); diff --git a/execution/executor-types/src/error.rs b/execution/executor-types/src/error.rs index 96e2f8ca2227f..c1ad0d781fd84 100644 --- a/execution/executor-types/src/error.rs +++ b/execution/executor-types/src/error.rs @@ -9,7 +9,7 @@ use thiserror::Error; #[derive(Debug, Deserialize, Error, PartialEq, Eq, Serialize)] /// Different reasons for proposal rejection -pub enum Error { +pub enum ExecutorError { #[error("Cannot find speculation result for block id {0}")] BlockNotFound(HashValue), @@ -41,7 +41,7 @@ pub enum Error { CouldNotGetData, } -impl From for Error { +impl From for ExecutorError { fn from(error: anyhow::Error) -> Self { Self::InternalError { error: format!("{}", error), @@ -49,16 +49,18 @@ impl From for Error 
{ } } -impl From for Error { +impl From for ExecutorError { fn from(error: bcs::Error) -> Self { Self::SerializationError(format!("{}", error)) } } -impl From for Error { +impl From for ExecutorError { fn from(error: aptos_secure_net::Error) -> Self { Self::InternalError { error: format!("{}", error), } } } + +pub type ExecutorResult = Result; diff --git a/execution/executor-types/src/lib.rs b/execution/executor-types/src/lib.rs index 0a2e8630725fb..6ae206d04edd9 100644 --- a/execution/executor-types/src/lib.rs +++ b/execution/executor-types/src/lib.rs @@ -23,7 +23,7 @@ use aptos_types::{ }, write_set::WriteSet, }; -pub use error::Error; +pub use error::{ExecutorError, ExecutorResult}; pub use executed_chunk::ExecutedChunk; pub use ledger_update_output::LedgerUpdateOutput; pub use parsed_transaction_output::ParsedTransactionOutput; @@ -97,7 +97,7 @@ pub trait BlockExecutorTrait: Send + Sync { block: ExecutableBlock, parent_block_id: HashValue, maybe_block_gas_limit: Option, - ) -> Result { + ) -> ExecutorResult { let block_id = block.block_id; let state_checkpoint_output = self.execute_and_state_checkpoint(block, parent_block_id, maybe_block_gas_limit)?; @@ -110,14 +110,14 @@ pub trait BlockExecutorTrait: Send + Sync { block: ExecutableBlock, parent_block_id: HashValue, maybe_block_gas_limit: Option, - ) -> Result; + ) -> ExecutorResult; fn ledger_update( &self, block_id: HashValue, parent_block_id: HashValue, state_checkpoint_output: StateCheckpointOutput, - ) -> Result; + ) -> ExecutorResult; /// Saves eligible blocks to persistent storage. /// If we have multiple blocks and not all of them have signatures, we may send them to storage @@ -133,13 +133,13 @@ pub trait BlockExecutorTrait: Send + Sync { block_ids: Vec, ledger_info_with_sigs: LedgerInfoWithSignatures, save_state_snapshots: bool, - ) -> Result<(), Error>; + ) -> ExecutorResult<()>; fn commit_blocks( &self, block_ids: Vec, ledger_info_with_sigs: LedgerInfoWithSignatures, - ) -> Result<(), Error> { + ) -> ExecutorResult<()> { self.commit_blocks_ext( block_ids, ledger_info_with_sigs, diff --git a/execution/executor/Cargo.toml b/execution/executor/Cargo.toml index 8e9981c4a5ffe..463cd91099a80 100644 --- a/execution/executor/Cargo.toml +++ b/execution/executor/Cargo.toml @@ -30,6 +30,7 @@ aptos-types = { workspace = true } aptos-vm = { workspace = true } arr_macro = { workspace = true } bcs = { workspace = true } +bytes = { workspace = true } dashmap = { workspace = true } fail = { workspace = true } itertools = { workspace = true } diff --git a/execution/executor/src/block_executor.rs b/execution/executor/src/block_executor.rs index f96d3274fbe7d..e9b84198c731f 100644 --- a/execution/executor/src/block_executor.rs +++ b/execution/executor/src/block_executor.rs @@ -20,7 +20,7 @@ use anyhow::Result; use aptos_crypto::HashValue; use aptos_executor_types::{ execution_output::ExecutionOutput, state_checkpoint_output::StateCheckpointOutput, - BlockExecutorTrait, Error, StateComputeResult, + BlockExecutorTrait, ExecutorError, ExecutorResult, StateComputeResult, }; use aptos_experimental_runtimes::thread_manager::THREAD_MANAGER; use aptos_infallible::RwLock; @@ -116,7 +116,7 @@ where block: ExecutableBlock, parent_block_id: HashValue, maybe_block_gas_limit: Option, - ) -> Result { + ) -> ExecutorResult { self.maybe_initialize()?; self.inner .read() @@ -130,7 +130,7 @@ where block_id: HashValue, parent_block_id: HashValue, state_checkpoint_output: StateCheckpointOutput, - ) -> Result { + ) -> ExecutorResult { self.maybe_initialize()?; 
self.inner .read() @@ -144,7 +144,7 @@ where block_ids: Vec, ledger_info_with_sigs: LedgerInfoWithSignatures, save_state_snapshots: bool, - ) -> Result<(), Error> { + ) -> ExecutorResult<()> { self.inner .read() .as_ref() @@ -194,7 +194,7 @@ where block: ExecutableBlock, parent_block_id: HashValue, maybe_block_gas_limit: Option, - ) -> Result { + ) -> ExecutorResult { let _timer = APTOS_EXECUTOR_EXECUTE_BLOCK_SECONDS.start_timer(); let ExecutableBlock { block_id, @@ -206,7 +206,7 @@ where let parent_block = block_vec .pop() .expect("Must exist.") - .ok_or(Error::BlockNotFound(parent_block_id))?; + .ok_or(ExecutorError::BlockNotFound(parent_block_id))?; let parent_output = &parent_block.output; info!( LogSchema::new(LogEntry::BlockExecutor).block_id(block_id), @@ -242,7 +242,7 @@ where let chunk_output = { let _timer = APTOS_EXECUTOR_VM_EXECUTE_BLOCK_SECONDS.start_timer(); fail_point!("executor::vm_execute_block", |_| { - Err(Error::from(anyhow::anyhow!( + Err(ExecutorError::from(anyhow::anyhow!( "Injected error in vm_execute_block" ))) }); @@ -274,8 +274,12 @@ where block_id: HashValue, parent_block_id: HashValue, state_checkpoint_output: StateCheckpointOutput, - ) -> Result { + ) -> ExecutorResult { let _timer = APTOS_EXECUTOR_LEDGER_UPDATE_SECONDS.start_timer(); + info!( + LogSchema::new(LogEntry::BlockExecutor).block_id(block_id), + "ledger_update" + ); let committed_block_id = self.committed_block_id(); let mut block_vec = self .block_tree @@ -283,7 +287,7 @@ where let parent_block = block_vec .pop() .expect("Must exist.") - .ok_or(Error::BlockNotFound(parent_block_id))?; + .ok_or(ExecutorError::BlockNotFound(parent_block_id))?; // At this point of time two things must happen // 1. The block tree must also have the current block id with or without the ledger update output. // 2. We must have the ledger update output of the parent block. 
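// For call sites, the Error -> ExecutorError rename is mechanical: anything that returned
// Result<_, Error> from aptos_executor_types now uses the ExecutorResult alias. A minimal
// caller-side sketch (illustrative only; the helper name is hypothetical):
//
//     use aptos_crypto::HashValue;
//     use aptos_executor_types::{BlockExecutorTrait, ExecutorResult};
//     use aptos_types::ledger_info::LedgerInfoWithSignatures;
//
//     fn commit_and_propagate<E: BlockExecutorTrait>(
//         executor: &E,
//         block_ids: Vec<HashValue>,
//         ledger_info_with_sigs: LedgerInfoWithSignatures,
//     ) -> ExecutorResult<()> {
//         // commit_blocks now returns ExecutorResult<()> rather than Result<(), Error>.
//         executor.commit_blocks(block_ids, ledger_info_with_sigs)
//     }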
@@ -338,7 +342,7 @@ where block_ids: Vec, ledger_info_with_sigs: LedgerInfoWithSignatures, sync_commit: bool, - ) -> Result<(), Error> { + ) -> ExecutorResult<()> { let _timer = APTOS_EXECUTOR_COMMIT_BLOCKS_SECONDS.start_timer(); // Ensure the block ids are not empty @@ -383,7 +387,7 @@ where .sum(); let target_version = ledger_info_with_sigs.ledger_info().version(); if first_version + to_commit as u64 != target_version + 1 { - return Err(Error::BadNumTxnsToCommit { + return Err(ExecutorError::BadNumTxnsToCommit { first_version, to_commit, target_version, diff --git a/execution/executor/src/components/block_tree/mod.rs b/execution/executor/src/components/block_tree/mod.rs index 6ab04a53aecfe..4f29e6d7dcc9d 100644 --- a/execution/executor/src/components/block_tree/mod.rs +++ b/execution/executor/src/components/block_tree/mod.rs @@ -14,7 +14,7 @@ use crate::{ use anyhow::{anyhow, ensure, Result}; use aptos_consensus_types::block::Block as ConsensusBlock; use aptos_crypto::HashValue; -use aptos_executor_types::{execution_output::ExecutionOutput, Error, LedgerUpdateOutput}; +use aptos_executor_types::{execution_output::ExecutionOutput, ExecutorError, LedgerUpdateOutput}; use aptos_experimental_runtimes::thread_manager::THREAD_MANAGER; use aptos_infallible::Mutex; use aptos_logger::{debug, info}; @@ -204,7 +204,7 @@ impl BlockTree { let lookup_result = self.block_lookup.multi_get(ids)?; itertools::zip_eq(ids, lookup_result) - .map(|(id, res)| res.ok_or_else(|| Error::BlockNotFound(*id).into())) + .map(|(id, res)| res.ok_or_else(|| ExecutorError::BlockNotFound(*id).into())) .collect() } diff --git a/execution/executor/src/components/in_memory_state_calculator_v2.rs b/execution/executor/src/components/in_memory_state_calculator_v2.rs index b312471de2204..5d80d298664e8 100644 --- a/execution/executor/src/components/in_memory_state_calculator_v2.rs +++ b/execution/executor/src/components/in_memory_state_calculator_v2.rs @@ -24,6 +24,7 @@ use aptos_types::{ write_set::TransactionWrite, }; use arr_macro::arr; +use bytes::Bytes; use rayon::prelude::*; use std::collections::HashMap; @@ -39,16 +40,16 @@ impl<'a> CoreAccountStateView<'a> { } impl<'a> AccountView for CoreAccountStateView<'a> { - fn get_state_value(&self, state_key: &StateKey) -> Result>> { + fn get_state_value(&self, state_key: &StateKey) -> Result> { if let Some(v_opt) = self.updates[state_key.get_shard_id() as usize].get(state_key) { - return Ok(v_opt.as_ref().map(|x| x.bytes().to_vec())); + return Ok(v_opt.as_ref().map(StateValue::bytes).cloned()); } if let Some(entry) = self.base[state_key.get_shard_id() as usize] .get(state_key) .as_ref() { let state_value = entry.value().1.as_ref(); - return Ok(state_value.map(|x| x.bytes().to_vec())); + return Ok(state_value.map(StateValue::bytes).cloned()); } Ok(None) } diff --git a/execution/executor/src/mock_vm/mock_vm_test.rs b/execution/executor/src/mock_vm/mock_vm_test.rs index 21cbab6817b1e..c22557c7302a8 100644 --- a/execution/executor/src/mock_vm/mock_vm_test.rs +++ b/execution/executor/src/mock_vm/mock_vm_test.rs @@ -7,6 +7,7 @@ use anyhow::Result; use aptos_state_view::TStateView; use aptos_types::{ account_address::AccountAddress, + bytes::NumToBytes, state_store::{ state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, }, @@ -14,7 +15,6 @@ use aptos_types::{ }; use aptos_vm::VMExecutor; use std::collections::BTreeMap; - fn gen_address(index: u8) -> AccountAddress { AccountAddress::new([index; AccountAddress::LENGTH]) } @@ -55,11 +55,11 @@ fn 
test_mock_vm_different_senders() { [ ( StateKey::access_path(balance_ap(sender)), - WriteOp::Modification(amount.to_le_bytes().to_vec()) + WriteOp::Modification(amount.le_bytes()), ), ( StateKey::access_path(seqnum_ap(sender)), - WriteOp::Modification(1u64.to_le_bytes().to_vec()) + WriteOp::Modification(1u64.le_bytes()), ), ] .into_iter() @@ -90,11 +90,11 @@ fn test_mock_vm_same_sender() { [ ( StateKey::access_path(balance_ap(sender)), - WriteOp::Modification((amount * (i as u64 + 1)).to_le_bytes().to_vec()) + WriteOp::Modification((amount * (i as u64 + 1)).le_bytes()), ), ( StateKey::access_path(seqnum_ap(sender)), - WriteOp::Modification((i as u64 + 1).to_le_bytes().to_vec()) + WriteOp::Modification((i as u64 + 1).le_bytes()), ), ] .into_iter() @@ -128,15 +128,15 @@ fn test_mock_vm_payment() { [ ( StateKey::access_path(balance_ap(gen_address(0))), - WriteOp::Modification(50u64.to_le_bytes().to_vec()) + WriteOp::Modification(50u64.le_bytes()) ), ( StateKey::access_path(seqnum_ap(gen_address(0))), - WriteOp::Modification(2u64.to_le_bytes().to_vec()) + WriteOp::Modification(2u64.le_bytes()) ), ( StateKey::access_path(balance_ap(gen_address(1))), - WriteOp::Modification(150u64.to_le_bytes().to_vec()) + WriteOp::Modification(150u64.le_bytes()) ), ] .into_iter() diff --git a/execution/executor/src/mock_vm/mod.rs b/execution/executor/src/mock_vm/mod.rs index ee85fdcee3eca..7a62c68efb496 100644 --- a/execution/executor/src/mock_vm/mod.rs +++ b/execution/executor/src/mock_vm/mod.rs @@ -15,6 +15,7 @@ use aptos_types::{ account_address::AccountAddress, account_config::CORE_CODE_ADDRESS, block_executor::partitioner::{ExecutableTransactions, PartitionedTransactions}, + bytes::NumToBytes, chain_id::ChainId, contract_event::ContractEvent, event::EventKey, @@ -245,6 +246,7 @@ fn read_state_value_from_storage( state_view .get_state_value_bytes(&StateKey::access_path(access_path.clone())) .expect("Failed to query storage.") + .map(|bytes| bytes.to_vec()) } fn decode_bytes(bytes: &[u8]) -> u64 { @@ -267,14 +269,18 @@ fn gen_genesis_writeset() -> WriteSet { access_path_for_config(ValidatorSet::CONFIG_ID).expect("access path in test"); write_set.insert(( StateKey::access_path(validator_set_ap), - WriteOp::Modification(bcs::to_bytes(&ValidatorSet::new(vec![])).unwrap()), + WriteOp::Modification(bcs::to_bytes(&ValidatorSet::new(vec![])).unwrap().into()), )); write_set.insert(( StateKey::access_path(AccessPath::new( CORE_CODE_ADDRESS, ConfigurationResource::resource_path(), )), - WriteOp::Modification(bcs::to_bytes(&ConfigurationResource::default()).unwrap()), + WriteOp::Modification( + bcs::to_bytes(&ConfigurationResource::default()) + .unwrap() + .into(), + ), )); write_set .freeze() @@ -285,11 +291,11 @@ fn gen_mint_writeset(sender: AccountAddress, balance: u64, seqnum: u64) -> Write let mut write_set = WriteSetMut::default(); write_set.insert(( StateKey::access_path(balance_ap(sender)), - WriteOp::Modification(balance.to_le_bytes().to_vec()), + WriteOp::Modification(balance.le_bytes()), )); write_set.insert(( StateKey::access_path(seqnum_ap(sender)), - WriteOp::Modification(seqnum.to_le_bytes().to_vec()), + WriteOp::Modification(seqnum.le_bytes()), )); write_set.freeze().expect("mint writeset should be valid") } @@ -304,15 +310,15 @@ fn gen_payment_writeset( let mut write_set = WriteSetMut::default(); write_set.insert(( StateKey::access_path(balance_ap(sender)), - WriteOp::Modification(sender_balance.to_le_bytes().to_vec()), + WriteOp::Modification(sender_balance.le_bytes()), )); write_set.insert(( 
StateKey::access_path(seqnum_ap(sender)), - WriteOp::Modification(sender_seqnum.to_le_bytes().to_vec()), + WriteOp::Modification(sender_seqnum.le_bytes()), )); write_set.insert(( StateKey::access_path(balance_ap(recipient)), - WriteOp::Modification(recipient_balance.to_le_bytes().to_vec()), + WriteOp::Modification(recipient_balance.le_bytes()), )); write_set .freeze() diff --git a/execution/executor/src/tests/mod.rs b/execution/executor/src/tests/mod.rs index 4eaddfa55bc94..e62394c63718e 100644 --- a/execution/executor/src/tests/mod.rs +++ b/execution/executor/src/tests/mod.rs @@ -26,6 +26,7 @@ use aptos_types::{ account_address::AccountAddress, aggregate_signature::AggregateSignature, block_info::BlockInfo, + bytes::NumToBytes, chain_id::ChainId, ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, proof::definition::LeafCount, @@ -504,9 +505,9 @@ fn test_deleted_key_from_state_store() { let executor = TestExecutor::new(); let db = &executor.db; let dummy_state_key1 = StateKey::raw(String::from("test_key1").into_bytes()); - let dummy_value1 = 10u64.to_le_bytes().to_vec(); + let dummy_value1 = 10u64.le_bytes(); let dummy_state_key2 = StateKey::raw(String::from("test_key2").into_bytes()); - let dummy_value2 = 20u64.to_le_bytes().to_vec(); + let dummy_value2 = 20u64.le_bytes(); // Create test transaction, event and transaction output let transaction1 = create_test_transaction(0); let transaction2 = create_test_transaction(1); diff --git a/execution/executor/tests/db_bootstrapper_test.rs b/execution/executor/tests/db_bootstrapper_test.rs index d6c1f8962acc8..baef4f24d2665 100644 --- a/execution/executor/tests/db_bootstrapper_test.rs +++ b/execution/executor/tests/db_bootstrapper_test.rs @@ -227,14 +227,18 @@ fn test_new_genesis() { StateKey::access_path( access_path_for_config(ValidatorSet::CONFIG_ID).expect("access path in test"), ), - WriteOp::Modification(bcs::to_bytes(&ValidatorSet::new(vec![])).unwrap()), + WriteOp::Modification(bcs::to_bytes(&ValidatorSet::new(vec![])).unwrap().into()), ), ( StateKey::access_path(AccessPath::new( CORE_CODE_ADDRESS, ConfigurationResource::resource_path(), )), - WriteOp::Modification(bcs::to_bytes(&configuration.bump_epoch_for_test()).unwrap()), + WriteOp::Modification( + bcs::to_bytes(&configuration.bump_epoch_for_test()) + .unwrap() + .into(), + ), ), ( StateKey::access_path(AccessPath::new( @@ -248,7 +252,8 @@ fn test_new_genesis() { EventHandle::random(0), EventHandle::random(0), )) - .unwrap(), + .unwrap() + .into(), ), ), ]) diff --git a/experimental/execution/ptx-executor/src/finalizer.rs b/experimental/execution/ptx-executor/src/finalizer.rs index d63d88477c0cb..4906cb06a42bf 100644 --- a/experimental/execution/ptx-executor/src/finalizer.rs +++ b/experimental/execution/ptx-executor/src/finalizer.rs @@ -14,6 +14,7 @@ use aptos_state_view::StateView; use aptos_types::{ state_store::state_key::StateKey, transaction::TransactionOutput, write_set::TransactionWrite, }; +use aptos_vm::storage_adapter::AsExecutorView; use aptos_vm_types::output::VMOutput; use once_cell::sync::Lazy; use rayon::Scope; @@ -119,7 +120,7 @@ impl<'view> Worker<'view> { fn finalize_one(&mut self) { let vm_output = self.buffer.pop_front().unwrap().unwrap(); let txn_out = vm_output - .try_into_transaction_output(&self.state_view) + .try_into_transaction_output(&self.state_view.as_executor_view()) .unwrap(); for (key, op) in txn_out.write_set() { // TODO(ptx): hack: deal only with the total supply diff --git a/experimental/execution/ptx-executor/src/runner.rs 
b/experimental/execution/ptx-executor/src/runner.rs index 6731c58c1419f..ffe9f36285698 100644 --- a/experimental/execution/ptx-executor/src/runner.rs +++ b/experimental/execution/ptx-executor/src/runner.rs @@ -22,7 +22,7 @@ use aptos_types::{ }; use aptos_vm::{ adapter_common::{preprocess_transaction, VMAdapter}, - data_cache::AsMoveResolver, + storage_adapter::AsExecutorView, AptosVM, }; use aptos_vm_logging::log_schema::AdapterLogSchema; @@ -240,7 +240,7 @@ impl<'scope, 'view: 'scope, BaseView: StateView + Sync> Worker<'view, BaseView> // TODO(ptx): maybe warm up vm like done in AptosExecutorTask let vm = { let _timer = PER_WORKER_TIMER.timer_with(&[&idx, "vm_init"]); - AptosVM::new(&self.base_view.as_move_resolver()) + AptosVM::new_from_state_view(&self.base_view) }; loop { @@ -270,11 +270,13 @@ impl<'scope, 'view: 'scope, BaseView: StateView + Sync> Worker<'view, BaseView> preprocess_transaction::(transaction) }; drop(_pre); + + let executor_view = state_view.as_executor_view(); let vm_output = { let _vm = PER_WORKER_TIMER.timer_with(&[&idx, "run_txn_vm"]); vm.execute_single_transaction( &preprocessed_txn, - &vm.as_move_resolver(&state_view), + &vm.as_move_resolver(&executor_view), &log_context, ) }; diff --git a/network/benchmark/Cargo.toml b/network/benchmark/Cargo.toml new file mode 100644 index 0000000000000..7cd8d39e9ae61 --- /dev/null +++ b/network/benchmark/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "aptos-network-benchmark" +description = "Aptos network benchmark" +version = "0.1.0" +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +# Workspace inherited keys +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +aptos-config = { workspace = true } +aptos-logger = { workspace = true } +aptos-metrics-core = { workspace = true } +aptos-network = { workspace = true } +aptos-runtimes = { workspace = true } +aptos-time-service = { workspace = true } +aptos-types = { workspace = true } +async-channel = { workspace = true } +bytes = { workspace = true } +futures = { workspace = true } +once_cell = { workspace = true } +rand = { workspace = true } +serde = { workspace = true } +tokio = { workspace = true } diff --git a/network/benchmark/src/lib.rs b/network/benchmark/src/lib.rs new file mode 100644 index 0000000000000..9dfd3d24f55e5 --- /dev/null +++ b/network/benchmark/src/lib.rs @@ -0,0 +1,611 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_config::{ + config::NodeConfig, + network_id::{NetworkId, PeerNetworkId}, +}; +use aptos_logger::{ + debug, info, + prelude::{sample, SampleRate}, + warn, +}; +use aptos_metrics_core::{register_int_counter_vec, IntCounter, IntCounterVec}; +use aptos_network::{ + application::interface::{NetworkClient, NetworkClientInterface, NetworkServiceEvents}, + protocols::{network::Event, rpc::error::RpcError, wire::handshake::v1::ProtocolId}, +}; +use aptos_time_service::{TimeService, TimeServiceTrait}; +use aptos_types::{account_address::AccountAddress, PeerId}; +use bytes::Bytes; +use futures::{ + channel::oneshot::Sender, + stream::{FuturesUnordered, StreamExt}, +}; +use once_cell::sync::Lazy; +use rand::{rngs::OsRng, Rng}; +use serde::{Deserialize, Serialize}; +use std::{ops::DerefMut, sync::Arc, time::Duration}; +use tokio::{runtime::Handle, select, sync::RwLock}; + 
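+// Protocol summary: a sender emits NetbenchMessage::DataSend carrying a monotonically
+// increasing request_counter, its send time in microseconds and a random payload; the peer
+// answers with NetbenchMessage::DataReply echoing request_counter and request_send_micros.
+// Round-trip latency is then derived either from the echoed request_send_micros (RPC path)
+// or from the matching SendRecord kept in NetbenchSharedState (direct-send path).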
+#[derive(Clone, Debug, Deserialize, Serialize)] +#[allow(clippy::large_enum_variant)] +pub enum NetbenchMessage { + DataSend(NetbenchDataSend), + DataReply(NetbenchDataReply), +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct NetbenchDataSend { + pub request_counter: u64, // A monotonically increasing counter to verify responses + pub send_micros: u64, // micro seconds since some epoch at a moment just before this message is sent + pub data: Vec, // A vector of bytes to send in the request; zero length in reply +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct NetbenchDataReply { + pub request_counter: u64, // A monotonically increasing counter to verify responses + pub send_micros: u64, // micro seconds since some epoch at a moment just before this message is sent + pub request_send_micros: u64, // the send_micros from the previous message +} + +/// Counter for pending network events to the network benchmark service (server-side) +pub static PENDING_NETBENCH_NETWORK_EVENTS: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "aptos_netbench_pending_network_events", + "Counters for pending network events for benchmarking", + &["state"] + ) + .unwrap() +}); + +// Get messages from the network and quickly shuffle them to N threads of workers. +async fn source_loop( + network_requests: NetworkServiceEvents, + work: async_channel::Sender<(NetworkId, Event)>, +) { + let network_events: Vec<_> = network_requests + .into_network_and_events() + .into_iter() + .map(|(network_id, events)| events.map(move |event| (network_id, event))) + .collect(); + let mut network_events = futures::stream::select_all(network_events).fuse(); + + loop { + match network_events.next().await { + None => { + // fused stream will never return more + work.close(); + return; + }, + Some(x) => match work.send(x).await { + Ok(_) => {}, + Err(send_error) => { + warn!("netbench source_loop work send: {}", send_error); + }, + }, + }; + } +} + +async fn handle_direct( + network_client: &NetworkClient, + network_id: NetworkId, + peer_id: AccountAddress, + msg_wrapper: NetbenchMessage, + time_service: TimeService, + shared: Arc>, +) { + match msg_wrapper { + NetbenchMessage::DataSend(send) => { + let reply = NetbenchDataReply { + request_counter: send.request_counter, + send_micros: time_service.now_unix_time().as_micros() as u64, + request_send_micros: send.send_micros, + }; + let result = network_client.send_to_peer( + NetbenchMessage::DataReply(reply), + PeerNetworkId::new(network_id, peer_id), + ); + if let Err(err) = result { + direct_messages("reply_err"); + info!( + "netbench ds [{}] could not reply: {}", + send.request_counter, err + ); + } + }, + NetbenchMessage::DataReply(reply) => { + let receive_time = time_service.now_unix_time().as_micros() as u64; + let rec = { + let reader = shared.read().await; + reader.find(reply.request_counter) + }; + if rec.request_counter == reply.request_counter { + let micros = receive_time - rec.send_micros; + direct_messages("ok"); + direct_micros("ok", micros); + direct_bytes("ok", rec.bytes_sent as u64); + } else { + direct_messages("late"); + info!( + "netbench ds [{}] unknown bytes in > {} micros", + reply.request_counter, + receive_time - rec.send_micros + ) + } + }, + } +} + +async fn handle_rpc( + _peer_id: AccountAddress, + msg_wrapper: NetbenchMessage, + protocol_id: ProtocolId, + time_service: TimeService, + sender: Sender>, +) { + match msg_wrapper { + NetbenchMessage::DataSend(send) => { + let reply = NetbenchDataReply { + request_counter: 
send.request_counter, + send_micros: time_service.now_unix_time().as_micros() as u64, + request_send_micros: send.send_micros, + }; + let reply = NetbenchMessage::DataReply(reply); + let reply_bytes = match protocol_id.to_bytes(&reply) { + Ok(rb) => rb, + Err(_) => { + rpc_messages("err"); + return; + }, + }; + let reply_bytes: Bytes = reply_bytes.into(); + let result = sender.send(Ok(reply_bytes)); + if let Err(err) = result { + match err { + Ok(_) => {}, // what? Ok inside Err? + Err(err) => { + rpc_messages("err"); + info!("netbench rpc [{}] reply err: {}", send.request_counter, err); + }, + } + } + }, + NetbenchMessage::DataReply(_) => { + rpc_messages("err"); + }, + } +} + +/// handle work split out by source_loop() +async fn handler_task( + node_config: NodeConfig, + network_client: NetworkClient, + work_rx: async_channel::Receiver<(NetworkId, Event)>, + time_service: TimeService, + shared: Arc>, +) { + let config = node_config.netbench.unwrap(); + loop { + let (network_id, event) = match work_rx.recv().await { + Ok(v) => v, + Err(_) => { + // RecvError means source was closed, we're done here. + return; + }, + }; + match event { + Event::Message(peer_id, wat) => { + let msg_wrapper: NetbenchMessage = wat; + handle_direct( + &network_client, + network_id, + peer_id, + msg_wrapper, + time_service.clone(), + shared.clone(), + ) + .await; + }, + Event::RpcRequest(peer_id, msg_wrapper, protocol_id, sender) => { + handle_rpc( + peer_id, + msg_wrapper, + protocol_id, + time_service.clone(), + sender, + ) + .await; + }, + Event::NewPeer(wat) => { + if config.enable_direct_send_testing { + Handle::current().spawn(direct_sender( + node_config.clone(), + network_client.clone(), + time_service.clone(), + network_id, + wat.remote_peer_id, + shared.clone(), + )); + } + if config.enable_rpc_testing { + Handle::current().spawn(rpc_sender( + node_config.clone(), + network_client.clone(), + time_service.clone(), + network_id, + wat.remote_peer_id, + shared.clone(), + )); + } + }, + Event::LostPeer(_) => {}, // don't care + } + } +} + +/// run_netbench_service() does not return, it should be called by .spawn() +pub async fn run_netbench_service( + node_config: NodeConfig, + network_client: NetworkClient, + network_requests: NetworkServiceEvents, + time_service: TimeService, +) { + let shared = Arc::new(RwLock::new(NetbenchSharedState::new())); + let config = node_config.netbench.unwrap(); + let benchmark_service_threads = config.netbench_service_threads; + let num_threads = match benchmark_service_threads { + Some(x) => x, + None => match std::thread::available_parallelism() { + Ok(val) => { + let num_threads = val.get(); + debug!( + "netbench service running {:?} threads based on available parallelism", + num_threads + ); + num_threads + }, + Err(_) => { + debug!("netbench service running 1 thread as fallback"); + 1 + }, + }, + }; + let (work_sender, work_receiver) = async_channel::bounded(num_threads * 2); + let runtime_handle = Handle::current(); + let source_task = runtime_handle.spawn(source_loop(network_requests, work_sender)); + let mut handlers = vec![]; + for _ in 0..num_threads { + handlers.push(runtime_handle.spawn(handler_task( + node_config.clone(), + network_client.clone(), + work_receiver.clone(), + time_service.clone(), + shared.clone(), + ))); + } + if let Err(err) = source_task.await { + warn!("benchmark source_thread join: {}", err); + } + for hai in handlers { + if let Err(err) = hai.await { + warn!("benchmark handler_thread join: {}", err); + } + } +} + +// once every 0.1s log a 
+// Once every 0.1s, log a message for something that may be happening 10_000 times per second.
+const BLAB_MICROS: u64 = 100_000;
+
+pub async fn direct_sender(
+    node_config: NodeConfig,
+    network_client: NetworkClient<NetbenchMessage>,
+    time_service: TimeService,
+    network_id: NetworkId,
+    peer_id: PeerId,
+    shared: Arc<RwLock<NetbenchSharedState>>,
+) {
+    let config = node_config.netbench.unwrap();
+    let interval = Duration::from_nanos(1_000_000_000 / config.direct_send_per_second);
+    let ticker = time_service.interval(interval);
+    futures::pin_mut!(ticker);
+    let data_size = config.direct_send_data_size;
+    let mut rng = OsRng;
+    let mut blob = Vec::<u8>::with_capacity(data_size);
+
+    // random payload filler
+    for _ in 0..data_size {
+        blob.push(rng.gen());
+    }
+
+    let mut counter: u64 = rng.gen();
+
+    loop {
+        ticker.next().await;
+
+        counter += 1;
+        {
+            // tweak the random payload a little on every send
+            let counter_bytes: [u8; 8] = counter.to_le_bytes();
+            let (dest, _) = blob.deref_mut().split_at_mut(8);
+            dest.copy_from_slice(&counter_bytes);
+        }
+
+        let nowu = time_service.now_unix_time().as_micros() as u64;
+        let msg = NetbenchDataSend {
+            request_counter: counter,
+            send_micros: nowu,
+            data: blob.clone(),
+        };
+        {
+            shared.write().await.set(SendRecord {
+                request_counter: counter,
+                send_micros: nowu,
+                bytes_sent: blob.len(),
+            })
+        }
+        let wrapper = NetbenchMessage::DataSend(msg);
+        let result = network_client.send_to_peer(wrapper, PeerNetworkId::new(network_id, peer_id));
+        if let Err(err) = result {
+            direct_messages("serr");
+            info!(
+                "netbench [{},{}] direct send err: {}",
+                network_id, peer_id, err
+            );
+            return;
+        } else {
+            direct_messages("sent");
+        }
+
+        sample!(
+            SampleRate::Duration(Duration::from_micros(BLAB_MICROS)),
+            info!("netbench ds counter={}", counter)
+        );
+    }
+}
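Both send loops personalize each payload by overwriting its first 8 bytes with the request counter rather than regenerating the random blob. A tiny self-contained sketch of that stamping (illustrative only, not part of the change):

#[cfg(test)]
mod payload_stamp_sketch {
    #[test]
    fn counter_round_trips_through_payload_prefix() {
        let mut blob = vec![0u8; 32];
        let counter: u64 = 0xDEAD_BEEF;
        // Same idea as the split_at_mut() block above: stamp the low 8 bytes.
        blob[..8].copy_from_slice(&counter.to_le_bytes());
        // A receiver (or a test) can read the counter straight back out.
        let mut prefix = [0u8; 8];
        prefix.copy_from_slice(&blob[..8]);
        assert_eq!(u64::from_le_bytes(prefix), counter);
    }
}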
+pub async fn rpc_sender(
+    node_config: NodeConfig,
+    network_client: NetworkClient<NetbenchMessage>,
+    time_service: TimeService,
+    network_id: NetworkId,
+    peer_id: PeerId,
+    shared: Arc<RwLock<NetbenchSharedState>>,
+) {
+    let config = node_config.netbench.unwrap();
+    let interval = Duration::from_nanos(1_000_000_000 / config.rpc_per_second);
+    let ticker = time_service.interval(interval);
+    futures::pin_mut!(ticker);
+    // random payload filler
+    let data_size = config.rpc_data_size;
+    let mut blob = Vec::<u8>::with_capacity(data_size);
+    let mut rng = OsRng;
+    for _ in 0..data_size {
+        blob.push(rng.gen());
+    }
+
+    let mut counter: u64 = rng.gen();
+
+    let mut open_rpcs = FuturesUnordered::new();
+
+    loop {
+        select! {
+            _ = ticker.next() => {
+                if open_rpcs.len() >= config.rpc_in_flight {
+                    continue;
+                }
+                // do rpc send
+                counter += 1;
+                {
+                    // tweak the random payload a little on every send
+                    let counter_bytes: [u8; 8] = counter.to_le_bytes();
+                    let (dest, _) = blob.deref_mut().split_at_mut(8);
+                    dest.copy_from_slice(&counter_bytes);
+                }
+
+                let nowu = time_service.now_unix_time().as_micros() as u64;
+                let msg = NetbenchDataSend {
+                    request_counter: counter,
+                    send_micros: nowu,
+                    data: blob.clone(),
+                };
+                {
+                    shared.write().await.set(SendRecord {
+                        request_counter: counter,
+                        send_micros: nowu,
+                        bytes_sent: blob.len(),
+                    })
+                }
+                let wrapper = NetbenchMessage::DataSend(msg);
+                let result = network_client.send_to_peer_rpc(wrapper, Duration::from_secs(10), PeerNetworkId::new(network_id, peer_id));
+                rpc_messages("sent");
+                open_rpcs.push(result);
+
+                sample!(SampleRate::Duration(Duration::from_micros(BLAB_MICROS)), info!("netbench rpc counter={}", counter));
+            }
+            result = open_rpcs.next() => {
+                let result = match result {
+                    Some(subr) => subr,
+                    None => {
+                        continue
+                    }
+                };
+                // handle rpc result
+                match result {
+                    Err(err) => {
+                        info!("netbench [{},{}] rpc send err: {}", network_id, peer_id, err);
+                        rpc_messages("err");
+                        return;
+                    }
+                    Ok(msg_wrapper) => {
+                        let nowu = time_service.now_unix_time().as_micros() as u64;
+                        if let NetbenchMessage::DataReply(msg) = msg_wrapper {
+                            let send_dt = nowu - msg.request_send_micros;
+                            info!("netbench [{}] rpc at {} µs, took {} µs", msg.request_counter, nowu, send_dt);
+                            rpc_messages("ok");
+                            rpc_bytes("ok").inc_by(data_size as u64);
+                            rpc_micros("ok").inc_by(send_dt);
+                        } else {
+                            rpc_messages("bad");
+                            info!("netbench [{}] rpc garbage reply", counter);
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+pub struct NetbenchSharedState {
+    // Circular buffer of sent records
+    sent: Vec<SendRecord>,
+    // sent[sent_pos] is the next index to write
+    sent_pos: usize,
+}
+
+impl Default for NetbenchSharedState {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl NetbenchSharedState {
+    pub fn new() -> Self {
+        NetbenchSharedState {
+            sent: Vec::with_capacity(10000), // TODO: constant or config?
+            sent_pos: 0,
+        }
+    }
+
+    pub fn set(&mut self, sent: SendRecord) {
+        if self.sent.len() < self.sent.capacity() {
+            self.sent.push(sent);
+        } else {
+            self.sent[self.sent_pos] = sent;
+        }
+        self.sent_pos = (self.sent_pos + 1) % self.sent.capacity();
+    }
+
+    /// Return the record for request_counter, or {0, oldest send_micros} if it is not found.
+    /// Returning Option<SendRecord> might seem more natural, but the send_micros field is
+    /// used to report the oldest known send time when there is no request_counter match.
+    pub fn find(&self, request_counter: u64) -> SendRecord {
+        if self.sent.is_empty() {
+            return SendRecord {
+                request_counter: 0,
+                send_micros: 0,
+                bytes_sent: 0,
+            };
+        }
+        let mut oldest = self.sent[0].send_micros;
+        let capacity = self.sent.len();
+        for i in 0..capacity {
+            let pos = (self.sent_pos + capacity - (1 + i)) % capacity;
+            let rec = self.sent[pos].clone();
+            if rec.request_counter == request_counter {
+                return rec;
+            }
+            if rec.send_micros < oldest {
+                oldest = rec.send_micros;
+            }
+        }
+        SendRecord {
+            request_counter: 0,
+            send_micros: oldest,
+            bytes_sent: 0,
+        }
+    }
+}
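A small sketch, not part of the change, of the lookup semantics documented above: a hit returns the stored record, while a miss returns request_counter 0 together with the oldest send time still held in the buffer:

#[cfg(test)]
mod shared_state_sketch {
    use super::*;

    #[test]
    fn find_returns_record_on_hit_and_sentinel_on_miss() {
        let mut state = NetbenchSharedState::new();
        state.set(SendRecord {
            request_counter: 1,
            send_micros: 100,
            bytes_sent: 8,
        });
        state.set(SendRecord {
            request_counter: 2,
            send_micros: 200,
            bytes_sent: 8,
        });
        // Hit: the stored record comes back.
        assert_eq!(state.find(2).send_micros, 200);
        // Miss: request_counter 0 plus the oldest known send time.
        let miss = state.find(99);
        assert_eq!(miss.request_counter, 0);
        assert_eq!(miss.send_micros, 100);
    }
}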
+#[derive(Clone)]
+pub struct SendRecord {
+    pub request_counter: u64,
+    pub send_micros: u64,
+    pub bytes_sent: usize,
+}
+
+pub static APTOS_NETWORK_BENCHMARK_DIRECT_MESSAGES: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "aptos_network_benchmark_direct_messages",
+        "Number of net benchmark direct messages",
+        &["state"]
+    )
+    .unwrap()
+});
+
+fn direct_messages(state_label: &'static str) {
+    APTOS_NETWORK_BENCHMARK_DIRECT_MESSAGES
+        .with_label_values(&[state_label])
+        .inc();
+}
+
+pub static APTOS_NETWORK_BENCHMARK_DIRECT_BYTES: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "aptos_network_benchmark_direct_bytes",
+        "Number of net benchmark direct bytes",
+        &["state"]
+    )
+    .unwrap()
+});
+
+fn direct_bytes(state_label: &'static str, byte_count: u64) {
+    APTOS_NETWORK_BENCHMARK_DIRECT_BYTES
+        .with_label_values(&[state_label])
+        .inc_by(byte_count);
+}
+
+pub static APTOS_NETWORK_BENCHMARK_DIRECT_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "aptos_network_benchmark_direct_micros",
+        "Number of net benchmark direct micros",
+        &["state"]
+    )
+    .unwrap()
+});
+
+fn direct_micros(state_label: &'static str, micros: u64) {
+    APTOS_NETWORK_BENCHMARK_DIRECT_MICROS
+        .with_label_values(&[state_label])
+        .inc_by(micros);
+}
+
+pub static APTOS_NETWORK_BENCHMARK_RPC_MESSAGES: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "aptos_network_benchmark_rpc_messages",
+        "Number of net benchmark RPC messages",
+        &["state"]
+    )
+    .unwrap()
+});
+
+fn rpc_messages(state_label: &'static str) {
+    APTOS_NETWORK_BENCHMARK_RPC_MESSAGES
+        .with_label_values(&[state_label])
+        .inc();
+}
+
+pub static APTOS_NETWORK_BENCHMARK_RPC_BYTES: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "aptos_network_benchmark_rpc_bytes",
+        "Number of net benchmark RPC bytes transferred",
+        &["state"]
+    )
+    .unwrap()
+});
+
+pub fn rpc_bytes(state_label: &'static str) -> IntCounter {
+    APTOS_NETWORK_BENCHMARK_RPC_BYTES.with_label_values(&[state_label])
+}
+
+pub static APTOS_NETWORK_BENCHMARK_RPC_MICROS: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "aptos_network_benchmark_rpc_micros",
+        "Number of net benchmark RPC microseconds used (hint: divide by _messages)",
+        &["state"]
+    )
+    .unwrap()
+});
+
+pub fn rpc_micros(state_label: &'static str) -> IntCounter {
+    APTOS_NETWORK_BENCHMARK_RPC_MICROS.with_label_values(&[state_label])
+}
diff --git a/network/framework/src/counters.rs b/network/framework/src/counters.rs
index f20bc4f49275a..88460078b927e 100644
--- a/network/framework/src/counters.rs
+++ b/network/framework/src/counters.rs
@@ -25,6 +25,10 @@ pub const SENT_LABEL: &str = "sent";
 pub const SUCCEEDED_LABEL: &str = "succeeded";
 pub const FAILED_LABEL: &str = "failed";
 
+// Direction labels
+pub const INBOUND_LABEL: &str = "inbound";
+pub const OUTBOUND_LABEL: &str = "outbound";
+
 // Serialization labels
 pub const SERIALIZATION_LABEL: &str = "serialization";
 pub const DESERIALIZATION_LABEL:
&str = "deserialization"; @@ -165,7 +169,8 @@ pub static APTOS_NETWORK_RPC_MESSAGES: Lazy = Lazy::new(|| { "role_type", "network_id", "peer_id", - "type", + "message_type", + "message_direction", "state" ]) .unwrap() @@ -173,14 +178,16 @@ pub static APTOS_NETWORK_RPC_MESSAGES: Lazy = Lazy::new(|| { pub fn rpc_messages( network_context: &NetworkContext, - type_label: &'static str, + message_type_label: &'static str, + message_direction_label: &'static str, state_label: &'static str, ) -> IntCounter { APTOS_NETWORK_RPC_MESSAGES.with_label_values(&[ network_context.role().as_str(), network_context.network_id().as_str(), network_context.peer_id().short_str().as_str(), - type_label, + message_type_label, + message_direction_label, state_label, ]) } @@ -189,21 +196,30 @@ pub static APTOS_NETWORK_RPC_BYTES: Lazy = Lazy::new(|| { register_int_counter_vec!( "aptos_network_rpc_bytes", "Number of RPC bytes transferred", - &["role_type", "network_id", "peer_id", "type", "state"] + &[ + "role_type", + "network_id", + "peer_id", + "message_type", + "message_direction", + "state" + ] ) .unwrap() }); pub fn rpc_bytes( network_context: &NetworkContext, - type_label: &'static str, + message_type_label: &'static str, + message_direction_label: &'static str, state_label: &'static str, ) -> IntCounter { APTOS_NETWORK_RPC_BYTES.with_label_values(&[ network_context.role().as_str(), network_context.network_id().as_str(), network_context.peer_id().short_str().as_str(), - type_label, + message_type_label, + message_direction_label, state_label, ]) } diff --git a/network/framework/src/peer/mod.rs b/network/framework/src/peer/mod.rs index 548e55432d047..9ca576055d2db 100644 --- a/network/framework/src/peer/mod.rs +++ b/network/framework/src/peer/mod.rs @@ -18,7 +18,7 @@ use crate::{ counters::{ self, network_application_inbound_traffic, network_application_outbound_traffic, - RECEIVED_LABEL, SENT_LABEL, + FAILED_LABEL, RECEIVED_LABEL, SENT_LABEL, }, logging::NetworkSchema, peer_manager::{PeerManagerError, TransportNotification}, @@ -523,10 +523,7 @@ where peer_id.short_str(), protocol_id ); - let data_len = data.len() as u64; - counters::direct_send_messages(&self.network_context, RECEIVED_LABEL).inc(); - counters::direct_send_bytes(&self.network_context, RECEIVED_LABEL).inc_by(data_len); - network_application_inbound_traffic(self.network_context, message.protocol_id, data_len); + self.update_inbound_direct_send_metrics(message.protocol_id, data.len() as u64); let notif = PeerNotification::RecvMessage(Message { protocol_id, @@ -544,6 +541,16 @@ where } } + /// Updates the inbound direct send metrics (e.g., messages and bytes received) + fn update_inbound_direct_send_metrics(&self, protocol_id: ProtocolId, data_len: u64) { + // Update the metrics for the received direct send message + counters::direct_send_messages(&self.network_context, RECEIVED_LABEL).inc(); + counters::direct_send_bytes(&self.network_context, RECEIVED_LABEL).inc_by(data_len); + + // Update the general network traffic metrics + network_application_inbound_traffic(self.network_context, protocol_id, data_len); + } + async fn handle_outbound_request( &mut self, request: PeerRequest, @@ -558,13 +565,9 @@ where // To send an outbound DirectSendMsg, we just bump some counters and // push it onto our outbound writer queue. 
PeerRequest::SendDirectSend(message) => { + // Create the direct send message let message_len = message.mdata.len(); let protocol_id = message.protocol_id; - network_application_outbound_traffic( - self.network_context, - protocol_id, - message_len as u64, - ); let message = NetworkMessage::DirectSendMsg(DirectSendMsg { protocol_id, priority: Priority::default(), @@ -573,11 +576,10 @@ where match write_reqs_tx.send(message).await { Ok(_) => { - counters::direct_send_messages(&self.network_context, SENT_LABEL).inc(); - counters::direct_send_bytes(&self.network_context, SENT_LABEL) - .inc_by(message_len as u64); + self.update_outbound_direct_send_metrics(protocol_id, message_len as u64); }, Err(e) => { + counters::direct_send_messages(&self.network_context, FAILED_LABEL).inc(); warn!( NetworkSchema::new(&self.network_context) .connection_metadata(&self.connection_metadata), @@ -592,11 +594,6 @@ where }, PeerRequest::SendRpc(request) => { let protocol_id = request.protocol_id; - network_application_outbound_traffic( - self.network_context, - protocol_id, - request.data.len() as u64, - ); if let Err(e) = self .outbound_rpcs .handle_outbound_request(request, write_reqs_tx) @@ -616,6 +613,16 @@ where } } + /// Updates the outbound direct send metrics (e.g., messages and bytes sent) + fn update_outbound_direct_send_metrics(&mut self, protocol_id: ProtocolId, data_len: u64) { + // Update the metrics for the sent direct send message + counters::direct_send_messages(&self.network_context, SENT_LABEL).inc(); + counters::direct_send_bytes(&self.network_context, SENT_LABEL).inc_by(data_len); + + // Update the general network traffic metrics + network_application_outbound_traffic(self.network_context, protocol_id, data_len); + } + fn shutdown(&mut self, reason: DisconnectReason) { // Set the state of the actor to `State::ShuttingDown` to true ensures that the peer actor // will terminate and close the connection. diff --git a/network/framework/src/protocols/rpc/mod.rs b/network/framework/src/protocols/rpc/mod.rs index e80626b180d98..c4c6ee21b6670 100644 --- a/network/framework/src/protocols/rpc/mod.rs +++ b/network/framework/src/protocols/rpc/mod.rs @@ -47,8 +47,8 @@ use crate::{ counters::{ self, network_application_inbound_traffic, network_application_outbound_traffic, - CANCELED_LABEL, DECLINED_LABEL, FAILED_LABEL, RECEIVED_LABEL, REQUEST_LABEL, - RESPONSE_LABEL, SENT_LABEL, + CANCELED_LABEL, DECLINED_LABEL, FAILED_LABEL, INBOUND_LABEL, OUTBOUND_LABEL, + RECEIVED_LABEL, REQUEST_LABEL, RESPONSE_LABEL, SENT_LABEL, }, logging::NetworkSchema, peer::PeerNotification, @@ -175,7 +175,8 @@ pub struct InboundRpcs { remote_peer_id: PeerId, /// The core async queue of pending inbound rpc tasks. The tasks are driven /// to completion by the `InboundRpcs::next_completed_response()` method. - inbound_rpc_tasks: FuturesUnordered>>, + inbound_rpc_tasks: + FuturesUnordered>>, /// A blanket timeout on all inbound rpc requests. If the application handler /// doesn't respond to the request before this timeout, the request will be /// dropped. @@ -213,15 +214,20 @@ impl InboundRpcs { // Drop new inbound requests if our completion queue is at capacity. if self.inbound_rpc_tasks.len() as u32 == self.max_concurrent_inbound_rpcs { - // Increase counter of declined responses and log warning. 
- counters::rpc_messages(network_context, RESPONSE_LABEL, DECLINED_LABEL).inc(); + // Increase counter of declined requests + counters::rpc_messages( + network_context, + REQUEST_LABEL, + INBOUND_LABEL, + DECLINED_LABEL, + ) + .inc(); return Err(RpcError::TooManyPending(self.max_concurrent_inbound_rpcs)); } let protocol_id = request.protocol_id; let request_id = request.request_id; let priority = request.priority; - let req_len = request.raw_request.len() as u64; trace!( NetworkSchema::new(network_context).remote_peer(&self.remote_peer_id), @@ -231,15 +237,12 @@ impl InboundRpcs { request_id, protocol_id, ); + self.update_inbound_rpc_request_metrics(protocol_id, request.raw_request.len() as u64); - // Collect counters for received request. - counters::rpc_messages(network_context, REQUEST_LABEL, RECEIVED_LABEL).inc(); - counters::rpc_bytes(network_context, REQUEST_LABEL, RECEIVED_LABEL).inc_by(req_len); - network_application_inbound_traffic(self.network_context, protocol_id, req_len); let timer = counters::inbound_rpc_handler_latency(network_context, protocol_id).start_timer(); - // Foward request to PeerManager for handling. + // Forward request to PeerManager for handling. let (response_tx, response_rx) = oneshot::channel(); let notif = PeerNotification::RecvRpc(InboundRpcRequest { protocol_id, @@ -247,7 +250,8 @@ impl InboundRpcs { res_tx: response_tx, }); if let Err(err) = peer_notifs_tx.push(protocol_id, notif) { - counters::rpc_messages(network_context, RESPONSE_LABEL, FAILED_LABEL).inc(); + counters::rpc_messages(network_context, REQUEST_LABEL, INBOUND_LABEL, FAILED_LABEL) + .inc(); return Err(err.into()); } @@ -258,11 +262,14 @@ impl InboundRpcs { .map(move |result| { // Flatten the errors let maybe_response = match result { - Ok(Ok(Ok(response_bytes))) => Ok(RpcResponse { - request_id, - priority, - raw_response: Vec::from(response_bytes.as_ref()), - }), + Ok(Ok(Ok(response_bytes))) => { + let rpc_response = RpcResponse { + request_id, + priority, + raw_response: Vec::from(response_bytes.as_ref()), + }; + Ok((rpc_response, protocol_id)) + }, Ok(Ok(Err(err))) => Err(err), Ok(Err(oneshot::Canceled)) => Err(RpcError::UnexpectedResponseChannelCancel), Err(timeout::Elapsed) => Err(RpcError::TimedOut), @@ -283,12 +290,34 @@ impl InboundRpcs { Ok(()) } + /// Updates the inbound RPC request metrics (e.g., messages and bytes received) + fn update_inbound_rpc_request_metrics(&self, protocol_id: ProtocolId, data_len: u64) { + // Update the metrics for the new RPC request + counters::rpc_messages( + &self.network_context, + REQUEST_LABEL, + INBOUND_LABEL, + RECEIVED_LABEL, + ) + .inc(); + counters::rpc_bytes( + &self.network_context, + REQUEST_LABEL, + INBOUND_LABEL, + RECEIVED_LABEL, + ) + .inc_by(data_len); + + // Update the general network traffic metrics + network_application_inbound_traffic(self.network_context, protocol_id, data_len); + } + /// Method for `Peer` actor to drive the pending inbound rpc tasks forward. /// The returned `Future` is a `FusedFuture` so it works correctly in a /// `futures::select!`. 
pub fn next_completed_response( &mut self, - ) -> impl Future> + FusedFuture + '_ { + ) -> impl Future> + FusedFuture + '_ { self.inbound_rpc_tasks.select_next_some() } @@ -298,13 +327,19 @@ impl InboundRpcs { pub async fn send_outbound_response( &mut self, write_reqs_tx: &mut aptos_channels::Sender, - maybe_response: Result, + maybe_response: Result<(RpcResponse, ProtocolId), RpcError>, ) -> Result<(), RpcError> { let network_context = &self.network_context; - let response = match maybe_response { + let (response, protocol_id) = match maybe_response { Ok(response) => response, Err(err) => { - counters::rpc_messages(network_context, RESPONSE_LABEL, FAILED_LABEL).inc(); + counters::rpc_messages( + network_context, + RESPONSE_LABEL, + OUTBOUND_LABEL, + FAILED_LABEL, + ) + .inc(); return Err(err); }, }; @@ -321,11 +356,32 @@ impl InboundRpcs { let message = NetworkMessage::RpcResponse(response); write_reqs_tx.send(message).await?; - // Collect counters for sent response. - counters::rpc_messages(network_context, RESPONSE_LABEL, SENT_LABEL).inc(); - counters::rpc_bytes(network_context, RESPONSE_LABEL, SENT_LABEL).inc_by(res_len); + // Update the outbound RPC response metrics + self.update_outbound_rpc_response_metrics(protocol_id, res_len); + Ok(()) } + + fn update_outbound_rpc_response_metrics(&self, protocol_id: ProtocolId, data_len: u64) { + // Update the metrics for the new RPC response + counters::rpc_messages( + &self.network_context, + RESPONSE_LABEL, + OUTBOUND_LABEL, + SENT_LABEL, + ) + .inc(); + counters::rpc_bytes( + &self.network_context, + RESPONSE_LABEL, + OUTBOUND_LABEL, + SENT_LABEL, + ) + .inc_by(data_len); + + // Update the general network traffic metrics + network_application_outbound_traffic(self.network_context, protocol_id, data_len); + } } /// `OutboundRpcs` handles new outbound rpc requests made from the application layer. @@ -396,13 +452,25 @@ impl OutboundRpcs { // Drop the outbound request if the application layer has already canceled. if application_response_tx.is_canceled() { - counters::rpc_messages(network_context, REQUEST_LABEL, CANCELED_LABEL).inc(); + counters::rpc_messages( + network_context, + REQUEST_LABEL, + OUTBOUND_LABEL, + CANCELED_LABEL, + ) + .inc(); return Err(RpcError::UnexpectedResponseChannelCancel); } // Drop new outbound requests if our completion queue is at capacity. if self.outbound_rpc_tasks.len() == self.max_concurrent_outbound_rpcs as usize { - counters::rpc_messages(network_context, REQUEST_LABEL, DECLINED_LABEL).inc(); + counters::rpc_messages( + network_context, + REQUEST_LABEL, + OUTBOUND_LABEL, + DECLINED_LABEL, + ) + .inc(); // Notify application that their request was dropped due to capacity. let err = Err(RpcError::TooManyPending(self.max_concurrent_outbound_rpcs)); let _ = application_response_tx.send(err); @@ -433,10 +501,8 @@ impl OutboundRpcs { }); write_reqs_tx.send(message).await?; - // Collect counters for requests sent. - counters::rpc_messages(network_context, REQUEST_LABEL, SENT_LABEL).inc(); - counters::rpc_bytes(network_context, REQUEST_LABEL, SENT_LABEL).inc_by(req_len); - network_application_outbound_traffic(self.network_context, protocol_id, req_len); + // Update the outbound RPC request metrics + self.update_outbound_rpc_request_metrics(protocol_id, req_len); // Create channel over which response is delivered to outbound_rpc_task. 
let (response_tx, response_rx) = oneshot::channel::(); @@ -505,6 +571,28 @@ impl OutboundRpcs { Ok(()) } + /// Updates the outbound RPC request metrics (e.g., messages and bytes sent) + fn update_outbound_rpc_request_metrics(&mut self, protocol_id: ProtocolId, data_len: u64) { + // Update the metrics for the new RPC request + counters::rpc_messages( + &self.network_context, + REQUEST_LABEL, + OUTBOUND_LABEL, + SENT_LABEL, + ) + .inc(); + counters::rpc_bytes( + &self.network_context, + REQUEST_LABEL, + OUTBOUND_LABEL, + SENT_LABEL, + ) + .inc_by(data_len); + + // Update the general network traffic metrics + network_application_outbound_traffic(self.network_context, protocol_id, data_len); + } + /// Method for `Peer` actor to drive the pending outbound rpc tasks forward. /// The returned `Future` is a `FusedFuture` so it works correctly in a /// `futures::select!`. @@ -535,9 +623,20 @@ impl OutboundRpcs { match result { Ok((latency, request_len)) => { - counters::rpc_messages(network_context, RESPONSE_LABEL, RECEIVED_LABEL).inc(); - counters::rpc_bytes(network_context, RESPONSE_LABEL, RECEIVED_LABEL) - .inc_by(request_len); + counters::rpc_messages( + network_context, + RESPONSE_LABEL, + INBOUND_LABEL, + RECEIVED_LABEL, + ) + .inc(); + counters::rpc_bytes( + network_context, + RESPONSE_LABEL, + INBOUND_LABEL, + RECEIVED_LABEL, + ) + .inc_by(request_len); trace!( NetworkSchema::new(network_context).remote_peer(peer_id), @@ -554,9 +653,21 @@ impl OutboundRpcs { // We don't log when the application has dropped the RPC // response channel because this is often expected (e.g., // on state sync subscription requests that timeout). - counters::rpc_messages(network_context, REQUEST_LABEL, CANCELED_LABEL).inc(); + counters::rpc_messages( + network_context, + REQUEST_LABEL, + OUTBOUND_LABEL, + CANCELED_LABEL, + ) + .inc(); } else { - counters::rpc_messages(network_context, REQUEST_LABEL, FAILED_LABEL).inc(); + counters::rpc_messages( + network_context, + REQUEST_LABEL, + OUTBOUND_LABEL, + FAILED_LABEL, + ) + .inc(); warn!( NetworkSchema::new(network_context).remote_peer(peer_id), "{} Error making outbound RPC request to {} (request_id {}). 
Error: {}", @@ -582,8 +693,7 @@ impl OutboundRpcs { let is_canceled = if let Some((protocol_id, response_tx)) = self.pending_outbound_rpcs.remove(&request_id) { - network_application_inbound_traffic( - self.network_context, + self.update_inbound_rpc_response_metrics( protocol_id, response.raw_response.len() as u64, ); @@ -612,4 +722,26 @@ impl OutboundRpcs { ); } } + + /// Updates the inbound RPC response metrics (e.g., messages and bytes received) + fn update_inbound_rpc_response_metrics(&self, protocol_id: ProtocolId, data_len: u64) { + // Update the metrics for the new RPC response + counters::rpc_messages( + &self.network_context, + RESPONSE_LABEL, + INBOUND_LABEL, + RECEIVED_LABEL, + ) + .inc(); + counters::rpc_bytes( + &self.network_context, + RESPONSE_LABEL, + INBOUND_LABEL, + RECEIVED_LABEL, + ) + .inc_by(data_len); + + // Update the general network traffic metrics + network_application_inbound_traffic(self.network_context, protocol_id, data_len); + } } diff --git a/network/framework/src/protocols/wire/handshake/v1/mod.rs b/network/framework/src/protocols/wire/handshake/v1/mod.rs index 629c23eacdfb4..fa787d66ab4e1 100644 --- a/network/framework/src/protocols/wire/handshake/v1/mod.rs +++ b/network/framework/src/protocols/wire/handshake/v1/mod.rs @@ -57,6 +57,8 @@ pub enum ProtocolId { PeerMonitoringServiceRpc = 10, ConsensusRpcCompressed = 11, ConsensusDirectSendCompressed = 12, + NetbenchDirectSend = 13, + NetbenchRpc = 14, } /// The encoding types for Protocols @@ -83,6 +85,8 @@ impl ProtocolId { PeerMonitoringServiceRpc => "PeerMonitoringServiceRpc", ConsensusRpcCompressed => "ConsensusRpcCompressed", ConsensusDirectSendCompressed => "ConsensusDirectSendCompressed", + NetbenchDirectSend => "NetbenchDirectSend", + NetbenchRpc => "NetbenchRpc", } } @@ -102,6 +106,8 @@ impl ProtocolId { ProtocolId::PeerMonitoringServiceRpc, ProtocolId::ConsensusRpcCompressed, ProtocolId::ConsensusDirectSendCompressed, + ProtocolId::NetbenchDirectSend, + ProtocolId::NetbenchRpc, ] } diff --git a/rust-toolchain b/rust-toolchain deleted file mode 100644 index 68bc7ff2a8ebc..0000000000000 --- a/rust-toolchain +++ /dev/null @@ -1 +0,0 @@ -1.71.1 diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 0000000000000..1c77f5f203eff --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,6 @@ +[toolchain] +channel = "1.71.1" + +# Note: we don't specify cargofmt in our toolchain because we rely on +# the nightly version of cargofmt and verify formatting in CI/CD. +components = [ "cargo", "clippy", "rustc", "rust-docs", "rust-std" ] diff --git a/scripts/dev_setup.sh b/scripts/dev_setup.sh index fb97d7aeae7ed..133392dfb3ba9 100755 --- a/scripts/dev_setup.sh +++ b/scripts/dev_setup.sh @@ -863,7 +863,7 @@ if [[ "$INSTALL_BUILD_TOOLS" == "false" ]] && \ INSTALL_BUILD_TOOLS="true" fi -if [ ! -f rust-toolchain ]; then +if [ ! -f rust-toolchain.toml ]; then echo "Unknown location. Please run this from the aptos-core repository. Abort." exit 1 fi @@ -947,7 +947,7 @@ if [[ "$INSTALL_BUILD_TOOLS" == "true" ]]; then install_lld install_rustup "$BATCH_MODE" - install_toolchain "$(cat ./rust-toolchain)" + install_toolchain "$(grep channel ./rust-toolchain.toml | grep -o '"[^"]\+"' | sed 's/"//g')" # TODO: Fix me. This feels hacky. 
install_rustup_components_and_nightly install_cargo_sort diff --git a/secure/net/src/network_controller/inbound_handler.rs b/secure/net/src/network_controller/inbound_handler.rs index 24b461314f948..2ed2ed952bbf6 100644 --- a/secure/net/src/network_controller/inbound_handler.rs +++ b/secure/net/src/network_controller/inbound_handler.rs @@ -4,7 +4,7 @@ use crate::{ network_controller::{error::Error, Message, MessageType, NetworkMessage}, NetworkServer, }; -use aptos_logger::error; +use aptos_logger::{error, warn}; use crossbeam_channel::Sender; use std::{ collections::HashMap, @@ -70,7 +70,7 @@ impl InboundHandler { // Send the message to the registered handler handler.send(message).unwrap(); } else { - println!("No handler registered for message type: {:?}", message_type); + warn!("No handler registered for message type: {:?}", message_type); } } @@ -90,7 +90,10 @@ impl InboundHandler { // Send the message to the registered handler handler.send(msg)?; } else { - println!("No handler registered for sender: {:?}", sender); + warn!( + "No handler registered for sender: {:?} and msg type {:?}", + sender, message_type + ); } Ok(()) } diff --git a/state-sync/aptos-data-client/Cargo.toml b/state-sync/aptos-data-client/Cargo.toml index 3e9d74287ed8b..bcaa29c32a86b 100644 --- a/state-sync/aptos-data-client/Cargo.toml +++ b/state-sync/aptos-data-client/Cargo.toml @@ -26,7 +26,9 @@ aptos-storage-service-client = { workspace = true } aptos-storage-service-types = { workspace = true } aptos-time-service = { workspace = true } aptos-types = { workspace = true } +arc-swap = { workspace = true } async-trait = { workspace = true } +dashmap = { workspace = true } futures = { workspace = true } itertools = { workspace = true } rand = { workspace = true } diff --git a/state-sync/aptos-data-client/src/client.rs b/state-sync/aptos-data-client/src/client.rs index b558d5520cd14..2d530a2dbbf7b 100644 --- a/state-sync/aptos-data-client/src/client.rs +++ b/state-sync/aptos-data-client/src/client.rs @@ -6,7 +6,7 @@ use crate::{ global_summary::GlobalDataSummary, interface::{ AptosDataClientInterface, Response, ResponseCallback, ResponseContext, ResponseError, - ResponseId, + ResponseId, SubscriptionRequestMetadata, }, logging::{LogEntry, LogEvent, LogSchema}, metrics, @@ -21,7 +21,7 @@ use aptos_config::{ network_id::PeerNetworkId, }; use aptos_id_generator::{IdGenerator, U64IdGenerator}; -use aptos_infallible::RwLock; +use aptos_infallible::Mutex; use aptos_logger::{debug, info, sample, sample::SampleRate, trace, warn}; use aptos_network::{application::interface::NetworkClient, protocols::network::RpcError}; use aptos_storage_interface::DbReader; @@ -30,7 +30,10 @@ use aptos_storage_service_types::{ requests::{ DataRequest, EpochEndingLedgerInfoRequest, NewTransactionOutputsWithProofRequest, NewTransactionsOrOutputsWithProofRequest, NewTransactionsWithProofRequest, - StateValuesWithProofRequest, StorageServiceRequest, TransactionOutputsWithProofRequest, + StateValuesWithProofRequest, StorageServiceRequest, + SubscribeTransactionOutputsWithProofRequest, + SubscribeTransactionsOrOutputsWithProofRequest, SubscribeTransactionsWithProofRequest, + SubscriptionStreamMetadata, TransactionOutputsWithProofRequest, TransactionsOrOutputsWithProofRequest, TransactionsWithProofRequest, }, responses::{StorageServerSummary, StorageServiceResponse, TransactionOrOutputListWithProof}, @@ -43,9 +46,10 @@ use aptos_types::{ state_store::state_value::StateValueChunkWithProof, transaction::{TransactionListWithProof, 
TransactionOutputListWithProof, Version}, }; +use arc_swap::ArcSwap; use async_trait::async_trait; use rand::prelude::SliceRandom; -use std::{fmt, sync::Arc, time::Duration}; +use std::{fmt, ops::Deref, sync::Arc, time::Duration}; use tokio::runtime::Handle; // Useful constants @@ -73,15 +77,19 @@ const PEER_LOG_FREQ_SECS: u64 = 10; #[derive(Clone, Debug)] pub struct AptosDataClient { /// Config for AptosNet data client. - data_client_config: AptosDataClientConfig, + data_client_config: Arc, /// The underlying AptosNet storage service client. storage_service_client: StorageServiceClient>, + /// The state of the active subscription stream. + active_subscription_state: Arc>>, /// All of the data-client specific data we have on each network peer. - peer_states: Arc>, + peer_states: Arc, /// A cached, aggregate data summary of all unbanned peers' data summaries. - global_summary_cache: Arc>, + global_summary_cache: Arc>, /// Used for generating the next request/response id. response_id_generator: Arc, + /// Time service used for calculating peer lag + time_service: TimeService, } impl AptosDataClient { @@ -93,17 +101,23 @@ impl AptosDataClient { storage_service_client: StorageServiceClient>, runtime: Option, ) -> (Self, DataSummaryPoller) { + // Wrap the configs in an Arc (to be shared across components) + let base_config = Arc::new(base_config); + let data_client_config = Arc::new(data_client_config); + // Create the data client let data_client = Self { - data_client_config, + data_client_config: data_client_config.clone(), storage_service_client: storage_service_client.clone(), - peer_states: Arc::new(RwLock::new(PeerStates::new( + active_subscription_state: Arc::new(Mutex::new(None)), + peer_states: Arc::new(PeerStates::new( base_config, - data_client_config, + data_client_config.clone(), storage_service_client.get_peers_and_metadata(), - ))), - global_summary_cache: Arc::new(RwLock::new(GlobalDataSummary::empty())), + )), + global_summary_cache: Arc::new(ArcSwap::from(Arc::new(GlobalDataSummary::empty()))), response_id_generator: Arc::new(U64IdGenerator::new()), + time_service: time_service.clone(), }; // Create the data summary poller @@ -141,7 +155,7 @@ impl AptosDataClient { /// Update a peer's data summary. pub fn update_summary(&self, peer: PeerNetworkId, summary: StorageServerSummary) { - self.peer_states.write().update_summary(peer, summary) + self.peer_states.update_summary(peer, summary) } /// Recompute and update the global data summary cache @@ -150,9 +164,12 @@ impl AptosDataClient { // the peer states (to handle disconnected peers). self.garbage_collect_peer_states()?; - // Calculate the aggregate data summary - let aggregate = self.peer_states.read().calculate_aggregate_summary(); - *self.global_summary_cache.write() = aggregate; + // Calculate the global data summary + let global_data_summary = self.peer_states.calculate_global_data_summary(); + + // Update the cached data summary + self.global_summary_cache + .store(Arc::new(global_data_summary)); Ok(()) } @@ -164,7 +181,6 @@ impl AptosDataClient { // Garbage collect the disconnected peers self.peer_states - .write() .garbage_collect_peer_states(all_connected_peers); Ok(()) @@ -186,15 +202,80 @@ impl AptosDataClient { self.identify_serviceable(regular_peers, request) }; - // Randomly select a peer to handle the request - serviceable_peers - .choose(&mut rand::thread_rng()) - .copied() - .ok_or_else(|| { - Error::DataIsUnavailable( - format!("No connected peers are advertising that they can serve this data! 
Request: {:?}",request), - ) + // Identify the peer based on the request type + if request.data_request.is_subscription_request() { + self.choose_peer_for_subscription_request(request, serviceable_peers) + } else { + choose_random_peer(serviceable_peers).ok_or_else(|| { + Error::DataIsUnavailable(format!( + "No peers are advertising that they can serve the data! Request: {:?}", + request + )) }) + } + } + + /// Choose a peer that can service the given subscription request + pub(crate) fn choose_peer_for_subscription_request( + &self, + request: &StorageServiceRequest, + serviceable_peers: Vec, + ) -> crate::error::Result { + // Get the stream ID from the request + let request_stream_id = match &request.data_request { + DataRequest::SubscribeTransactionsWithProof(request) => { + request.subscription_stream_metadata.subscription_stream_id + }, + DataRequest::SubscribeTransactionOutputsWithProof(request) => { + request.subscription_stream_metadata.subscription_stream_id + }, + DataRequest::SubscribeTransactionsOrOutputsWithProof(request) => { + request.subscription_stream_metadata.subscription_stream_id + }, + data_request => { + return Err(Error::UnexpectedErrorEncountered(format!( + "Invalid subscription request type found: {:?}", + data_request + ))) + }, + }; + + // Grab the lock on the active subscription state + let mut active_subscription_state = self.active_subscription_state.lock(); + + // If we have an active subscription and the request is for the same + // stream ID, use the same peer (as long as it is still serviceable). + if let Some(subscription_state) = active_subscription_state.take() { + if subscription_state.subscription_stream_id == request_stream_id { + // The stream IDs match. Verify that the request is still serviceable. + let peer_network_id = subscription_state.peer_network_id; + if serviceable_peers.contains(&peer_network_id) { + // The previously chosen peer can still service the request + *active_subscription_state = Some(subscription_state); + return Ok(peer_network_id); + } else { + // The previously chosen peer can no longer service + // the request, so we need to return an error. + return Err(Error::DataIsUnavailable(format!( + "The peer that we were previously subscribing to can no longer service \ + the subscriptions! Peer: {:?}, request: {:?}", + peer_network_id, request + ))); + } + } + } + + // Otherwise, we need to choose a new peer and update the subscription state + let peer_network_id = choose_random_peer(serviceable_peers).ok_or_else(|| { + Error::DataIsUnavailable(format!( + "No peers are advertising that they can serve the subscription! 
Request: {:?}", + request + )) + })?; + let subscription_state = SubscriptionState::new(peer_network_id, request_stream_id); + *active_subscription_state = Some(subscription_state); + + Ok(peer_network_id) } /// Identifies the peers in the given set of prospective peers @@ -206,7 +287,10 @@ impl AptosDataClient { ) -> Vec { prospective_peers .into_iter() - .filter(|peer| self.peer_states.read().can_service_request(peer, request)) + .filter(|peer| { + self.peer_states + .can_service_request(peer, self.time_service.clone(), request) + }) .collect::>() } @@ -215,7 +299,7 @@ impl AptosDataClient { &self, ) -> crate::error::Result, Error> { // Fetch the number of in-flight polls and update the metrics - let num_in_flight_polls = self.peer_states.read().num_in_flight_priority_polls(); + let num_in_flight_polls = self.peer_states.num_in_flight_priority_polls(); update_in_flight_metrics(PRIORITIZED_PEER, num_in_flight_polls); // Ensure we don't go over the maximum number of in-flight polls @@ -231,7 +315,7 @@ impl AptosDataClient { /// Fetches the next regular peer to poll pub fn fetch_regular_peer_to_poll(&self) -> crate::error::Result, Error> { // Fetch the number of in-flight polls and update the metrics - let num_in_flight_polls = self.peer_states.read().num_in_flight_regular_polls(); + let num_in_flight_polls = self.peer_states.num_in_flight_regular_polls(); update_in_flight_metrics(REGULAR_PEER, num_in_flight_polls); // Ensure we don't go over the maximum number of in-flight polls @@ -250,7 +334,7 @@ impl AptosDataClient { mut peers: Vec, ) -> crate::error::Result, Error> { // Identify the peers who do not already have in-flight requests. - peers.retain(|peer| !self.peer_states.read().existing_in_flight_request(peer)); + peers.retain(|peer| !self.peer_states.existing_in_flight_request(peer)); // Select a peer at random for polling let peer_to_poll = peers.choose(&mut rand::thread_rng()); @@ -259,14 +343,12 @@ impl AptosDataClient { /// Marks the given peers as having an in-flight poll request pub fn in_flight_request_started(&self, peer: &PeerNetworkId) { - self.peer_states.write().new_in_flight_request(peer); + self.peer_states.new_in_flight_request(peer); } /// Marks the given peers as polled pub fn in_flight_request_complete(&self, peer: &PeerNetworkId) { - self.peer_states - .write() - .mark_in_flight_request_complete(peer); + self.peer_states.mark_in_flight_request_complete(peer); } /// Returns all peers connected to us @@ -292,7 +374,7 @@ impl AptosDataClient { let mut priority_peers = vec![]; let mut regular_peers = vec![]; for peer in all_connected_peers { - if self.peer_states.read().is_priority_peer(&peer) { + if self.peer_states.is_priority_peer(&peer) { priority_peers.push(peer); } else { regular_peers.push(peer); @@ -420,7 +502,7 @@ impl AptosDataClient { // On the one hand, scoring dynamics are simpler when each request // is successful or failed but not both; on the other hand, this // feels simpler for the consumer. - self.peer_states.write().update_score_success(peer); + self.peer_states.update_score_success(peer); // Package up all of the context needed to fully report an error // with this RPC. 
@@ -485,9 +567,7 @@ impl AptosDataClient { _request: &StorageServiceRequest, error_type: ErrorType, ) { - self.peer_states - .write() - .update_score_error(peer, error_type); + self.peer_states.update_score_error(peer, error_type); } /// Creates a storage service request using the given data request @@ -508,15 +588,15 @@ impl AptosDataClient { /// Returns a copy of the peer states for testing #[cfg(test)] - pub(crate) fn get_peer_states(&self) -> PeerStates { - self.peer_states.read().clone() + pub(crate) fn get_peer_states(&self) -> Arc { + self.peer_states.clone() } } #[async_trait] impl AptosDataClientInterface for AptosDataClient { fn get_global_data_summary(&self) -> GlobalDataSummary { - self.global_summary_cache.read().clone() + self.global_summary_cache.load().clone().deref().clone() } async fn get_epoch_ending_ledger_infos( @@ -668,6 +748,72 @@ impl AptosDataClientInterface for AptosDataClient { self.create_and_send_storage_request(request_timeout_ms, data_request) .await } + + async fn subscribe_to_transaction_outputs_with_proof( + &self, + request_metadata: SubscriptionRequestMetadata, + request_timeout_ms: u64, + ) -> crate::error::Result> + { + let subscription_stream_metadata = SubscriptionStreamMetadata { + known_version_at_stream_start: request_metadata.known_version_at_stream_start, + known_epoch_at_stream_start: request_metadata.known_epoch_at_stream_start, + subscription_stream_id: request_metadata.subscription_stream_id, + }; + let data_request = DataRequest::SubscribeTransactionOutputsWithProof( + SubscribeTransactionOutputsWithProofRequest { + subscription_stream_metadata, + subscription_stream_index: request_metadata.subscription_stream_index, + }, + ); + self.create_and_send_storage_request(request_timeout_ms, data_request) + .await + } + + async fn subscribe_to_transactions_with_proof( + &self, + request_metadata: SubscriptionRequestMetadata, + include_events: bool, + request_timeout_ms: u64, + ) -> crate::error::Result> { + let subscription_stream_metadata = SubscriptionStreamMetadata { + known_version_at_stream_start: request_metadata.known_version_at_stream_start, + known_epoch_at_stream_start: request_metadata.known_epoch_at_stream_start, + subscription_stream_id: request_metadata.subscription_stream_id, + }; + let data_request = + DataRequest::SubscribeTransactionsWithProof(SubscribeTransactionsWithProofRequest { + subscription_stream_metadata, + include_events, + subscription_stream_index: request_metadata.subscription_stream_index, + }); + self.create_and_send_storage_request(request_timeout_ms, data_request) + .await + } + + async fn subscribe_to_transactions_or_outputs_with_proof( + &self, + request_metadata: SubscriptionRequestMetadata, + include_events: bool, + request_timeout_ms: u64, + ) -> crate::error::Result> + { + let subscription_stream_metadata = SubscriptionStreamMetadata { + known_version_at_stream_start: request_metadata.known_version_at_stream_start, + known_epoch_at_stream_start: request_metadata.known_epoch_at_stream_start, + subscription_stream_id: request_metadata.subscription_stream_id, + }; + let data_request = DataRequest::SubscribeTransactionsOrOutputsWithProof( + SubscribeTransactionsOrOutputsWithProofRequest { + subscription_stream_metadata, + include_events, + max_num_output_reductions: self.get_max_num_output_reductions(), + subscription_stream_index: request_metadata.subscription_stream_index, + }, + ); + self.create_and_send_storage_request(request_timeout_ms, data_request) + .await + } } /// The AptosNet-specific request 
context needed to update a peer's scoring. @@ -697,6 +843,28 @@ impl fmt::Debug for AptosNetResponseCallback { } } +/// A struct that holds a subscription state, including +/// the subscription stream ID and the peer serving the requests. +#[derive(Clone, Debug)] +struct SubscriptionState { + peer_network_id: PeerNetworkId, + subscription_stream_id: u64, +} + +impl SubscriptionState { + fn new(peer_network_id: PeerNetworkId, subscription_stream_id: u64) -> Self { + Self { + peer_network_id, + subscription_stream_id, + } + } +} + +/// Selects a peer randomly from the list of specified peers +fn choose_random_peer(peers: Vec) -> Option { + peers.choose(&mut rand::thread_rng()).copied() +} + /// Updates the metrics for the number of connected peers (priority and regular) fn update_connected_peer_metrics(num_priority_peers: usize, num_regular_peers: usize) { // Log the number of connected peers diff --git a/state-sync/aptos-data-client/src/error.rs b/state-sync/aptos-data-client/src/error.rs index 1c3d49e11c47f..abbd98d7542de 100644 --- a/state-sync/aptos-data-client/src/error.rs +++ b/state-sync/aptos-data-client/src/error.rs @@ -35,6 +35,11 @@ impl Error { Self::UnexpectedErrorEncountered(_) => "unexpected_error_encountered", } } + + /// Returns true iff the error is a timeout error + pub fn is_timeout(&self) -> bool { + matches!(self, Self::TimeoutWaitingForResponse(_)) + } } impl From for Error { diff --git a/state-sync/aptos-data-client/src/interface.rs b/state-sync/aptos-data-client/src/interface.rs index a40bac770244b..7d7eb769cb1b4 100644 --- a/state-sync/aptos-data-client/src/interface.rs +++ b/state-sync/aptos-data-client/src/interface.rs @@ -129,6 +129,52 @@ pub trait AptosDataClientInterface { include_events: bool, request_timeout_ms: u64, ) -> error::Result>; + + /// Subscribes to new transaction output lists with proofs. Subscriptions + /// start at `known_version + 1` and `known_epoch` (inclusive), as + /// specified by the stream metadata. The end version and proof version + /// are specified by the server. If the data cannot be fetched, an + /// error is returned. + async fn subscribe_to_transaction_outputs_with_proof( + &self, + subscription_request_metadata: SubscriptionRequestMetadata, + request_timeout_ms: u64, + ) -> error::Result>; + + /// Subscribes to new transaction lists with proofs. Subscriptions start + /// at `known_version + 1` and `known_epoch` (inclusive), as specified + /// by the subscription metadata. If `include_events` is true, + /// events are included in the proof. The end version and proof version + /// are specified by the server. If the data cannot be fetched, an error + /// is returned. + async fn subscribe_to_transactions_with_proof( + &self, + subscription_request_metadata: SubscriptionRequestMetadata, + include_events: bool, + request_timeout_ms: u64, + ) -> error::Result>; + + /// Subscribes to new transaction or output lists with proofs. Subscriptions + /// start at `known_version + 1` and `known_epoch` (inclusive), as + /// specified by the subscription metadata. If `include_events` is true, + /// events are included in the proof. The end version and proof version + /// are specified by the server. If the data cannot be fetched, an error + /// is returned. 
+ async fn subscribe_to_transactions_or_outputs_with_proof( + &self, + subscription_request_metadata: SubscriptionRequestMetadata, + include_events: bool, + request_timeout_ms: u64, + ) -> error::Result>; +} + +/// Subscription stream metadata associated with each subscription request +#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] +pub struct SubscriptionRequestMetadata { + pub known_version_at_stream_start: u64, // The highest known transaction version at stream start + pub known_epoch_at_stream_start: u64, // The highest known epoch at stream start + pub subscription_stream_id: u64, // The unique id of the subscription stream + pub subscription_stream_index: u64, // The index of the request in the subscription stream } /// A response error that users of the Aptos Data Client can use to notify @@ -203,7 +249,7 @@ impl Response { } /// The different data client response payloads as an enum. -#[derive(Debug, Eq, PartialEq)] +#[derive(Clone, Debug, Eq, PartialEq)] pub enum ResponsePayload { EpochEndingLedgerInfos(Vec), NewTransactionOutputsWithProof((TransactionOutputListWithProof, LedgerInfoWithSignatures)), @@ -215,6 +261,8 @@ pub enum ResponsePayload { } impl ResponsePayload { + /// Returns a label for the response payload. This is useful + /// for logging and metrics. pub fn get_label(&self) -> &'static str { match self { Self::EpochEndingLedgerInfos(_) => "epoch_ending_ledger_infos", @@ -226,6 +274,34 @@ impl ResponsePayload { Self::TransactionsWithProof(_) => "transactions_with_proof", } } + + /// Returns the chunk size of the response payload (i.e., the + /// number of data items held in the response). + pub fn get_data_chunk_size(&self) -> usize { + match self { + Self::EpochEndingLedgerInfos(epoch_ending_ledger_infos) => { + epoch_ending_ledger_infos.len() + }, + Self::NewTransactionOutputsWithProof((outputs_with_proof, _)) => { + outputs_with_proof.transactions_and_outputs.len() + }, + Self::NewTransactionsWithProof((transactions_with_proof, _)) => { + transactions_with_proof.transactions.len() + }, + Self::NumberOfStates(_) => { + 1 // The number of states is a single u64 + }, + Self::StateValuesWithProof(state_values_with_proof) => { + state_values_with_proof.raw_values.len() + }, + Self::TransactionOutputsWithProof(outputs_with_proof) => { + outputs_with_proof.transactions_and_outputs.len() + }, + Self::TransactionsWithProof(transactions_with_proof) => { + transactions_with_proof.transactions.len() + }, + } + } } impl From for ResponsePayload { diff --git a/state-sync/aptos-data-client/src/latency_monitor.rs b/state-sync/aptos-data-client/src/latency_monitor.rs index 0ace5f42e9c1f..5cd7fcf3526b9 100644 --- a/state-sync/aptos-data-client/src/latency_monitor.rs +++ b/state-sync/aptos-data-client/src/latency_monitor.rs @@ -35,7 +35,7 @@ pub struct LatencyMonitor { impl LatencyMonitor { pub fn new( - data_client_config: AptosDataClientConfig, + data_client_config: Arc, data_client: Arc, storage: Arc, time_service: TimeService, @@ -318,7 +318,7 @@ mod tests { }; use aptos_config::config::AptosDataClientConfig; use aptos_time_service::{TimeService, TimeServiceTrait}; - use std::time::Duration; + use std::{sync::Arc, time::Duration}; #[test] fn test_calculate_duration_from_proposal() { @@ -626,7 +626,7 @@ mod tests { /// Creates a latency monitor for testing fn create_latency_monitor() -> (TimeService, LatencyMonitor) { - let data_client_config = AptosDataClientConfig::default(); + let data_client_config = Arc::new(AptosDataClientConfig::default()); let 
data_client = create_mock_data_client(); let storage = create_mock_db_reader(); let time_service = TimeService::mock(); diff --git a/state-sync/aptos-data-client/src/metrics.rs b/state-sync/aptos-data-client/src/metrics.rs index 3e2830977610b..97307ea2f68b7 100644 --- a/state-sync/aptos-data-client/src/metrics.rs +++ b/state-sync/aptos-data-client/src/metrics.rs @@ -49,9 +49,9 @@ pub static ERROR_RESPONSES: Lazy = Lazy::new(|| { }); // Latency buckets for network latencies (seconds) -const REQUEST_LATENCY_BUCKETS_SECS: [f64; 18] = [ +const REQUEST_LATENCY_BUCKETS_SECS: &[f64] = &[ 0.05, 0.1, 0.2, 0.3, 0.5, 0.75, 1.0, 1.5, 2.0, 3.0, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 40.0, - 60.0, + 60.0, 120.0, 180.0, 240.0, 300.0, ]; /// Counter for tracking request latencies @@ -116,9 +116,10 @@ pub static OPTIMAL_CHUNK_SIZES: Lazy = Lazy::new(|| { // Latency buckets for the sync latencies (seconds). Note: there are a // lot of buckets here because we really care about sync latencies. -const SYNC_LATENCY_BUCKETS_SECS: [f64; 36] = [ +const SYNC_LATENCY_BUCKETS_SECS: &[f64] = &[ 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 3.0, 5.0, 10.0, 15.0, 20.0, 30.0, 40.0, 60.0, 120.0, 180.0, + 240.0, 300.0, 360.0, 420.0, 480.0, 540.0, 600.0, 1200.0, 1800.0, ]; /// Counter for tracking various sync latencies diff --git a/state-sync/aptos-data-client/src/peer_states.rs b/state-sync/aptos-data-client/src/peer_states.rs index 706ce43e418ae..ba8aaa074dced 100644 --- a/state-sync/aptos-data-client/src/peer_states.rs +++ b/state-sync/aptos-data-client/src/peer_states.rs @@ -16,12 +16,10 @@ use aptos_network::application::storage::PeersAndMetadata; use aptos_storage_service_types::{ requests::StorageServiceRequest, responses::StorageServerSummary, }; +use aptos_time_service::TimeService; +use dashmap::{DashMap, DashSet}; use itertools::Itertools; -use std::{ - cmp::min, - collections::{HashMap, HashSet}, - sync::Arc, -}; +use std::{cmp::min, sync::Arc}; /// Scores for peer rankings based on preferences and behavior. const MAX_SCORE: f64 = 100.0; @@ -81,7 +79,7 @@ impl PeerState { } /// Returns the storage summary iff the peer is not below the ignore threshold - fn storage_summary_if_not_ignored(&self) -> Option<&StorageServerSummary> { + fn get_storage_summary_if_not_ignored(&self) -> Option<&StorageServerSummary> { if self.score <= IGNORE_PEER_THRESHOLD { None } else { @@ -108,26 +106,26 @@ impl PeerState { /// advertisements and data-client internal metadata for scoring. 
#[derive(Clone, Debug)] pub(crate) struct PeerStates { - base_config: BaseConfig, - data_client_config: AptosDataClientConfig, - peer_to_state: HashMap, - in_flight_priority_polls: HashSet, // The priority peers with in-flight polls - in_flight_regular_polls: HashSet, // The regular peers with in-flight polls + base_config: Arc, + data_client_config: Arc, + peer_to_state: DashMap, + in_flight_priority_polls: DashSet, // The priority peers with in-flight polls + in_flight_regular_polls: DashSet, // The regular peers with in-flight polls peers_and_metadata: Arc, } impl PeerStates { pub fn new( - base_config: BaseConfig, - data_client_config: AptosDataClientConfig, + base_config: Arc, + data_client_config: Arc, peers_and_metadata: Arc, ) -> Self { Self { base_config, data_client_config, - peer_to_state: HashMap::new(), - in_flight_priority_polls: HashSet::new(), - in_flight_regular_polls: HashSet::new(), + peer_to_state: DashMap::new(), + in_flight_priority_polls: DashSet::new(), + in_flight_regular_polls: DashSet::new(), peers_and_metadata, } } @@ -137,6 +135,7 @@ impl PeerStates { pub fn can_service_request( &self, peer: &PeerNetworkId, + time_service: TimeService, request: &StorageServiceRequest, ) -> bool { // Storage services can always respond to data advertisement requests. @@ -149,46 +148,60 @@ impl PeerStates { } // Check if the peer can service the request - self.peer_to_state - .get(peer) - .and_then(PeerState::storage_summary_if_not_ignored) - .map(|summary| summary.can_service(&self.data_client_config, request)) - .unwrap_or(false) + if let Some(peer_state) = self.peer_to_state.get(peer) { + return match peer_state.get_storage_summary_if_not_ignored() { + Some(storage_summary) => { + storage_summary.can_service(&self.data_client_config, time_service, request) + }, + None => false, // The peer is temporarily ignored + }; + } + + // Otherwise, the request cannot be serviced + false } /// Updates the score of the peer according to a successful operation - pub fn update_score_success(&mut self, peer: PeerNetworkId) { - let old_score = self.peer_to_state.entry(peer).or_default().score; - self.peer_to_state - .entry(peer) - .or_default() - .update_score_success(); - let new_score = self.peer_to_state.entry(peer).or_default().score; - if old_score <= IGNORE_PEER_THRESHOLD && new_score > IGNORE_PEER_THRESHOLD { - info!( - (LogSchema::new(LogEntry::PeerStates) - .event(LogEvent::PeerNoLongerIgnored) - .message("Peer will no longer be ignored") - .peer(&peer)) - ); + pub fn update_score_success(&self, peer: PeerNetworkId) { + if let Some(mut entry) = self.peer_to_state.get_mut(&peer) { + // Get the peer's old score + let old_score = entry.score; + + // Update the peer's score with a successful operation + entry.update_score_success(); + + // Log if the peer is no longer ignored + let new_score = entry.score; + if old_score <= IGNORE_PEER_THRESHOLD && new_score > IGNORE_PEER_THRESHOLD { + info!( + (LogSchema::new(LogEntry::PeerStates) + .event(LogEvent::PeerNoLongerIgnored) + .message("Peer will no longer be ignored") + .peer(&peer)) + ); + } } } /// Updates the score of the peer according to an error - pub fn update_score_error(&mut self, peer: PeerNetworkId, error: ErrorType) { - let old_score = self.peer_to_state.entry(peer).or_default().score; - self.peer_to_state - .entry(peer) - .or_default() - .update_score_error(error); - let new_score = self.peer_to_state.entry(peer).or_default().score; - if old_score > IGNORE_PEER_THRESHOLD && new_score <= IGNORE_PEER_THRESHOLD { - info!( - 
(LogSchema::new(LogEntry::PeerStates) - .event(LogEvent::PeerIgnored) - .message("Peer will be ignored") - .peer(&peer)) - ); + pub fn update_score_error(&self, peer: PeerNetworkId, error: ErrorType) { + if let Some(mut entry) = self.peer_to_state.get_mut(&peer) { + // Get the peer's old score + let old_score = entry.score; + + // Update the peer's score with an error + entry.update_score_error(error); + + // Log if the peer is now ignored + let new_score = entry.score; + if old_score > IGNORE_PEER_THRESHOLD && new_score <= IGNORE_PEER_THRESHOLD { + info!( + (LogSchema::new(LogEntry::PeerStates) + .event(LogEvent::PeerIgnored) + .message("Peer will be ignored") + .peer(&peer)) + ); + } } } @@ -208,13 +221,13 @@ impl PeerStates { } /// Marks an in-flight request as started for the specified peer - pub fn new_in_flight_request(&mut self, peer: &PeerNetworkId) { + pub fn new_in_flight_request(&self, peer: &PeerNetworkId) { // Get the current in-flight polls let is_priority_peer = self.is_priority_peer(peer); let in_flight_polls = if is_priority_peer { - &mut self.in_flight_priority_polls + &self.in_flight_priority_polls } else { - &mut self.in_flight_regular_polls + &self.in_flight_regular_polls }; // Insert the new peer @@ -232,11 +245,12 @@ impl PeerStates { } /// Marks the pending in-flight request as complete for the specified peer - pub fn mark_in_flight_request_complete(&mut self, peer: &PeerNetworkId) { + pub fn mark_in_flight_request_complete(&self, peer: &PeerNetworkId) { // The priority of the peer might have changed since we // last polled it, so we attempt to remove it from both // the regular and priority in-flight requests. - if !self.in_flight_priority_polls.remove(peer) && !self.in_flight_regular_polls.remove(peer) + if self.in_flight_priority_polls.remove(peer).is_none() + && self.in_flight_regular_polls.remove(peer).is_none() { error!( (LogSchema::new(LogEntry::PeerStates) @@ -290,7 +304,7 @@ impl PeerStates { } /// Updates the storage summary for the given peer - pub fn update_summary(&mut self, peer: PeerNetworkId, summary: StorageServerSummary) { + pub fn update_summary(&self, peer: PeerNetworkId, summary: StorageServerSummary) { self.peer_to_state .entry(peer) .or_default() @@ -298,23 +312,27 @@ impl PeerStates { } /// Garbage collects the peer states to remove data for disconnected peers - pub fn garbage_collect_peer_states(&mut self, connected_peers: Vec) { + pub fn garbage_collect_peer_states(&self, connected_peers: Vec) { self.peer_to_state .retain(|peer_network_id, _| connected_peers.contains(peer_network_id)); } /// Calculates a global data summary using all known storage summaries - pub fn calculate_aggregate_summary(&self) -> GlobalDataSummary { - // Only include likely-not-malicious peers in the data summary aggregation - let summaries: Vec = self + pub fn calculate_global_data_summary(&self) -> GlobalDataSummary { + // Gather all storage summaries, but exclude peers that are ignored + let storage_summaries: Vec = self .peer_to_state - .values() - .filter_map(PeerState::storage_summary_if_not_ignored) - .cloned() + .iter() + .filter_map(|peer_state| { + peer_state + .value() + .get_storage_summary_if_not_ignored() + .cloned() + }) .collect(); // If we have no peers, return an empty global summary - if summaries.is_empty() { + if storage_summaries.is_empty() { return GlobalDataSummary::empty(); } @@ -324,7 +342,7 @@ impl PeerStates { let mut max_state_chunk_sizes = vec![]; let mut max_transaction_chunk_sizes = vec![]; let mut max_transaction_output_chunk_sizes = 
vec![]; - for summary in summaries { + for summary in storage_summaries { // Collect aggregate data advertisements if let Some(epoch_ending_ledger_infos) = summary.data_summary.epoch_ending_ledger_infos { @@ -373,7 +391,7 @@ impl PeerStates { #[cfg(test)] /// Returns a copy of the peer to states map for test purposes - pub fn get_peer_to_states(&self) -> HashMap { + pub fn get_peer_to_states(&self) -> DashMap { self.peer_to_state.clone() } } diff --git a/state-sync/aptos-data-client/src/poller.rs b/state-sync/aptos-data-client/src/poller.rs index 1618cc886fc8e..2d0d8cc185401 100644 --- a/state-sync/aptos-data-client/src/poller.rs +++ b/state-sync/aptos-data-client/src/poller.rs @@ -32,9 +32,9 @@ const REGULAR_PEER_SAMPLE_FREQ: u64 = 3; /// A poller for storage summaries that is responsible for periodically refreshing /// the view of advertised data in the network. pub struct DataSummaryPoller { - data_client_config: AptosDataClientConfig, // The configuration for the data client - data_client: AptosDataClient, // The data client through which to poll peers - poll_loop_interval: Duration, // The interval between polling loop executions + data_client_config: Arc, // The configuration for the data client + data_client: AptosDataClient, // The data client through which to poll peers + poll_loop_interval: Duration, // The interval between polling loop executions runtime: Option, // An optional runtime on which to spawn the poller threads storage: Arc, // The reader interface to storage time_service: TimeService, // The service to monitor elapsed time @@ -42,7 +42,7 @@ pub struct DataSummaryPoller { impl DataSummaryPoller { pub fn new( - data_client_config: AptosDataClientConfig, + data_client_config: Arc, data_client: AptosDataClient, poll_loop_interval: Duration, runtime: Option, @@ -63,7 +63,7 @@ impl DataSummaryPoller { pub async fn start_poller(self) { // Create and start the latency monitor start_latency_monitor( - self.data_client_config, + self.data_client_config.clone(), self.data_client.clone(), self.storage.clone(), self.time_service.clone(), @@ -248,7 +248,7 @@ pub(crate) fn poll_peer( /// Spawns the dedicated latency monitor fn start_latency_monitor( - data_client_config: AptosDataClientConfig, + data_client_config: Arc, data_client: AptosDataClient, storage: Arc, time_service: TimeService, diff --git a/state-sync/aptos-data-client/src/tests/mock.rs b/state-sync/aptos-data-client/src/tests/mock.rs index 9d8b20c35470f..6e931f99981d4 100644 --- a/state-sync/aptos-data-client/src/tests/mock.rs +++ b/state-sync/aptos-data-client/src/tests/mock.rs @@ -5,7 +5,7 @@ use crate::{ client::AptosDataClient, error::Result, global_summary::GlobalDataSummary, - interface::{AptosDataClientInterface, Response}, + interface::{AptosDataClientInterface, Response, SubscriptionRequestMetadata}, poller::DataSummaryPoller, }; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; @@ -270,6 +270,26 @@ mock! 
{ include_events: bool, request_timeout_ms: u64, ) -> Result>; + + async fn subscribe_to_transaction_outputs_with_proof( + &self, + subscription_request_metadata: SubscriptionRequestMetadata, + request_timeout_ms: u64, + ) -> Result>; + + async fn subscribe_to_transactions_with_proof( + &self, + subscription_request_metadata: SubscriptionRequestMetadata, + include_events: bool, + request_timeout_ms: u64, + ) -> Result>; + + async fn subscribe_to_transactions_or_outputs_with_proof( + &self, + subscription_request_metadata: SubscriptionRequestMetadata, + include_events: bool, + request_timeout_ms: u64, + ) -> Result>; } } diff --git a/state-sync/aptos-data-client/src/tests/priority.rs b/state-sync/aptos-data-client/src/tests/priority.rs index 007c7a8452c24..a84d0cf9764db 100644 --- a/state-sync/aptos-data-client/src/tests/priority.rs +++ b/state-sync/aptos-data-client/src/tests/priority.rs @@ -9,10 +9,17 @@ use aptos_config::{ config::{AptosDataClientConfig, BaseConfig, RoleType}, network_id::NetworkId, }; -use aptos_storage_service_types::requests::{ - DataRequest, NewTransactionOutputsWithProofRequest, NewTransactionsWithProofRequest, - StorageServiceRequest, TransactionOutputsWithProofRequest, +use aptos_storage_service_types::{ + requests::{ + DataRequest, NewTransactionOutputsWithProofRequest, + NewTransactionsOrOutputsWithProofRequest, NewTransactionsWithProofRequest, + StorageServiceRequest, SubscribeTransactionOutputsWithProofRequest, + SubscribeTransactionsOrOutputsWithProofRequest, SubscribeTransactionsWithProofRequest, + SubscriptionStreamMetadata, TransactionOutputsWithProofRequest, + }, + responses::NUM_MICROSECONDS_IN_SECOND, }; +use aptos_time_service::TimeServiceTrait; use claims::assert_matches; #[tokio::test] @@ -146,31 +153,21 @@ async fn prioritized_peer_request_selection() { async fn prioritized_peer_optimistic_fetch_selection() { ::aptos_logger::Logger::init_for_testing(); - // Create a data client with a max version lag of 100 - let max_optimistic_fetch_version_lag = 100; + // Create a data client with a max lag of 100 + let max_optimistic_fetch_lag_secs = 100; let data_client_config = AptosDataClientConfig { - max_optimistic_fetch_version_lag, + max_optimistic_fetch_lag_secs, ..Default::default() }; - let (mut mock_network, _, client, _) = MockNetwork::new(None, Some(data_client_config), None); + let (mut mock_network, time_service, client, _) = + MockNetwork::new(None, Some(data_client_config), None); // Create test data let known_version = 10000000; let known_epoch = 10; - // Ensure the properties hold for both optimistic fetch requests - let new_transactions_request = - DataRequest::GetNewTransactionsWithProof(NewTransactionsWithProofRequest { - known_version, - known_epoch, - include_events: false, - }); - let new_outputs_request = - DataRequest::GetNewTransactionOutputsWithProof(NewTransactionOutputsWithProofRequest { - known_version, - known_epoch, - }); - for data_request in [new_transactions_request, new_outputs_request] { + // Ensure the properties hold for all optimistic fetch requests + for data_request in enumerate_optimistic_fetch_requests(known_version, known_epoch) { let storage_request = StorageServiceRequest::new(data_request, true); // Ensure no peers can service the request (we have no connections) @@ -187,13 +184,17 @@ async fn prioritized_peer_optimistic_fetch_selection() { ); // Advertise the data for the regular peer and verify it is now selected - client.update_summary(regular_peer_1, utils::create_storage_summary(known_version)); + let 
timestamp_usecs = time_service.now_unix_time().as_micros() as u64; + client.update_summary( + regular_peer_1, + utils::create_storage_summary_with_timestamp(known_version, timestamp_usecs), + ); assert_eq!( client.choose_peer_for_request(&storage_request), Ok(regular_peer_1) ); - // Add a priority peer and verify the regular peer is selected + // Add a priority peer and verify the regular peer is still selected let priority_peer_1 = mock_network.add_peer(true); assert_eq!( client.choose_peer_for_request(&storage_request), @@ -203,52 +204,326 @@ async fn prioritized_peer_optimistic_fetch_selection() { // Advertise the data for the priority peer and verify it is now selected client.update_summary( priority_peer_1, - utils::create_storage_summary(known_version), + utils::create_storage_summary_with_timestamp(known_version, timestamp_usecs), ); assert_eq!( client.choose_peer_for_request(&storage_request), Ok(priority_peer_1) ); - // Update the priority peer to be too far behind and verify it is not selected + // Elapse enough time for both peers to be too far behind + time_service + .clone() + .advance_secs(max_optimistic_fetch_lag_secs + 1); + + // Verify neither peer is now selected + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Update the regular peer to be up-to-date and verify it is now chosen + let timestamp_usecs = time_service.now_unix_time().as_micros() as u64; + let regular_peer_timestamp_usecs = + timestamp_usecs - ((max_optimistic_fetch_lag_secs / 2) * NUM_MICROSECONDS_IN_SECOND); + client.update_summary( + regular_peer_1, + utils::create_storage_summary_with_timestamp( + known_version, + regular_peer_timestamp_usecs, + ), + ); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Update the priority peer to be up-to-date and verify it is now chosen + let priority_peer_timestamp_usecs = + timestamp_usecs - ((max_optimistic_fetch_lag_secs / 2) * NUM_MICROSECONDS_IN_SECOND); client.update_summary( priority_peer_1, - utils::create_storage_summary(known_version - max_optimistic_fetch_version_lag), + utils::create_storage_summary_with_timestamp( + known_version, + priority_peer_timestamp_usecs, + ), + ); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(priority_peer_1) ); + + // Disconnect the priority peer and verify the regular peer is selected + mock_network.disconnect_peer(priority_peer_1); assert_eq!( client.choose_peer_for_request(&storage_request), Ok(regular_peer_1) ); - // Update the regular peer to be too far behind and verify neither is selected + // Elapse enough time for the regular peer to be too far behind + time_service + .clone() + .advance_secs(max_optimistic_fetch_lag_secs); + + // Verify neither peer is now select + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Disconnect the regular peer so that we no longer have any connections + mock_network.disconnect_peer(regular_peer_1); + } +} + +#[tokio::test] +async fn prioritized_peer_subscription_requests() { + ::aptos_logger::Logger::init_for_testing(); + + // Create a data client with a max lag of 10 + let max_subscription_lag_secs = 10; + let data_client_config = AptosDataClientConfig { + max_subscription_lag_secs, + ..Default::default() + }; + let (mut mock_network, time_service, client, _) = + MockNetwork::new(None, Some(data_client_config), None); + + // Create test data + let known_version = 1000; + let 
known_epoch = 5; + + // Ensure the properties hold for all subscription requests + for data_request in enumerate_subscription_requests(known_version, known_epoch) { + let storage_request = StorageServiceRequest::new(data_request, true); + + // Ensure no peers can service the request (we have no connections) + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Add two priority peers and a regular peer + let priority_peer_1 = mock_network.add_peer(true); + let priority_peer_2 = mock_network.add_peer(true); + let regular_peer_1 = mock_network.add_peer(false); + + // Verify no peers can service the request (no peers are advertising data) + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Advertise the data for all peers + let timestamp_usecs = time_service.now_unix_time().as_micros() as u64; + for peer in [priority_peer_1, priority_peer_2, regular_peer_1] { + client.update_summary( + peer, + utils::create_storage_summary_with_timestamp(known_version, timestamp_usecs), + ); + } + + // Verify a priority peer is selected + let selected_peer = client.choose_peer_for_request(&storage_request).unwrap(); + assert!(selected_peer == priority_peer_1 || selected_peer == priority_peer_2); + + // Make several more requests and verify the same priority peer is selected + for _ in 0..10 { + let current_selected_peer = client.choose_peer_for_request(&storage_request).unwrap(); + assert_eq!(selected_peer, current_selected_peer); + } + + // Elapse enough time for all peers to be too far behind + time_service + .clone() + .advance_secs(max_subscription_lag_secs + 1); + + // Advertise new data for all peers (except the selected peer) + let timestamp_usecs = time_service.now_unix_time().as_micros() as u64; + for peer in [priority_peer_1, priority_peer_2, regular_peer_1] { + if peer != selected_peer { + client.update_summary( + peer, + utils::create_storage_summary_with_timestamp(known_version, timestamp_usecs), + ); + } + } + + // Verify no peers can service the request (because the + // previously selected peer is still too far behind). + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Verify the other priority peer is now select (as the + // previous request will terminate the subscription). 
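For reference, the staleness check exercised by these tests compares the timestamp of a peer's latest synced ledger info against the current time, both in microseconds, and rejects the peer once the gap exceeds the configured lag. A minimal, self-contained sketch of that comparison (the helper name is illustrative, and the microseconds constant is redefined locally rather than imported):

const NUM_MICROSECONDS_IN_SECOND: u64 = 1_000_000;

/// Returns true iff a peer whose latest synced ledger info carries the given
/// timestamp is within the configured lag (e.g., max_subscription_lag_secs).
fn peer_is_within_lag(peer_timestamp_usecs: u64, now_usecs: u64, max_lag_secs: u64) -> bool {
    let max_lag_usecs = max_lag_secs.saturating_mul(NUM_MICROSECONDS_IN_SECOND);
    now_usecs.saturating_sub(peer_timestamp_usecs) <= max_lag_usecs
}

fn main() {
    // A peer that is 50 seconds behind is acceptable with a 100 second max lag
    assert!(peer_is_within_lag(0, 50 * NUM_MICROSECONDS_IN_SECOND, 100));
    // A peer that is 101 seconds behind is too far behind
    assert!(!peer_is_within_lag(0, 101 * NUM_MICROSECONDS_IN_SECOND, 100));
}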
+ let next_selected_peer = client.choose_peer_for_request(&storage_request).unwrap(); + assert!(selected_peer != next_selected_peer); + assert!(selected_peer == priority_peer_1 || selected_peer == priority_peer_2); + + // Update the request's subscription ID and verify the other priority peer is selected + let storage_request = update_subscription_request_id(&storage_request); + let next_selected_peer = client.choose_peer_for_request(&storage_request).unwrap(); + assert!(selected_peer != next_selected_peer); + assert!(next_selected_peer == priority_peer_1 || next_selected_peer == priority_peer_2); + + // Make several more requests and verify the same priority peer is selected + for _ in 0..10 { + let current_select_peer = client.choose_peer_for_request(&storage_request).unwrap(); + assert_eq!(current_select_peer, next_selected_peer); + } + + // Disconnect all peers and verify no peers can service the request + for peer in [priority_peer_1, priority_peer_2, regular_peer_1] { + mock_network.disconnect_peer(peer); + } + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + } +} + +#[tokio::test] +async fn prioritized_peer_subscription_selection() { + ::aptos_logger::Logger::init_for_testing(); + + // Create a data client with a max lag of 100 + let max_subscription_lag_secs = 100; + let data_client_config = AptosDataClientConfig { + max_subscription_lag_secs, + ..Default::default() + }; + let (mut mock_network, time_service, client, _) = + MockNetwork::new(None, Some(data_client_config), None); + + // Create test data + let known_version = 10000000; + let known_epoch = 10; + + // Ensure the properties hold for all subscription requests + for data_request in enumerate_subscription_requests(known_version, known_epoch) { + let storage_request = StorageServiceRequest::new(data_request, true); + + // Ensure no peers can service the request (we have no connections) + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Add a regular peer and verify the peer cannot support the request + let regular_peer_1 = mock_network.add_peer(false); + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Advertise the data for the regular peer and verify it is now selected + let timestamp_usecs = time_service.now_unix_time().as_micros() as u64; client.update_summary( regular_peer_1, - utils::create_storage_summary(known_version - (max_optimistic_fetch_version_lag * 2)), + utils::create_storage_summary_with_timestamp(known_version, timestamp_usecs), + ); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Add a priority peer and verify the regular peer is still selected + let priority_peer_1 = mock_network.add_peer(true); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Advertise the data for the priority peer and verify it is not selected + // (the previous subscription request went to the regular peer). 
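The behaviour these assertions rely on can be summarised as: a subscription stream stays pinned to the peer chosen for its stream ID, and only a new stream ID (or the pinned peer becoming unserviceable) allows a different peer to be chosen. A simplified, stand-alone sketch of that rule (all names and the exact bookkeeping are assumptions, not the actual client code):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct PeerId(u64);

#[derive(Default)]
struct SubscriptionState {
    // The stream ID and peer that the current subscription is pinned to
    stream_id_and_peer: Option<(u64, PeerId)>,
}

impl SubscriptionState {
    /// Chooses the peer to service a subscription request: reuse the pinned peer
    /// for the same stream ID if it can still service the request, otherwise pin
    /// a freshly selected peer for the new stream ID.
    fn choose_peer(
        &mut self,
        stream_id: u64,
        can_service: impl Fn(PeerId) -> bool,
        select_new_peer: impl Fn() -> Option<PeerId>,
    ) -> Option<PeerId> {
        if let Some((pinned_stream_id, pinned_peer)) = self.stream_id_and_peer {
            if pinned_stream_id == stream_id {
                // Same stream: the request must go to the pinned peer (or fail)
                return can_service(pinned_peer).then_some(pinned_peer);
            }
        }

        // New stream ID: select and pin a new peer
        let new_peer = select_new_peer()?;
        self.stream_id_and_peer = Some((stream_id, new_peer));
        Some(new_peer)
    }
}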
+ client.update_summary( + priority_peer_1, + utils::create_storage_summary_with_timestamp(known_version, timestamp_usecs), ); assert_matches!( client.choose_peer_for_request(&storage_request), Err(Error::DataIsUnavailable(_)) ); - // Disconnect the regular peer and verify neither is selected - mock_network.disconnect_peer(regular_peer_1); + // Update the request's subscription ID and verify it now goes to the priority peer + let storage_request = update_subscription_request_id(&storage_request); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(priority_peer_1) + ); + + // Elapse enough time for both peers to be too far behind + time_service + .clone() + .advance_secs(max_subscription_lag_secs + 1); + + // Verify neither peer is now selected assert_matches!( client.choose_peer_for_request(&storage_request), Err(Error::DataIsUnavailable(_)) ); - // Advertise the data for the priority peer and verify it is now selected again + // Update the request's subscription ID + let storage_request = update_subscription_request_id(&storage_request); + + // Update the regular peer to be up-to-date and verify it is now chosen + let timestamp_usecs = time_service.now_unix_time().as_micros() as u64; + let regular_peer_timestamp_usecs = + timestamp_usecs - ((max_subscription_lag_secs / 2) * NUM_MICROSECONDS_IN_SECOND); + client.update_summary( + regular_peer_1, + utils::create_storage_summary_with_timestamp( + known_version, + regular_peer_timestamp_usecs, + ), + ); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Update the request's subscription ID + let storage_request = update_subscription_request_id(&storage_request); + + // Update the priority peer to be up-to-date and verify it is now chosen + let priority_peer_timestamp_usecs = + timestamp_usecs - ((max_subscription_lag_secs / 2) * NUM_MICROSECONDS_IN_SECOND); client.update_summary( priority_peer_1, - utils::create_storage_summary(known_version + 1000), + utils::create_storage_summary_with_timestamp( + known_version, + priority_peer_timestamp_usecs, + ), ); assert_eq!( client.choose_peer_for_request(&storage_request), Ok(priority_peer_1) ); - // Disconnect the priority peer so that we no longer have any connections + // Update the request's subscription ID + let storage_request = update_subscription_request_id(&storage_request); + + // Disconnect the priority peer and verify the regular peer is selected mock_network.disconnect_peer(priority_peer_1); + assert_eq!( + client.choose_peer_for_request(&storage_request), + Ok(regular_peer_1) + ); + + // Elapse enough time for the regular peer to be too far behind + time_service.clone().advance_secs(max_subscription_lag_secs); + + // Verify neither peer is now select + assert_matches!( + client.choose_peer_for_request(&storage_request), + Err(Error::DataIsUnavailable(_)) + ); + + // Disconnect the regular peer so that we no longer have any connections + mock_network.disconnect_peer(regular_peer_1); } } @@ -324,3 +599,106 @@ async fn pfn_peer_prioritization() { assert_eq!(priority_peers, vec![outbound_peer]); assert_eq!(regular_peers, vec![inbound_peer]); } + +/// Enumerates all optimistic fetch request types +fn enumerate_optimistic_fetch_requests(known_version: u64, known_epoch: u64) -> Vec { + // Create all optimistic fetch requests + let new_transactions_request = + DataRequest::GetNewTransactionsWithProof(NewTransactionsWithProofRequest { + known_version, + known_epoch, + include_events: false, + }); + let new_outputs_requests = + 
DataRequest::GetNewTransactionOutputsWithProof(NewTransactionOutputsWithProofRequest { + known_version, + known_epoch, + }); + let new_transactions_or_outputs_request = DataRequest::GetNewTransactionsOrOutputsWithProof( + NewTransactionsOrOutputsWithProofRequest { + known_version, + known_epoch, + include_events: false, + max_num_output_reductions: 0, + }, + ); + + // Return all optimistic fetch requests + vec![ + new_transactions_request, + new_outputs_requests, + new_transactions_or_outputs_request, + ] +} + +/// Enumerates all subscription request types +fn enumerate_subscription_requests(known_version: u64, known_epoch: u64) -> Vec { + // Create all subscription requests + let subscribe_transactions_request = + DataRequest::SubscribeTransactionsWithProof(SubscribeTransactionsWithProofRequest { + subscription_stream_metadata: SubscriptionStreamMetadata { + known_version_at_stream_start: known_version, + known_epoch_at_stream_start: known_epoch, + subscription_stream_id: 100, + }, + subscription_stream_index: 0, + include_events: false, + }); + let subscribe_outputs_request = DataRequest::SubscribeTransactionOutputsWithProof( + SubscribeTransactionOutputsWithProofRequest { + subscription_stream_metadata: SubscriptionStreamMetadata { + known_version_at_stream_start: known_version, + known_epoch_at_stream_start: known_epoch, + subscription_stream_id: 200, + }, + subscription_stream_index: 0, + }, + ); + let subscribe_transactions_or_outputs_request = + DataRequest::SubscribeTransactionsOrOutputsWithProof( + SubscribeTransactionsOrOutputsWithProofRequest { + subscription_stream_metadata: SubscriptionStreamMetadata { + known_version_at_stream_start: known_version, + known_epoch_at_stream_start: known_epoch, + subscription_stream_id: 300, + }, + subscription_stream_index: 0, + include_events: false, + max_num_output_reductions: 0, + }, + ); + + // Return all subscription requests + vec![ + subscribe_transactions_request, + subscribe_outputs_request, + subscribe_transactions_or_outputs_request, + ] +} + +/// Updates the subscription request ID in the given storage request +/// and returns the updated storage request. +fn update_subscription_request_id( + storage_service_request: &StorageServiceRequest, +) -> StorageServiceRequest { + let mut storage_service_request = storage_service_request.clone(); + + // Update the subscription's request ID + match &mut storage_service_request.data_request { + DataRequest::SubscribeTransactionsWithProof(request) => { + request.subscription_stream_metadata.subscription_stream_id += 1 + }, + DataRequest::SubscribeTransactionOutputsWithProof(request) => { + request.subscription_stream_metadata.subscription_stream_id += 1 + }, + DataRequest::SubscribeTransactionsOrOutputsWithProof(request) => { + request.subscription_stream_metadata.subscription_stream_id += 1 + }, + _ => panic!( + "Unexpected subscription request type! 
{:?}", + storage_service_request + ), + } + + storage_service_request +} diff --git a/state-sync/aptos-data-client/src/tests/utils.rs b/state-sync/aptos-data-client/src/tests/utils.rs index cef6e82104050..852183d7b980b 100644 --- a/state-sync/aptos-data-client/src/tests/utils.rs +++ b/state-sync/aptos-data-client/src/tests/utils.rs @@ -12,19 +12,35 @@ use aptos_types::{ transaction::Version, }; -/// Creates a test ledger info at the given version -fn create_ledger_info(version: Version) -> LedgerInfoWithSignatures { +/// Creates a test ledger info at the given version and timestamp +fn create_ledger_info(version: Version, timestamp_usecs: u64) -> LedgerInfoWithSignatures { LedgerInfoWithSignatures::new( LedgerInfo::new( - BlockInfo::new(0, 0, HashValue::zero(), HashValue::zero(), version, 0, None), + BlockInfo::new( + 0, + 0, + HashValue::zero(), + HashValue::zero(), + version, + timestamp_usecs, + None, + ), HashValue::zero(), ), AggregateSignature::empty(), ) } -/// Creates a test storage server summary at the given version +/// Creates a test storage server summary at the given version and timestamp pub fn create_storage_summary(version: Version) -> StorageServerSummary { + create_storage_summary_with_timestamp(version, 0) +} + +/// Creates a test storage server summary at the given version and timestamp +pub fn create_storage_summary_with_timestamp( + version: Version, + timestamp_usecs: u64, +) -> StorageServerSummary { StorageServerSummary { protocol_metadata: ProtocolMetadata { max_epoch_chunk_size: 1000, @@ -33,7 +49,7 @@ pub fn create_storage_summary(version: Version) -> StorageServerSummary { max_transaction_output_chunk_size: 1000, }, data_summary: DataSummary { - synced_ledger_info: Some(create_ledger_info(version)), + synced_ledger_info: Some(create_ledger_info(version, timestamp_usecs)), epoch_ending_ledger_infos: None, transactions: Some(CompleteDataRange::new(0, version).unwrap()), transaction_outputs: Some(CompleteDataRange::new(0, version).unwrap()), diff --git a/state-sync/state-sync-v2/data-streaming-service/Cargo.toml b/state-sync/data-streaming-service/Cargo.toml similarity index 100% rename from state-sync/state-sync-v2/data-streaming-service/Cargo.toml rename to state-sync/data-streaming-service/Cargo.toml diff --git a/state-sync/state-sync-v2/data-streaming-service/src/data_notification.rs b/state-sync/data-streaming-service/src/data_notification.rs similarity index 71% rename from state-sync/state-sync-v2/data-streaming-service/src/data_notification.rs rename to state-sync/data-streaming-service/src/data_notification.rs index 383d77a76a68f..c867e14204f23 100644 --- a/state-sync/state-sync-v2/data-streaming-service/src/data_notification.rs +++ b/state-sync/data-streaming-service/src/data_notification.rs @@ -23,7 +23,7 @@ pub struct DataNotification { /// A single payload (e.g. chunk) of data delivered to a data listener. 
#[allow(clippy::large_enum_variant)] -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Eq, PartialEq)] pub enum DataPayload { ContinuousTransactionOutputsWithProof(LedgerInfoWithSignatures, TransactionOutputListWithProof), ContinuousTransactionsWithProof(LedgerInfoWithSignatures, TransactionListWithProof), @@ -46,6 +46,9 @@ pub enum DataClientRequest { TransactionOutputsWithProof(TransactionOutputsWithProofRequest), NewTransactionsOrOutputsWithProof(NewTransactionsOrOutputsWithProofRequest), TransactionsOrOutputsWithProof(TransactionsOrOutputsWithProofRequest), + SubscribeTransactionsWithProof(SubscribeTransactionsWithProofRequest), + SubscribeTransactionOutputsWithProof(SubscribeTransactionOutputsWithProofRequest), + SubscribeTransactionsOrOutputsWithProof(SubscribeTransactionsOrOutputsWithProofRequest), } impl DataClientRequest { @@ -61,6 +64,13 @@ impl DataClientRequest { Self::TransactionOutputsWithProof(_) => "transaction_outputs_with_proof", Self::NewTransactionsOrOutputsWithProof(_) => "new_transactions_or_outputs_with_proof", Self::TransactionsOrOutputsWithProof(_) => "transactions_or_outputs_with_proof", + Self::SubscribeTransactionsWithProof(_) => "subscribe_transactions_with_proof", + Self::SubscribeTransactionOutputsWithProof(_) => { + "subscribe_transaction_outputs_with_proof" + }, + Self::SubscribeTransactionsOrOutputsWithProof(_) => { + "subscribe_transactions_or_outputs_with_proof" + }, } } } @@ -109,6 +119,35 @@ pub struct NumberOfStatesRequest { pub version: Version, } +/// A client request for subscribing to transactions with proofs. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct SubscribeTransactionsWithProofRequest { + pub known_version: Version, + pub known_epoch: Epoch, + pub include_events: bool, + pub subscription_stream_id: u64, + pub subscription_stream_index: u64, +} + +/// A client request for subscribing to transaction outputs with proofs. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct SubscribeTransactionOutputsWithProofRequest { + pub known_version: Version, + pub known_epoch: Epoch, + pub subscription_stream_id: u64, + pub subscription_stream_index: u64, +} + +/// A client request for subscribing to transactions or outputs with proofs. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct SubscribeTransactionsOrOutputsWithProofRequest { + pub known_version: Version, + pub known_epoch: Epoch, + pub include_events: bool, + pub subscription_stream_id: u64, + pub subscription_stream_index: u64, +} + /// A client request for fetching transactions with proofs. 
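As a usage sketch, requesting the second chunk of an existing output subscription stream would construct one of the new request variants above (the field values here are arbitrary):

let request = DataClientRequest::SubscribeTransactionOutputsWithProof(
    SubscribeTransactionOutputsWithProofRequest {
        known_version: 1000,
        known_epoch: 5,
        subscription_stream_id: 200,
        subscription_stream_index: 1,
    },
);
assert_eq!(request.get_label(), "subscribe_transaction_outputs_with_proof");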
#[derive(Clone, Debug, Eq, PartialEq)] pub struct TransactionsWithProofRequest { @@ -142,6 +181,27 @@ pub struct PendingClientResponse { pub client_response: Option, aptos_data_client::error::Error>>, } +impl PendingClientResponse { + pub fn new(client_request: DataClientRequest) -> Self { + Self { + client_request, + client_response: None, + } + } + + #[cfg(test)] + /// Creates a new pending client response with a response already available + pub fn new_with_response( + client_request: DataClientRequest, + client_response: Result, aptos_data_client::error::Error>, + ) -> Self { + Self { + client_request, + client_response: Some(client_response), + } + } +} + impl Debug for PendingClientResponse { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { write!( diff --git a/state-sync/state-sync-v2/data-streaming-service/src/data_stream.rs b/state-sync/data-streaming-service/src/data_stream.rs similarity index 63% rename from state-sync/state-sync-v2/data-streaming-service/src/data_stream.rs rename to state-sync/data-streaming-service/src/data_stream.rs index c24c8860e9eda..dadfb508c69ba 100644 --- a/state-sync/state-sync-v2/data-streaming-service/src/data_stream.rs +++ b/state-sync/data-streaming-service/src/data_stream.rs @@ -8,13 +8,15 @@ use crate::{ DataClientRequest, DataNotification, DataPayload, EpochEndingLedgerInfosRequest, NewTransactionOutputsWithProofRequest, NewTransactionsOrOutputsWithProofRequest, NewTransactionsWithProofRequest, NotificationId, NumberOfStatesRequest, - StateValuesWithProofRequest, TransactionOutputsWithProofRequest, - TransactionsOrOutputsWithProofRequest, TransactionsWithProofRequest, + StateValuesWithProofRequest, SubscribeTransactionOutputsWithProofRequest, + SubscribeTransactionsOrOutputsWithProofRequest, SubscribeTransactionsWithProofRequest, + TransactionOutputsWithProofRequest, TransactionsOrOutputsWithProofRequest, + TransactionsWithProofRequest, }, error::Error, logging::{LogEntry, LogEvent, LogSchema}, metrics, - metrics::{increment_counter, increment_counter_multiple, start_timer}, + metrics::{increment_counter, increment_counter_multiple_labels, start_timer}, stream_engine::{DataStreamEngine, StreamEngine}, streaming_client::{NotificationFeedback, StreamRequest}, }; @@ -23,6 +25,7 @@ use aptos_data_client::{ global_summary::{AdvertisedData, GlobalDataSummary}, interface::{ AptosDataClientInterface, Response, ResponseContext, ResponseError, ResponsePayload, + SubscriptionRequestMetadata, }, }; use aptos_id_generator::{IdGenerator, U64IdGenerator}; @@ -121,7 +124,7 @@ impl DataStream { let data_stream_listener = DataStreamListener::new(data_stream_id, notification_receiver); // Create a new stream engine - let stream_engine = StreamEngine::new(stream_request, advertised_data)?; + let stream_engine = StreamEngine::new(data_stream_config, stream_request, advertised_data)?; // Create a new data stream let data_stream = Self { @@ -143,6 +146,17 @@ impl DataStream { Ok((data_stream, data_stream_listener)) } + /// Clears the sent data requests queue and drops all tasks + pub fn clear_sent_data_requests_queue(&mut self) { + // Clear all pending data requests + if let Some(sent_data_requests) = self.sent_data_requests.as_mut() { + sent_data_requests.clear(); + } + + // Abort all spawned tasks + self.abort_spawned_tasks(); + } + /// Returns true iff the first batch of data client requests has been sent pub fn data_requests_initialized(&self) -> bool { self.sent_data_requests.is_some() @@ -232,9 +246,11 @@ impl DataStream { // Send the client requests if 
max_num_requests_to_send > 0 { - let client_requests = self - .stream_engine - .create_data_client_requests(max_num_requests_to_send, global_data_summary)?; + let client_requests = self.stream_engine.create_data_client_requests( + max_num_requests_to_send, + global_data_summary, + self.notification_id_generator.clone(), + )?; for client_request in &client_requests { // Send the client request let pending_client_response = @@ -275,16 +291,15 @@ impl DataStream { ) -> PendingClientResponse { // Create a new pending client response let pending_client_response = Arc::new(Mutex::new(Box::new( - data_notification::PendingClientResponse { - client_request: data_client_request.clone(), - client_response: None, - }, + data_notification::PendingClientResponse::new(data_client_request.clone()), ))); // Calculate the request timeout to use, based on the // request type and the number of previous failures. let request_timeout_ms = if is_optimistic_fetch_request(&data_client_request) { self.data_client_config.optimistic_fetch_timeout_ms + } else if is_subscription_request(&data_client_request) { + self.data_client_config.subscription_response_timeout_ms } else if !request_retry { self.data_client_config.response_timeout_ms } else { @@ -299,7 +314,7 @@ impl DataStream { ); // Update the retry counter and log the request - increment_counter_multiple( + increment_counter_multiple_labels( &metrics::RETRIED_DATA_REQUESTS, data_client_request.get_label(), &request_timeout_ms.to_string(), @@ -394,43 +409,72 @@ impl DataStream { // Process any ready data responses for _ in 0..self.get_max_concurrent_requests() { if let Some(pending_response) = self.pop_pending_response_queue()? { + // Get the client request and response information let maybe_client_response = pending_response.lock().client_response.take(); let client_response = maybe_client_response.ok_or_else(|| { Error::UnexpectedErrorEncountered("The client response should be ready!".into()) })?; let client_request = &pending_response.lock().client_request.clone(); + // Process the client response match client_response { Ok(client_response) => { - if sanity_check_client_response(client_request, &client_response) { + // Sanity check and process the response + if sanity_check_client_response_type(client_request, &client_response) { + // If the response wasn't enough to satisfy the original request (e.g., + // it was truncated), missing data should be requested. + let missing_data_requested = + self.request_missing_data(client_request, &client_response.payload); + + // Send the data notification to the client self.send_data_notification_to_client(client_request, client_response) .await?; + + // Check if any missing data was requested + match missing_data_requested { + Ok(missing_data_requested) => { + if missing_data_requested { + break; // We're now head of line blocked on the missing data + } + }, + Err(error) => { + warn!(LogSchema::new(LogEntry::ReceivedDataResponse) + .stream_id(self.data_stream_id) + .event(LogEvent::Error) + .error(&error) + .message( + "Failed to determine if missing data was requested!" + )); + }, + } } else { + // The sanity check failed self.handle_sanity_check_failure( client_request, &client_response.context, )?; - break; + break; // We're now head of line blocked on the failed request } }, Err(error) => { - // If the error was a timeout and the request was an optimistic fetch - // we need to notify the stream engine and not retry the request. 
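The replacement error path below treats optimistic fetches and subscriptions uniformly as requests for new data: an error notifies the stream engine and clears the sent-requests queue instead of triggering a retry. A trivial combined predicate (not part of this diff) captures that intent, assuming the two predicates defined elsewhere in this file:

/// Returns true iff the given request is for new data (i.e., an optimistic
/// fetch or a subscription request).
fn is_new_data_request(request: &DataClientRequest) -> bool {
    is_optimistic_fetch_request(request) || is_subscription_request(request)
}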
- if matches!( - error, - aptos_data_client::error::Error::TimeoutWaitingForResponse(_) - ) && is_optimistic_fetch_request(client_request) + // Handle the error depending on the request type + if is_subscription_request(client_request) + || is_optimistic_fetch_request(client_request) { + // The request was for new data. We should notify the + // stream engine and clear the requests queue. self.stream_engine - .notify_optimistic_fetch_timeout(client_request)?; + .notify_new_data_request_error(client_request, error)?; + self.clear_sent_data_requests_queue(); } else { + // Otherwise, we should handle the error and simply retry self.handle_data_client_error(client_request, &error)?; - }; - break; + } + break; // We're now head of line blocked on the failed request }, } } else { - break; // The first response hasn't arrived yet. + break; // The first response hasn't arrived yet } } @@ -439,6 +483,37 @@ impl DataStream { self.create_and_send_client_requests(&global_data_summary) } + /// Requests any missing data from the previous client response + /// and returns true iff missing data was requested. + fn request_missing_data( + &mut self, + data_client_request: &DataClientRequest, + response_payload: &ResponsePayload, + ) -> Result { + // Identify if any missing data needs to be requested + if let Some(missing_data_request) = + create_missing_data_request(data_client_request, response_payload)? + { + // Increment the missing client request counter + increment_counter( + &metrics::SENT_DATA_REQUESTS_FOR_MISSING_DATA, + data_client_request.get_label(), + ); + + // Send the missing data request + let pending_client_response = + self.send_client_request(false, missing_data_request.clone()); + + // Push the pending response to the front of the queue + self.get_sent_data_requests()? + .push_front(pending_client_response); + + return Ok(true); // Missing data was requested + } + + Ok(false) // No missing data was requested + } + /// Pops and returns the first pending client response if the response has /// been received. Returns `None` otherwise. fn pop_pending_response_queue(&mut self) -> Result, Error> { @@ -668,6 +743,14 @@ impl DataStream { impl Drop for DataStream { /// Terminates the stream by aborting all spawned tasks fn drop(&mut self) { + self.abort_spawned_tasks(); + } +} + +impl DataStream { + /// Aborts all currently spawned tasks. This is useful if the stream is + /// terminated prematurely, or if the sent data requests are cleared. + fn abort_spawned_tasks(&mut self) { for spawned_task in &self.spawned_tasks { spawned_task.abort(); } @@ -711,9 +794,271 @@ impl FusedStream for DataStreamListener { } } -/// Returns true iff the data client response payload matches the expected type -/// of the original request. No other sanity checks are done. -fn sanity_check_client_response( +/// Creates and returns a missing data request if the given client response +/// doesn't satisfy the original request. If the request is satisfied, +/// None is returned. +pub(crate) fn create_missing_data_request( + data_client_request: &DataClientRequest, + response_payload: &ResponsePayload, +) -> Result, Error> { + // Determine if the request was satisfied, and if not, create + // a missing data request to satisfy the original request. 
+ match data_client_request { + DataClientRequest::EpochEndingLedgerInfos(request) => { + create_missing_epoch_ending_ledger_infos_request(request, response_payload) + }, + DataClientRequest::StateValuesWithProof(request) => { + create_missing_state_values_request(request, response_payload) + }, + DataClientRequest::TransactionsWithProof(request) => { + create_missing_transactions_request(request, response_payload) + }, + DataClientRequest::TransactionOutputsWithProof(request) => { + create_missing_transaction_outputs_request(request, response_payload) + }, + DataClientRequest::TransactionsOrOutputsWithProof(request) => { + create_missing_transactions_or_outputs_request(request, response_payload) + }, + _ => Ok(None), // The request was trivially satisfied (based on the type) + } +} + +/// Creates and returns a missing epoch ending ledger info request if the +/// given client response doesn't satisfy the original request. If the request +/// is satisfied, None is returned. +fn create_missing_epoch_ending_ledger_infos_request( + request: &EpochEndingLedgerInfosRequest, + response_payload: &ResponsePayload, +) -> Result, Error> { + // Determine the number of requested ledger infos + let num_requested_ledger_infos = request + .end_epoch + .checked_sub(request.start_epoch) + .and_then(|v| v.checked_add(1)) + .ok_or_else(|| { + Error::IntegerOverflow("Number of requested ledger infos has overflown!".into()) + })?; + + // Identify the missing data if the request was not satisfied + match response_payload { + ResponsePayload::EpochEndingLedgerInfos(ledger_infos) => { + // Check if the request was satisfied + let num_received_ledger_infos = ledger_infos.len() as u64; + if num_received_ledger_infos < num_requested_ledger_infos { + let start_epoch = request + .start_epoch + .checked_add(num_received_ledger_infos) + .ok_or_else(|| Error::IntegerOverflow("Start epoch has overflown!".into()))?; + Ok(Some(DataClientRequest::EpochEndingLedgerInfos( + EpochEndingLedgerInfosRequest { + start_epoch, + end_epoch: request.end_epoch, + }, + ))) + } else { + Ok(None) // The request was satisfied! + } + }, + payload => Err(Error::AptosDataClientResponseIsInvalid(format!( + "Invalid response payload found for epoch ending ledger info request: {:?}", + payload + ))), + } +} + +/// Creates and returns a missing state values request if the given client +/// response doesn't satisfy the original request. If the request is satisfied, +/// None is returned. 
+fn create_missing_state_values_request( + request: &StateValuesWithProofRequest, + response_payload: &ResponsePayload, +) -> Result, Error> { + // Determine the number of requested state values + let num_requested_state_values = request + .end_index + .checked_sub(request.start_index) + .and_then(|v| v.checked_add(1)) + .ok_or_else(|| { + Error::IntegerOverflow("Number of requested state values has overflown!".into()) + })?; + + // Identify the missing data if the request was not satisfied + match response_payload { + ResponsePayload::StateValuesWithProof(state_values_with_proof) => { + // Check if the request was satisfied + let num_received_state_values = state_values_with_proof.raw_values.len() as u64; + if num_received_state_values < num_requested_state_values { + let start_index = request + .start_index + .checked_add(num_received_state_values) + .ok_or_else(|| Error::IntegerOverflow("Start index has overflown!".into()))?; + Ok(Some(DataClientRequest::StateValuesWithProof( + StateValuesWithProofRequest { + version: request.version, + start_index, + end_index: request.end_index, + }, + ))) + } else { + Ok(None) // The request was satisfied! + } + }, + payload => Err(Error::AptosDataClientResponseIsInvalid(format!( + "Invalid response payload found for state values request: {:?}", + payload + ))), + } +} + +/// Creates and returns a missing transactions request if the given client +/// response doesn't satisfy the original request. If the request is satisfied, +/// None is returned. +fn create_missing_transactions_request( + request: &TransactionsWithProofRequest, + response_payload: &ResponsePayload, +) -> Result, Error> { + // Determine the number of requested transactions + let num_requested_transactions = request + .end_version + .checked_sub(request.start_version) + .and_then(|v| v.checked_add(1)) + .ok_or_else(|| { + Error::IntegerOverflow("Number of requested transactions has overflown!".into()) + })?; + + // Identify the missing data if the request was not satisfied + match response_payload { + ResponsePayload::TransactionsWithProof(transactions_with_proof) => { + // Check if the request was satisfied + let num_received_transactions = transactions_with_proof.transactions.len() as u64; + if num_received_transactions < num_requested_transactions { + let start_version = request + .start_version + .checked_add(num_received_transactions) + .ok_or_else(|| Error::IntegerOverflow("Start version has overflown!".into()))?; + Ok(Some(DataClientRequest::TransactionsWithProof( + TransactionsWithProofRequest { + start_version, + end_version: request.end_version, + proof_version: request.proof_version, + include_events: request.include_events, + }, + ))) + } else { + Ok(None) // The request was satisfied! + } + }, + payload => Err(Error::AptosDataClientResponseIsInvalid(format!( + "Invalid response payload found for transactions request: {:?}", + payload + ))), + } +} + +/// Creates and returns a missing transaction outputs request if the given client +/// response doesn't satisfy the original request. If the request is satisfied, +/// None is returned. 
+fn create_missing_transaction_outputs_request( + request: &TransactionOutputsWithProofRequest, + response_payload: &ResponsePayload, +) -> Result, Error> { + // Determine the number of requested transaction outputs + let num_requested_outputs = request + .end_version + .checked_sub(request.start_version) + .and_then(|v| v.checked_add(1)) + .ok_or_else(|| { + Error::IntegerOverflow("Number of requested transaction outputs has overflown!".into()) + })?; + + // Identify the missing data if the request was not satisfied + match response_payload { + ResponsePayload::TransactionOutputsWithProof(transaction_outputs_with_proof) => { + // Check if the request was satisfied + let num_received_outputs = transaction_outputs_with_proof + .transactions_and_outputs + .len() as u64; + if num_received_outputs < num_requested_outputs { + let start_version = request + .start_version + .checked_add(num_received_outputs) + .ok_or_else(|| Error::IntegerOverflow("Start version has overflown!".into()))?; + Ok(Some(DataClientRequest::TransactionOutputsWithProof( + TransactionOutputsWithProofRequest { + start_version, + end_version: request.end_version, + proof_version: request.proof_version, + }, + ))) + } else { + Ok(None) // The request was satisfied! + } + }, + payload => Err(Error::AptosDataClientResponseIsInvalid(format!( + "Invalid response payload found for transaction outputs request: {:?}", + payload + ))), + } +} + +/// Creates and returns a missing transactions or outputs request if the +/// given client response doesn't satisfy the original request. If the request +/// is satisfied, None is returned. +fn create_missing_transactions_or_outputs_request( + request: &TransactionsOrOutputsWithProofRequest, + response_payload: &ResponsePayload, +) -> Result, Error> { + // Determine the number of requested transactions or outputs + let num_request_data_items = request + .end_version + .checked_sub(request.start_version) + .and_then(|v| v.checked_add(1)) + .ok_or_else(|| { + Error::IntegerOverflow( + "Number of requested transactions or outputs has overflown!".into(), + ) + })?; + + // Calculate the number of received data items + let num_received_data_items = match response_payload { + ResponsePayload::TransactionsWithProof(transactions_with_proof) => { + transactions_with_proof.transactions.len() as u64 + }, + ResponsePayload::TransactionOutputsWithProof(transaction_outputs_with_proof) => { + transaction_outputs_with_proof + .transactions_and_outputs + .len() as u64 + }, + payload => { + return Err(Error::AptosDataClientResponseIsInvalid(format!( + "Invalid response payload found for transactions or outputs request: {:?}", + payload + ))) + }, + }; + + // Identify the missing data if the request was not satisfied + if num_received_data_items < num_request_data_items { + let start_version = request + .start_version + .checked_add(num_received_data_items) + .ok_or_else(|| Error::IntegerOverflow("Start version has overflown!".into()))?; + Ok(Some(DataClientRequest::TransactionsOrOutputsWithProof( + TransactionsOrOutputsWithProofRequest { + start_version, + end_version: request.end_version, + proof_version: request.proof_version, + include_events: request.include_events, + }, + ))) + } else { + Ok(None) // The request was satisfied! + } +} + +/// Returns true iff the data client response payload type matches the +/// expected type of the original request. No other sanity checks are done. 
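All of the create_missing_*_request helpers above share the same arithmetic: the original request covers an inclusive range, and a shortfall in the response moves the start of the follow-up request forward by the number of items received. A self-contained sketch of that calculation (the helper name is illustrative):

/// Given an inclusive [start, end] request and the number of items actually
/// received, returns the start of the follow-up request for the missing data,
/// or None if the original request was fully satisfied.
fn next_missing_start(start: u64, end: u64, num_received: u64) -> Option<u64> {
    let num_requested = end.checked_sub(start)?.checked_add(1)?;
    if num_received < num_requested {
        start.checked_add(num_received)
    } else {
        None // The request was satisfied
    }
}

#[cfg(test)]
mod missing_data_tests {
    use super::*;

    #[test]
    fn follow_up_start_is_advanced_by_received_items() {
        // Requested versions 10..=19 but only received 4 items: re-request from 14
        assert_eq!(next_missing_start(10, 19, 4), Some(14));

        // All 10 items were received: no follow-up request is needed
        assert_eq!(next_missing_start(10, 19, 10), None);
    }
}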
+fn sanity_check_client_response_type( data_client_request: &DataClientRequest, data_client_response: &Response, ) -> bool { @@ -757,6 +1102,27 @@ fn sanity_check_client_response( ResponsePayload::StateValuesWithProof(_) ) }, + DataClientRequest::SubscribeTransactionsWithProof(_) => { + matches!( + data_client_response.payload, + ResponsePayload::NewTransactionsWithProof(_) + ) + }, + DataClientRequest::SubscribeTransactionOutputsWithProof(_) => { + matches!( + data_client_response.payload, + ResponsePayload::NewTransactionOutputsWithProof(_) + ) + }, + DataClientRequest::SubscribeTransactionsOrOutputsWithProof(_) => { + matches!( + data_client_response.payload, + ResponsePayload::NewTransactionsWithProof(_) + ) || matches!( + data_client_response.payload, + ResponsePayload::NewTransactionOutputsWithProof(_) + ) + }, DataClientRequest::TransactionsWithProof(_) => { matches!( data_client_response.payload, @@ -848,6 +1214,26 @@ fn spawn_request_task( DataClientRequest::StateValuesWithProof(request) => { get_states_values_with_proof(aptos_data_client, request, request_timeout_ms).await }, + DataClientRequest::SubscribeTransactionsWithProof(request) => { + subscribe_to_transactions_with_proof(aptos_data_client, request, request_timeout_ms) + .await + }, + DataClientRequest::SubscribeTransactionOutputsWithProof(request) => { + subscribe_to_transaction_outputs_with_proof( + aptos_data_client, + request, + request_timeout_ms, + ) + .await + }, + DataClientRequest::SubscribeTransactionsOrOutputsWithProof(request) => { + subscribe_to_transactions_or_outputs_with_proof( + aptos_data_client, + request, + request_timeout_ms, + ) + .await + }, DataClientRequest::TransactionOutputsWithProof(request) => { get_transaction_outputs_with_proof(aptos_data_client, request, request_timeout_ms) .await @@ -1029,6 +1415,73 @@ async fn get_transactions_or_outputs_with_proof< Ok(Response::new(context, ResponsePayload::try_from(payload)?)) } +async fn subscribe_to_transactions_with_proof< + T: AptosDataClientInterface + Send + Clone + 'static, +>( + aptos_data_client: T, + request: SubscribeTransactionsWithProofRequest, + request_timeout_ms: u64, +) -> Result, aptos_data_client::error::Error> { + let subscription_request_metadata = SubscriptionRequestMetadata { + known_version_at_stream_start: request.known_version, + known_epoch_at_stream_start: request.known_epoch, + subscription_stream_id: request.subscription_stream_id, + subscription_stream_index: request.subscription_stream_index, + }; + let client_response = aptos_data_client.subscribe_to_transactions_with_proof( + subscription_request_metadata, + request.include_events, + request_timeout_ms, + ); + client_response + .await + .map(|response| response.map(ResponsePayload::from)) +} + +async fn subscribe_to_transaction_outputs_with_proof< + T: AptosDataClientInterface + Send + Clone + 'static, +>( + aptos_data_client: T, + request: SubscribeTransactionOutputsWithProofRequest, + request_timeout_ms: u64, +) -> Result, aptos_data_client::error::Error> { + let subscription_request_metadata = SubscriptionRequestMetadata { + known_version_at_stream_start: request.known_version, + known_epoch_at_stream_start: request.known_epoch, + subscription_stream_id: request.subscription_stream_id, + subscription_stream_index: request.subscription_stream_index, + }; + let client_response = aptos_data_client.subscribe_to_transaction_outputs_with_proof( + subscription_request_metadata, + request_timeout_ms, + ); + client_response + .await + .map(|response| 
response.map(ResponsePayload::from)) +} + +async fn subscribe_to_transactions_or_outputs_with_proof< + T: AptosDataClientInterface + Send + Clone + 'static, +>( + aptos_data_client: T, + request: SubscribeTransactionsOrOutputsWithProofRequest, + request_timeout_ms: u64, +) -> Result, aptos_data_client::error::Error> { + let subscription_request_metadata = SubscriptionRequestMetadata { + known_version_at_stream_start: request.known_version, + known_epoch_at_stream_start: request.known_epoch, + subscription_stream_id: request.subscription_stream_id, + subscription_stream_index: request.subscription_stream_index, + }; + let client_response = aptos_data_client.subscribe_to_transactions_or_outputs_with_proof( + subscription_request_metadata, + request.include_events, + request_timeout_ms, + ); + let (context, payload) = client_response.await?.into_parts(); + Ok(Response::new(context, ResponsePayload::try_from(payload)?)) +} + /// Returns true iff the given request is an optimistic fetch request fn is_optimistic_fetch_request(request: &DataClientRequest) -> bool { matches!(request, DataClientRequest::NewTransactionsWithProof(_)) @@ -1041,3 +1494,17 @@ fn is_optimistic_fetch_request(request: &DataClientRequest) -> bool { DataClientRequest::NewTransactionsOrOutputsWithProof(_) ) } + +/// Returns true iff the given request is a subscription request +fn is_subscription_request(request: &DataClientRequest) -> bool { + matches!( + request, + DataClientRequest::SubscribeTransactionsWithProof(_) + ) || matches!( + request, + DataClientRequest::SubscribeTransactionOutputsWithProof(_) + ) || matches!( + request, + DataClientRequest::SubscribeTransactionsOrOutputsWithProof(_) + ) +} diff --git a/state-sync/state-sync-v2/data-streaming-service/src/error.rs b/state-sync/data-streaming-service/src/error.rs similarity index 100% rename from state-sync/state-sync-v2/data-streaming-service/src/error.rs rename to state-sync/data-streaming-service/src/error.rs diff --git a/state-sync/state-sync-v2/data-streaming-service/src/lib.rs b/state-sync/data-streaming-service/src/lib.rs similarity index 100% rename from state-sync/state-sync-v2/data-streaming-service/src/lib.rs rename to state-sync/data-streaming-service/src/lib.rs diff --git a/state-sync/state-sync-v2/data-streaming-service/src/logging.rs b/state-sync/data-streaming-service/src/logging.rs similarity index 95% rename from state-sync/state-sync-v2/data-streaming-service/src/logging.rs rename to state-sync/data-streaming-service/src/logging.rs index df6bbe94aeb79..a09575dbfad8a 100644 --- a/state-sync/state-sync-v2/data-streaming-service/src/logging.rs +++ b/state-sync/data-streaming-service/src/logging.rs @@ -30,15 +30,16 @@ impl<'a> LogSchema<'a> { #[derive(Clone, Copy, Serialize)] #[serde(rename_all = "snake_case")] pub enum LogEntry { - CheckStreamProgress, AptosDataClient, + CheckStreamProgress, + CreatedSubscriptionStream, EndOfStreamNotification, - HandleTerminateRequest, HandleStreamRequest, + HandleTerminateRequest, InitializeStream, ReceivedDataResponse, RefreshGlobalData, - RequestTimeout, + RequestError, RespondToStreamRequest, RetryDataRequest, SendDataRequests, diff --git a/state-sync/state-sync-v2/data-streaming-service/src/metrics.rs b/state-sync/data-streaming-service/src/metrics.rs similarity index 69% rename from state-sync/state-sync-v2/data-streaming-service/src/metrics.rs rename to state-sync/data-streaming-service/src/metrics.rs index ddf17fcc8b04c..40cec1d8b2a8e 100644 --- 
a/state-sync/state-sync-v2/data-streaming-service/src/metrics.rs +++ b/state-sync/data-streaming-service/src/metrics.rs @@ -8,6 +8,16 @@ use aptos_metrics_core::{ }; use once_cell::sync::Lazy; +// Subscription stream termination labels +pub const MAX_CONSECUTIVE_REQUESTS_LABEL: &str = "max_consecutive_requests"; + +// Histogram buckets for tracking chunk sizes of data responses +const DATA_RESPONSE_CHUNK_SIZE_BUCKETS: &[f64] = &[ + 1.0, 2.0, 4.0, 5.0, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, 750.0, 1000.0, 2500.0, 5000.0, + 7500.0, 10_000.0, 12_500.0, 15_000.0, 17_500.0, 20_000.0, 25_000.0, 30_000.0, 35_000.0, + 40_000.0, 45_000.0, 50_000.0, 75_000.0, 100_000.0, +]; + // Latency buckets for network latencies (i.e., the defaults only go up // to 10 seconds, but we usually require more). const NETWORK_LATENCY_BUCKETS: [f64; 14] = [ @@ -42,6 +52,15 @@ pub static CREATE_DATA_STREAM: Lazy = Lazy::new(|| { .unwrap() }); +/// Counter for the creation of new subscription streams +pub static CREATE_SUBSCRIPTION_STREAM: Lazy = Lazy::new(|| { + register_int_counter!( + "aptos_data_streaming_service_create_subscription_stream", + "Counters related to the creation of new subscription streams", + ) + .unwrap() +}); + /// Counter for the termination of existing data streams pub static TERMINATE_DATA_STREAM: Lazy = Lazy::new(|| { register_int_counter_vec!( @@ -52,6 +71,16 @@ pub static TERMINATE_DATA_STREAM: Lazy = Lazy::new(|| { .unwrap() }); +/// Counter for the termination of existing subscription streams +pub static TERMINATE_SUBSCRIPTION_STREAM: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "aptos_data_streaming_service_terminate_subscription_stream", + "Counters related to the termination of existing subscription streams", + &["termination_reason"] + ) + .unwrap() +}); + /// Counter for stream progress check errors pub static CHECK_STREAM_PROGRESS_ERROR: Lazy = Lazy::new(|| { register_int_counter_vec!( @@ -82,6 +111,16 @@ pub static SENT_DATA_REQUESTS: Lazy = Lazy::new(|| { .unwrap() }); +/// Counter for tracking sent data requests for missing data +pub static SENT_DATA_REQUESTS_FOR_MISSING_DATA: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "aptos_data_streaming_service_sent_data_requests_for_missing_data", + "Counters related to sent data requests for missing data", + &["request_type"] + ) + .unwrap() +}); + /// Counter for tracking data requests that were retried (including /// the new timeouts). pub static RETRIED_DATA_REQUESTS: Lazy = Lazy::new(|| { @@ -112,6 +151,16 @@ pub static RECEIVED_DATA_RESPONSE: Lazy = Lazy::new(|| { .unwrap() }); +/// Counter for tracking the sizes of received data chunks +pub static RECEIVED_DATA_RESPONSE_CHUNK_SIZE: Lazy = Lazy::new(|| { + let histogram_opts = histogram_opts!( + "aptos_data_streaming_service_received_data_chunk_sizes", + "Counter for tracking sizes of data chunks received by the data stream", + DATA_RESPONSE_CHUNK_SIZE_BUCKETS.to_vec() + ); + register_histogram_vec!(histogram_opts, &["request_type", "response_type"]).unwrap() +}); + /// Counter for tracking received data responses pub static RECEIVED_RESPONSE_ERROR: Lazy = Lazy::new(|| { register_int_counter_vec!( @@ -138,7 +187,7 @@ pub fn increment_counter(counter: &Lazy, label: &str) { } /// Increments the given counter with two label values. 
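The new chunk-size histogram above is observed with two labels through the observe_value helper added below. A minimal, stand-alone equivalent using the prometheus crate directly (the metric name, buckets, and helper are illustrative, not the actual metric):

use once_cell::sync::Lazy;
use prometheus::{histogram_opts, register_histogram_vec, HistogramVec};

static EXAMPLE_CHUNK_SIZES: Lazy<HistogramVec> = Lazy::new(|| {
    let histogram_opts = histogram_opts!(
        "example_received_data_chunk_sizes",
        "Sizes of data chunks received by the data stream",
        vec![1.0, 10.0, 100.0, 1_000.0, 10_000.0, 100_000.0]
    );
    register_histogram_vec!(histogram_opts, &["request_type", "response_type"]).unwrap()
});

fn observe_chunk_size(request_type: &str, response_type: &str, num_items: u64) {
    EXAMPLE_CHUNK_SIZES
        .with_label_values(&[request_type, response_type])
        .observe(num_items as f64);
}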
-pub fn increment_counter_multiple( +pub fn increment_counter_multiple_labels( counter: &Lazy, first_label: &str, second_label: &str, @@ -148,6 +197,18 @@ pub fn increment_counter_multiple( .inc(); } +/// Adds a new observation for the given histogram, labels and value +pub fn observe_value( + histogram: &Lazy, + first_label: &str, + second_label: &str, + value: u64, +) { + histogram + .with_label_values(&[first_label, second_label]) + .observe(value as f64); +} + /// Sets the number of active data streams pub fn set_active_data_streams(value: usize) { ACTIVE_DATA_STREAMS.set(value as i64); diff --git a/state-sync/state-sync-v2/data-streaming-service/src/stream_engine.rs b/state-sync/data-streaming-service/src/stream_engine.rs similarity index 66% rename from state-sync/state-sync-v2/data-streaming-service/src/stream_engine.rs rename to state-sync/data-streaming-service/src/stream_engine.rs index 2ac8fd15c7106..f8f98b0a40196 100644 --- a/state-sync/state-sync-v2/data-streaming-service/src/stream_engine.rs +++ b/state-sync/data-streaming-service/src/stream_engine.rs @@ -8,21 +8,26 @@ use crate::{ DataClientRequest::{ EpochEndingLedgerInfos, NewTransactionOutputsWithProof, NewTransactionsOrOutputsWithProof, NewTransactionsWithProof, NumberOfStates, - StateValuesWithProof, TransactionOutputsWithProof, TransactionsOrOutputsWithProof, - TransactionsWithProof, + StateValuesWithProof, SubscribeTransactionOutputsWithProof, + SubscribeTransactionsOrOutputsWithProof, SubscribeTransactionsWithProof, + TransactionOutputsWithProof, TransactionsOrOutputsWithProof, TransactionsWithProof, }, DataNotification, DataPayload, EpochEndingLedgerInfosRequest, NewTransactionOutputsWithProofRequest, NewTransactionsOrOutputsWithProofRequest, NewTransactionsWithProofRequest, NumberOfStatesRequest, StateValuesWithProofRequest, + SubscribeTransactionOutputsWithProofRequest, + SubscribeTransactionsOrOutputsWithProofRequest, SubscribeTransactionsWithProofRequest, TransactionOutputsWithProofRequest, TransactionsOrOutputsWithProofRequest, TransactionsWithProofRequest, }, error::Error, logging::{LogEntry, LogEvent, LogSchema}, + metrics, streaming_client::{ Epoch, GetAllEpochEndingLedgerInfosRequest, GetAllStatesRequest, StreamRequest, }, }; +use aptos_config::config::DataStreamingServiceConfig; use aptos_data_client::{ global_summary::{AdvertisedData, GlobalDataSummary}, interface::ResponsePayload, @@ -69,6 +74,7 @@ pub trait DataStreamEngine { &mut self, max_number_of_requests: u64, global_data_summary: &GlobalDataSummary, + unique_id_generator: Arc, ) -> Result, Error>; /// Returns true iff all remaining data required to satisfy the stream is @@ -78,17 +84,20 @@ pub trait DataStreamEngine { /// Returns true iff the stream has sent all data to the stream listener. fn is_stream_complete(&self) -> bool; - /// Notifies the data stream engine that a timeout was encountered when - /// trying to send the optimistic fetch request. + /// Notifies the data stream engine that an error was encountered when + /// trying to send an optimistic fetch or subscription request. /// - /// Note: Most engines shouldn't process these notifications, so a default - /// implementation that returns an error is provided. If an optimistic fetch - /// request does exist, there should only ever be a single request in-flight. - fn notify_optimistic_fetch_timeout( + /// Note: Most engines shouldn't process these notifications, so a + /// default implementation that returns an error is provided. 
+ fn notify_new_data_request_error( &mut self, client_request: &DataClientRequest, + request_error: aptos_data_client::error::Error, ) -> Result<(), Error> { - Err(Error::UnexpectedErrorEncountered(format!("Received an optimistic fetch request timeout but no request was sent! Reported request: {:?}", client_request))) + Err(Error::UnexpectedErrorEncountered(format!( + "Received a new data request error notification but no request was sent! Reported error: {:?}, request: {:?}", + request_error, client_request + ))) } /// Transforms a given data client response (for the previously sent @@ -117,19 +126,20 @@ pub enum StreamEngine { impl StreamEngine { pub fn new( + data_stream_config: DataStreamingServiceConfig, stream_request: &StreamRequest, advertised_data: &AdvertisedData, ) -> Result { match stream_request { - StreamRequest::ContinuouslyStreamTransactionOutputs(_) => { - Ok(ContinuousTransactionStreamEngine::new(stream_request)?.into()) - }, - StreamRequest::ContinuouslyStreamTransactions(_) => { - Ok(ContinuousTransactionStreamEngine::new(stream_request)?.into()) - }, - StreamRequest::ContinuouslyStreamTransactionsOrOutputs(_) => { - Ok(ContinuousTransactionStreamEngine::new(stream_request)?.into()) - }, + StreamRequest::ContinuouslyStreamTransactionOutputs(_) => Ok( + ContinuousTransactionStreamEngine::new(data_stream_config, stream_request)?.into(), + ), + StreamRequest::ContinuouslyStreamTransactions(_) => Ok( + ContinuousTransactionStreamEngine::new(data_stream_config, stream_request)?.into(), + ), + StreamRequest::ContinuouslyStreamTransactionsOrOutputs(_) => Ok( + ContinuousTransactionStreamEngine::new(data_stream_config, stream_request)?.into(), + ), StreamRequest::GetAllStates(request) => Ok(StateStreamEngine::new(request)?.into()), StreamRequest::GetAllEpochEndingLedgerInfos(request) => { Ok(EpochEndingStreamEngine::new(request, advertised_data)?.into()) @@ -216,6 +226,7 @@ impl DataStreamEngine for StateStreamEngine { &mut self, max_number_of_requests: u64, global_data_summary: &GlobalDataSummary, + _unique_id_generator: Arc, ) -> Result, Error> { if self.number_of_states.is_none() && self.state_num_requested { return Ok(vec![]); // Wait for the number of states to be returned @@ -227,7 +238,7 @@ impl DataStreamEngine for StateStreamEngine { .ok_or_else(|| Error::IntegerOverflow("End state index has overflown!".into()))?; // Create the client requests - let client_requests = create_data_client_requests( + let client_requests = create_data_client_request_batch( self.next_request_index, end_state_index, max_number_of_requests, @@ -273,26 +284,49 @@ impl DataStreamEngine for StateStreamEngine { client_response_payload: ResponsePayload, notification_id_generator: Arc, ) -> Result, Error> { + // Update the metrics for the number of received items + update_response_chunk_size_metrics(client_request, &client_response_payload); + + // Handle and transform the response match client_request { StateValuesWithProof(request) => { + // Verify the client request indices verify_client_request_indices( self.next_stream_index, request.start_index, request.end_index, )?; - // Update the local stream notification tracker - self.next_stream_index = request.end_index.checked_add(1).ok_or_else(|| { + // Identify the last received state index and bound it appropriately + let last_received_index = match &client_response_payload { + ResponsePayload::StateValuesWithProof(state_values_with_proof) => { + // Verify that we received at least one state value + if state_values_with_proof.raw_values.is_empty() 
{ + return Err(Error::AptosDataClientResponseIsInvalid(format!( + "Received an empty state values response! Request: {:?}", + client_request + ))); + } + + // Get the last received state index + state_values_with_proof.last_index + }, + _ => invalid_response_type!(client_response_payload), + }; + let last_received_index = + bound_by_range(last_received_index, request.start_index, request.end_index); + + // Update the next stream index + self.next_stream_index = last_received_index.checked_add(1).ok_or_else(|| { Error::IntegerOverflow("Next stream index has overflown!".into()) })?; // Check if the stream is complete - if request.end_index - == self - .get_number_of_states()? - .checked_sub(1) - .ok_or_else(|| Error::IntegerOverflow("End index has overflown!".into()))? - { + let last_stream_index = self + .get_number_of_states()? + .checked_sub(1) + .ok_or_else(|| Error::IntegerOverflow("End index has overflown!".into()))?; + if last_received_index >= last_stream_index { self.stream_is_complete = true; } @@ -337,6 +371,9 @@ impl DataStreamEngine for StateStreamEngine { #[derive(Clone, Debug)] pub struct ContinuousTransactionStreamEngine { + // The data streaming service config + pub data_streaming_config: DataStreamingServiceConfig, + // The original stream request made by the client (i.e., a continuous // transaction or transaction output stream request). pub request: StreamRequest, @@ -350,6 +387,9 @@ pub struct ContinuousTransactionStreamEngine { // True iff a request has been created to optimistically fetch data pub optimistic_fetch_requested: bool, + // The active subscription stream (if it exists) + active_subscription_stream: Option, + // The next version and epoch that we're waiting to send to the // client along the stream. All versions before this have been sent. pub next_stream_version_and_epoch: (Version, Epoch), @@ -364,7 +404,10 @@ pub struct ContinuousTransactionStreamEngine { } impl ContinuousTransactionStreamEngine { - fn new(stream_request: &StreamRequest) -> Result { + fn new( + data_streaming_config: DataStreamingServiceConfig, + stream_request: &StreamRequest, + ) -> Result { let (next_version, next_epoch) = match stream_request { StreamRequest::ContinuouslyStreamTransactions(request) => { Self::calculate_next_version_and_epoch(request.known_version, request.known_epoch)? 
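A minimal standalone sketch of the clamping arithmetic these hunks rely on; this is illustrative only (not part of the patch) and assumes the `bound_by_range` helper introduced later in this file, which is a thin wrapper around `u64::clamp`:

/// Bounds the given number to the inclusive range [min, max]
/// (mirrors the `bound_by_range` helper added by this patch).
fn bound_by_range(number: u64, min: u64, max: u64) -> u64 {
    number.clamp(min, max)
}

/// Computes the next stream index after a (possibly partial) response.
/// Empty responses are rejected earlier in the patch, so callers
/// guarantee `num_received_items >= 1`.
fn next_stream_index_after_response(
    request_start: u64,
    request_end: u64,
    num_received_items: u64,
) -> Option<u64> {
    // Last received index = start + count - 1, clamped to the requested range
    let last_received = request_start
        .checked_add(num_received_items)?
        .checked_sub(1)?;
    let last_received = bound_by_range(last_received, request_start, request_end);

    // The stream resumes at the item immediately after the last received one
    last_received.checked_add(1)
}

fn main() {
    // A request for indices 100..=199 that was answered with only 50 items
    // advances the stream to index 150 (not 200).
    assert_eq!(next_stream_index_after_response(100, 199, 50), Some(150));

    // Over-delivery beyond the requested range is clamped to the request end.
    assert_eq!(next_stream_index_after_response(100, 199, 150), Some(200));
}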
@@ -379,10 +422,12 @@ impl ContinuousTransactionStreamEngine { }; Ok(ContinuousTransactionStreamEngine { + data_streaming_config, request: stream_request.clone(), current_target_ledger_info: None, end_of_epoch_requested: false, optimistic_fetch_requested: false, + active_subscription_stream: None, next_stream_version_and_epoch: (next_version, next_epoch), next_request_version_and_epoch: (next_version, next_epoch), stream_is_complete: false, @@ -446,14 +491,47 @@ impl ContinuousTransactionStreamEngine { fn create_notification_for_continuous_data( &mut self, - request_start: Version, - request_end: Version, + request_start_version: Version, + request_end_version: Version, client_response_payload: ResponsePayload, notification_id_generator: Arc, ) -> Result { + // Check the number of received versions + let num_received_versions = match &client_response_payload { + ResponsePayload::TransactionsWithProof(transactions_with_proof) => { + transactions_with_proof.transactions.len() + }, + ResponsePayload::TransactionOutputsWithProof(outputs_with_proof) => { + outputs_with_proof.transactions_and_outputs.len() + }, + _ => invalid_response_type!(client_response_payload), + }; + if num_received_versions == 0 { + return Err(Error::AptosDataClientResponseIsInvalid(format!( + "Received an empty continuous data response! Request: {:?}", + self.request + ))); + } + + // Identify the last received version and bound it appropriately + let last_received_version = request_start_version + .checked_add(num_received_versions as u64) + .and_then(|version| version.checked_sub(1)) + .ok_or_else(|| Error::IntegerOverflow("Last received version has overflown!".into()))?; + let last_received_version = bound_by_range( + last_received_version, + request_start_version, + request_end_version, + ); + // Update the stream version let target_ledger_info = self.get_target_ledger_info()?.clone(); - self.update_stream_version_and_epoch(request_start, request_end, &target_ledger_info)?; + self.update_stream_version_and_epoch( + request_start_version, + request_end_version, + &target_ledger_info, + last_received_version, + )?; // Create the data notification let data_notification = create_data_notification( @@ -465,54 +543,32 @@ impl ContinuousTransactionStreamEngine { Ok(data_notification) } - fn create_notification_for_optimistic_fetch_data( + /// Creates a data notification for new transaction data + /// starting at the specified first version. 
+ fn create_notification_for_new_data( &mut self, - known_version: Version, + first_version: u64, client_response_payload: ResponsePayload, notification_id_generator: Arc, ) -> Result { - // Calculate the first version - let first_version = known_version - .checked_add(1) - .ok_or_else(|| Error::IntegerOverflow("First version has overflown!".into()))?; - let (num_versions, target_ledger_info) = match &client_response_payload { - ResponsePayload::NewTransactionsWithProof(( - transactions_with_proof, - target_ledger_info, - )) => ( - transactions_with_proof.transactions.len(), - target_ledger_info.clone(), - ), - ResponsePayload::NewTransactionOutputsWithProof(( - outputs_with_proof, - target_ledger_info, - )) => ( - outputs_with_proof.transactions_and_outputs.len(), - target_ledger_info.clone(), - ), - response_payload => { - // TODO(joshlind): eventually we want to notify the data client of the bad response - return Err(Error::AptosDataClientResponseIsInvalid(format!( - "Expected new transactions or outputs but got: {:?}", - response_payload - ))); - }, - }; + // Calculate the number of data items and target ledger info + let (num_versions, target_ledger_info) = + extract_new_versions_and_target(&client_response_payload)?; - // Calculate the last version - if num_versions == 0 { - // TODO(joshlind): eventually we want to notify the data client of the bad response - return Err(Error::AptosDataClientResponseIsInvalid( - "Received an empty transaction or output list!".into(), - )); - } - let last_version = known_version + // Calculate the last version (last_version = first_version + num_versions - 1) + let last_version = first_version .checked_add(num_versions as u64) + .and_then(|v| v.checked_sub(1)) .ok_or_else(|| Error::IntegerOverflow("Last version has overflown!".into()))?; // Update the request and stream versions self.update_request_version_and_epoch(last_version, &target_ledger_info)?; - self.update_stream_version_and_epoch(first_version, last_version, &target_ledger_info)?; + self.update_stream_version_and_epoch( + first_version, + last_version, + &target_ledger_info, + last_version, + )?; // Create the data notification let data_notification = create_data_notification( @@ -521,15 +577,63 @@ impl ContinuousTransactionStreamEngine { Some(target_ledger_info.clone()), self.clone().into(), )?; + Ok(data_notification) } - fn create_optimistic_fetch_request(&mut self) -> Result { - let (next_request_version, known_epoch) = self.next_request_version_and_epoch; - let known_version = next_request_version - .checked_sub(1) - .ok_or_else(|| Error::IntegerOverflow("Last version has overflown!".into()))?; + fn create_notification_for_optimistic_fetch_data( + &mut self, + known_version: Version, + client_response_payload: ResponsePayload, + notification_id_generator: Arc, + ) -> Result { + // Calculate the first version + let first_version = known_version + .checked_add(1) + .ok_or_else(|| Error::IntegerOverflow("First version has overflown!".into()))?; + + // Create the data notification + self.create_notification_for_new_data( + first_version, + client_response_payload, + notification_id_generator, + ) + } + + /// Creates a notification for subscription data by + /// transforming the given client response payload. 
+ fn create_notification_for_subscription_data( + &mut self, + subscription_stream_index: u64, + client_response_payload: ResponsePayload, + notification_id_generator: Arc, + ) -> Result { + // If there's an active subscription and this is the + // last expected response then terminate the stream. + if let Some(active_subscription_stream) = &self.active_subscription_stream { + if subscription_stream_index + >= active_subscription_stream.get_max_subscription_stream_index() + { + // Terminate the stream and update the termination metrics + self.active_subscription_stream = None; + update_terminated_subscription_metrics(metrics::MAX_CONSECUTIVE_REQUESTS_LABEL); + } + } + + // Get the first version + let (first_version, _) = self.next_request_version_and_epoch; + // Create the data notification + self.create_notification_for_new_data( + first_version, + client_response_payload, + notification_id_generator, + ) + } + + /// Creates an optimistic fetch request for the current stream state + fn create_optimistic_fetch_request(&mut self) -> Result { + let (known_version, known_epoch) = self.get_known_version_and_epoch()?; let data_client_request = match &self.request { StreamRequest::ContinuouslyStreamTransactions(request) => { NewTransactionsWithProof(NewTransactionsWithProofRequest { @@ -553,9 +657,99 @@ impl ContinuousTransactionStreamEngine { }, request => invalid_stream_request!(request), }; + Ok(data_client_request) } + /// Creates a new set of subscription stream requests to extend + /// the currently active subscription stream. The number of requests + /// created will be bound by the specified `max_number_of_requests`. + fn create_subscription_stream_requests( + &mut self, + max_number_of_requests: u64, + ) -> Result, Error> { + // Get the active subscription stream + let mut active_subscription_stream = match self.active_subscription_stream.take() { + Some(active_subscription_stream) => active_subscription_stream, + None => { + // We don't have an active subscription stream! + return Err(Error::UnexpectedErrorEncountered( + "No active subscription stream found! Unable to create requests!".into(), + )); + }, + }; + + // Get the highest known version and epoch at stream start + let (known_version, known_epoch) = + active_subscription_stream.get_known_version_and_epoch_at_stream_start(); + + // Create the subscription stream requests + let mut subscription_stream_requests = vec![]; + for _ in 0..max_number_of_requests { + // Get the current subscription stream ID and index + let subscription_stream_id = active_subscription_stream.get_subscription_stream_id(); + let subscription_stream_index = + active_subscription_stream.get_next_subscription_stream_index(); + + // Note: if the stream hits the total max subscription stream index, + // then no new requests should be created. The stream will eventually + // be marked for termination once a response is received for + // the last subscription request. 
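// (Illustrative note, not part of the upstream change: stream indices start at
// zero and the maximum index is `max_num_consecutive_subscriptions - 1`, so with
// a hypothetical config value of 3 the requests use indices 0, 1 and 2, and the
// check below stops any further requests; see `SubscriptionStream::new` later
// in this file.)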
+ if subscription_stream_index + > active_subscription_stream.get_max_subscription_stream_index() + { + break; + } + + // Create the request based on the stream type + let data_client_request = match &self.request { + StreamRequest::ContinuouslyStreamTransactions(request) => { + SubscribeTransactionsWithProof(SubscribeTransactionsWithProofRequest { + known_version, + known_epoch, + include_events: request.include_events, + subscription_stream_id, + subscription_stream_index, + }) + }, + StreamRequest::ContinuouslyStreamTransactionOutputs(_) => { + SubscribeTransactionOutputsWithProof( + SubscribeTransactionOutputsWithProofRequest { + known_version, + known_epoch, + subscription_stream_id, + subscription_stream_index, + }, + ) + }, + StreamRequest::ContinuouslyStreamTransactionsOrOutputs(request) => { + SubscribeTransactionsOrOutputsWithProof( + SubscribeTransactionsOrOutputsWithProofRequest { + known_version, + known_epoch, + include_events: request.include_events, + subscription_stream_id, + subscription_stream_index, + }, + ) + }, + request => invalid_stream_request!(request), + }; + + // Update the next subscription stream index + active_subscription_stream.increment_subscription_stream_index(); + + // Add the request to the active list + subscription_stream_requests.push(data_client_request); + } + + // Update the active subscription stream state + self.active_subscription_stream = Some(active_subscription_stream); + + // Return the subscription stream requests + Ok(subscription_stream_requests) + } + fn handle_epoch_ending_response( &mut self, response_payload: ResponsePayload, @@ -594,12 +788,189 @@ impl ContinuousTransactionStreamEngine { } } + /// Returns the known version and epoch for the stream + fn get_known_version_and_epoch(&mut self) -> Result<(u64, Epoch), Error> { + let (next_request_version, known_epoch) = self.next_request_version_and_epoch; + let known_version = next_request_version + .checked_sub(1) + .ok_or_else(|| Error::IntegerOverflow("Last version has overflown!".into()))?; + + Ok((known_version, known_epoch)) + } + + /// Handles an optimistic fetch timeout for the specified client request + fn handle_optimistic_fetch_error( + &mut self, + client_request: &DataClientRequest, + request_error: aptos_data_client::error::Error, + ) -> Result<(), Error> { + // We should only receive an error notification if we sent an optimistic fetch request + if !self.optimistic_fetch_requested { + return Err(Error::UnexpectedErrorEncountered(format!( + "Received an optimistic fetch notification error but no request is in-flight! 
Error: {:?}, request: {:?}", + request_error, client_request + ))); + } + + // Reset the optimistic fetch request + self.optimistic_fetch_requested = false; + + // Log the error based on the request type + if matches!( + self.request, + StreamRequest::ContinuouslyStreamTransactions(_) + ) && matches!( + client_request, + DataClientRequest::NewTransactionsWithProof(_) + ) { + info!( + (LogSchema::new(LogEntry::RequestError).message(&format!( + "Optimistic fetch error for new transactions: {:?}", + request_error + ))) + ); + } else if matches!( + self.request, + StreamRequest::ContinuouslyStreamTransactionOutputs(_) + ) && matches!( + client_request, + DataClientRequest::NewTransactionOutputsWithProof(_) + ) { + info!( + (LogSchema::new(LogEntry::RequestError).message(&format!( + "Optimistic fetch error for new transaction outputs: {:?}", + request_error + ))) + ); + } else if matches!( + self.request, + StreamRequest::ContinuouslyStreamTransactionsOrOutputs(_) + ) && matches!( + client_request, + DataClientRequest::NewTransactionsOrOutputsWithProof(_) + ) { + info!( + (LogSchema::new(LogEntry::RequestError).message(&format!( + "Optimistic fetch error for new transactions or outputs: {:?}", + request_error + ))) + ); + } else { + return Err(Error::UnexpectedErrorEncountered(format!( + "Received an optimistic fetch error but the request did not match the expected type for the stream! \ + Error: {:?}, request: {:?}, stream: {:?}", request_error, client_request, self.request + ))); + } + + Ok(()) + } + + /// Handles a subscription error for the specified client request + fn handle_subscription_error( + &mut self, + client_request: &DataClientRequest, + request_error: aptos_data_client::error::Error, + ) -> Result<(), Error> { + // We should only receive an error notification if we have an active stream + if self.active_subscription_stream.is_none() { + return Err(Error::UnexpectedErrorEncountered(format!( + "Received a subscription notification error but no active subscription stream exists! Error: {:?}, request: {:?}", + request_error, client_request + ))); + } + + // Reset the active subscription stream and update the metrics + self.active_subscription_stream = None; + update_terminated_subscription_metrics(request_error.get_label()); + + // Log the error based on the request type + if matches!( + self.request, + StreamRequest::ContinuouslyStreamTransactions(_) + ) && matches!( + client_request, + DataClientRequest::SubscribeTransactionsWithProof(_) + ) { + info!( + (LogSchema::new(LogEntry::RequestError).message(&format!( + "Subscription error for new transactions: {:?}", + request_error + ))) + ); + } else if matches!( + self.request, + StreamRequest::ContinuouslyStreamTransactionOutputs(_) + ) && matches!( + client_request, + DataClientRequest::SubscribeTransactionOutputsWithProof(_) + ) { + info!( + (LogSchema::new(LogEntry::RequestError).message(&format!( + "Subscription error for new transaction outputs: {:?}", + request_error + ))) + ); + } else if matches!( + self.request, + StreamRequest::ContinuouslyStreamTransactionsOrOutputs(_) + ) && matches!( + client_request, + DataClientRequest::SubscribeTransactionsOrOutputsWithProof(_) + ) { + info!( + (LogSchema::new(LogEntry::RequestError).message(&format!( + "Subscription error for new transactions or outputs: {:?}", + request_error + ))) + ); + } else { + return Err(Error::UnexpectedErrorEncountered(format!( + "Received a subscription request error but the request did not match the expected type for the stream! 
\ + Error: {:?}, request: {:?}, stream: {:?}", request_error, client_request, self.request + ))); + } + + Ok(()) + } + + /// Starts a new active subscription stream + fn start_active_subscription_stream( + &mut self, + unique_id_generator: Arc, + ) -> Result<(), Error> { + // Verify that we don't already have an active subscription stream + if self.active_subscription_stream.is_some() { + return Err(Error::UnexpectedErrorEncountered( + "Unable to start a new subscription stream when one is already active!".into(), + )); + } + + // Get the highest known version and epoch + let (known_version, known_epoch) = self.get_known_version_and_epoch()?; + + // Create and save a new subscription stream + let subscription_stream = SubscriptionStream::new( + self.data_streaming_config, + unique_id_generator, + known_version, + known_epoch, + ); + self.active_subscription_stream = Some(subscription_stream); + + // Update the metrics counter + metrics::CREATE_SUBSCRIPTION_STREAM.inc(); + + Ok(()) + } + fn update_stream_version_and_epoch( &mut self, request_start_version: Version, request_end_version: Version, target_ledger_info: &LedgerInfoWithSignatures, + last_received_version: Version, ) -> Result<(), Error> { + // Verify the client request indices let (next_stream_version, mut next_stream_epoch) = self.next_stream_version_and_epoch; verify_client_request_indices( next_stream_version, @@ -608,46 +979,35 @@ impl ContinuousTransactionStreamEngine { )?; // Update the next stream version and epoch - if request_end_version == target_ledger_info.ledger_info().version() + if last_received_version == target_ledger_info.ledger_info().version() && target_ledger_info.ledger_info().ends_epoch() { next_stream_epoch = next_stream_epoch .checked_add(1) .ok_or_else(|| Error::IntegerOverflow("Next stream epoch has overflown!".into()))?; } - let next_stream_version = request_end_version + let next_stream_version = last_received_version .checked_add(1) .ok_or_else(|| Error::IntegerOverflow("Next stream version has overflown!".into()))?; self.next_stream_version_and_epoch = (next_stream_version, next_stream_epoch); // Check if the stream is now complete - match &self.request { - StreamRequest::ContinuouslyStreamTransactions(request) => { - if let Some(target) = &request.target { - if request_end_version == target.ledger_info().version() { - self.stream_is_complete = true; - } - } - }, - StreamRequest::ContinuouslyStreamTransactionOutputs(request) => { - if let Some(target) = &request.target { - if request_end_version == target.ledger_info().version() { - self.stream_is_complete = true; - } - } - }, + let stream_request_target = match &self.request { + StreamRequest::ContinuouslyStreamTransactions(request) => request.target.clone(), + StreamRequest::ContinuouslyStreamTransactionOutputs(request) => request.target.clone(), StreamRequest::ContinuouslyStreamTransactionsOrOutputs(request) => { - if let Some(target) = &request.target { - if request_end_version == target.ledger_info().version() { - self.stream_is_complete = true; - } - } + request.target.clone() }, request => invalid_stream_request!(request), }; + if let Some(target) = stream_request_target { + if last_received_version >= target.ledger_info().version() { + self.stream_is_complete = true; + } + } // Update the current target ledger info if we've hit it - if request_end_version == target_ledger_info.ledger_info().version() { + if last_received_version >= target_ledger_info.ledger_info().version() { self.current_target_ledger_info = None; } @@ -736,11 +1096,17 @@ 
impl DataStreamEngine for ContinuousTransactionStreamEngine { &mut self, max_number_of_requests: u64, global_data_summary: &GlobalDataSummary, + unique_id_generator: Arc, ) -> Result, Error> { if self.end_of_epoch_requested || self.optimistic_fetch_requested { return Ok(vec![]); // We are waiting for a blocking response type } + // If there's an active subscription stream we should utilize it + if self.active_subscription_stream.is_some() { + return self.create_subscription_stream_requests(max_number_of_requests); + } + // If we don't have a syncing target, try to select one let (next_request_version, next_request_epoch) = self.next_request_version_and_epoch; if self.current_target_ledger_info.is_none() { @@ -807,7 +1173,7 @@ impl DataStreamEngine for ContinuousTransactionStreamEngine { }, request => invalid_stream_request!(request), }; - let client_requests = create_data_client_requests( + let client_requests = create_data_client_request_batch( next_request_version, target_ledger_info.ledger_info().version(), max_number_of_requests, @@ -817,10 +1183,18 @@ impl DataStreamEngine for ContinuousTransactionStreamEngine { self.update_request_tracking(&client_requests, &target_ledger_info)?; client_requests } else { - // We don't have a target, send a single optimistic fetch request - let optimistic_fetch_request = self.create_optimistic_fetch_request()?; - self.optimistic_fetch_requested = true; - vec![optimistic_fetch_request] + // We don't have a target. We should either send an optimistic + // fetch request or start a new subscription stream. + if self.data_streaming_config.enable_subscription_streaming { + // Start a new subscription stream and send the first set of requests + self.start_active_subscription_stream(unique_id_generator)?; + self.create_subscription_stream_requests(max_number_of_requests)? + } else { + // Send a single optimistic fetch request + let optimistic_fetch_request = self.create_optimistic_fetch_request()?; + self.optimistic_fetch_requested = true; + vec![optimistic_fetch_request] + } }; Ok(client_requests) @@ -851,58 +1225,18 @@ impl DataStreamEngine for ContinuousTransactionStreamEngine { self.stream_is_complete } - fn notify_optimistic_fetch_timeout( + fn notify_new_data_request_error( &mut self, client_request: &DataClientRequest, + request_error: aptos_data_client::error::Error, ) -> Result<(), Error> { - if !self.optimistic_fetch_requested { - return Err(Error::UnexpectedErrorEncountered(format!( - "Received an optimistic fetch timeout but no request is in-flight! 
Request: {:?}", - client_request - ))); - } - - // Reset the optimistic fetch request and handle the timeout - self.optimistic_fetch_requested = false; - if matches!( - self.request, - StreamRequest::ContinuouslyStreamTransactions(_) - ) && matches!( - client_request, - DataClientRequest::NewTransactionsWithProof(_) - ) { - info!( - (LogSchema::new(LogEntry::RequestTimeout) - .message("Optimistic fetch request for new transactions timed out!")) - ); - } else if matches!( - self.request, - StreamRequest::ContinuouslyStreamTransactionOutputs(_) - ) && matches!( - client_request, - DataClientRequest::NewTransactionOutputsWithProof(_) - ) { - info!( - (LogSchema::new(LogEntry::RequestTimeout) - .message("Optimistic fetch request for new transaction outputs timed out!")) - ); - } else if matches!( - self.request, - StreamRequest::ContinuouslyStreamTransactionsOrOutputs(_) - ) && matches!( - client_request, - DataClientRequest::NewTransactionsOrOutputsWithProof(_) - ) { - info!( - (LogSchema::new(LogEntry::RequestTimeout).message( - "Optimistic fetch request for new transactions or outputs timed out!" - )) - ); + // If subscription streaming is enabled, the timeout should be for + // subscription data. Otherwise, it should be for optimistic fetch data. + if self.data_streaming_config.enable_subscription_streaming { + self.handle_subscription_error(client_request, request_error) } else { - return Err(Error::UnexpectedErrorEncountered(format!("Received an optimistic fetch request timeout but the request did not match the expected type for the stream! Request: {:?}, Stream: {:?}", client_request, self.request))); + self.handle_optimistic_fetch_error(client_request, request_error) } - - Ok(()) } fn transform_client_response_into_notification( @@ -911,13 +1245,18 @@ impl DataStreamEngine for ContinuousTransactionStreamEngine { client_response_payload: ResponsePayload, notification_id_generator: Arc, ) -> Result, Error> { - // We reset the pending requests to prevent malicious responses from blocking the streams + // Reset the pending requests to prevent malicious responses from + // blocking the streams. Note: these request types are mutually + // exclusive and only a single request will exist at any given time. 
if self.end_of_epoch_requested { self.end_of_epoch_requested = false; } else if self.optimistic_fetch_requested { self.optimistic_fetch_requested = false; } + // Update the metrics for the number of received items + update_response_chunk_size_metrics(client_request, &client_response_payload); + // Handle and transform the response match client_request { EpochEndingLedgerInfos(_) => { @@ -957,6 +1296,39 @@ impl DataStreamEngine for ContinuousTransactionStreamEngine { }, request => invalid_stream_request!(request), }, + SubscribeTransactionOutputsWithProof(request) => match &self.request { + StreamRequest::ContinuouslyStreamTransactionOutputs(_) => { + let data_notification = self.create_notification_for_subscription_data( + request.subscription_stream_index, + client_response_payload, + notification_id_generator, + )?; + Ok(Some(data_notification)) + }, + request => invalid_stream_request!(request), + }, + SubscribeTransactionsOrOutputsWithProof(request) => match &self.request { + StreamRequest::ContinuouslyStreamTransactionsOrOutputs(_) => { + let data_notification = self.create_notification_for_subscription_data( + request.subscription_stream_index, + client_response_payload, + notification_id_generator, + )?; + Ok(Some(data_notification)) + }, + request => invalid_stream_request!(request), + }, + SubscribeTransactionsWithProof(request) => match &self.request { + StreamRequest::ContinuouslyStreamTransactions(_) => { + let data_notification = self.create_notification_for_subscription_data( + request.subscription_stream_index, + client_response_payload, + notification_id_generator, + )?; + Ok(Some(data_notification)) + }, + request => invalid_stream_request!(request), + }, TransactionsWithProof(request) => match &self.request { StreamRequest::ContinuouslyStreamTransactions(_) => { let data_notification = self.create_notification_for_continuous_data( @@ -1081,9 +1453,10 @@ impl DataStreamEngine for EpochEndingStreamEngine { &mut self, max_number_of_requests: u64, global_data_summary: &GlobalDataSummary, + _unique_id_generator: Arc, ) -> Result, Error> { // Create the client requests - let client_requests = create_data_client_requests( + let client_requests = create_data_client_request_batch( self.next_request_epoch, self.end_epoch, max_number_of_requests, @@ -1115,21 +1488,48 @@ impl DataStreamEngine for EpochEndingStreamEngine { client_response_payload: ResponsePayload, notification_id_generator: Arc, ) -> Result, Error> { + // Update the metrics for the number of received items + update_response_chunk_size_metrics(client_request, &client_response_payload); + + // Handle and transform the response match client_request { EpochEndingLedgerInfos(request) => { + // Verify the client request indices verify_client_request_indices( self.next_stream_epoch, request.start_epoch, request.end_epoch, )?; + // Identify the last received epoch and bound it appropriately + let last_received_epoch = match &client_response_payload { + ResponsePayload::EpochEndingLedgerInfos(ledger_infos) => { + // Verify that we received at least one ledger info + if ledger_infos.is_empty() { + return Err(Error::AptosDataClientResponseIsInvalid(format!( + "Received an empty epoch ending ledger info response! 
Request: {:?}", + client_request + ))); + } + + // Return the last epoch + ledger_infos + .last() + .map(|ledger_info| ledger_info.ledger_info().epoch()) + .unwrap_or(request.start_epoch) + }, + _ => invalid_response_type!(client_response_payload), + }; + let last_received_epoch = + bound_by_range(last_received_epoch, request.start_epoch, request.end_epoch); + // Update the local stream notification tracker - self.next_stream_epoch = request.end_epoch.checked_add(1).ok_or_else(|| { + self.next_stream_epoch = last_received_epoch.checked_add(1).ok_or_else(|| { Error::IntegerOverflow("Next stream epoch has overflown!".into()) })?; // Check if the stream is complete - if request.end_epoch == self.end_epoch { + if last_received_epoch >= self.end_epoch { self.stream_is_complete = true; } @@ -1195,20 +1595,50 @@ impl TransactionStreamEngine { request_start_version: Version, request_end_version: Version, stream_end_version: Version, + client_response_payload: &ResponsePayload, ) -> Result<(), Error> { + // Verify the client request indices verify_client_request_indices( self.next_stream_version, request_start_version, request_end_version, )?; + // Check the number of received versions + let num_received_versions = match client_response_payload { + ResponsePayload::TransactionsWithProof(transactions_with_proof) => { + transactions_with_proof.transactions.len() + }, + ResponsePayload::TransactionOutputsWithProof(outputs_with_proof) => { + outputs_with_proof.transactions_and_outputs.len() + }, + _ => invalid_response_type!(client_response_payload), + }; + if num_received_versions == 0 { + return Err(Error::AptosDataClientResponseIsInvalid(format!( + "Received an empty response! Request: {:?}", + self.request + ))); + } + + // Identify the last received version and bound it appropriately + let last_received_version = request_start_version + .checked_add(num_received_versions as u64) + .and_then(|version| version.checked_sub(1)) + .ok_or_else(|| Error::IntegerOverflow("Last received version has overflown!".into()))?; + let last_received_version = bound_by_range( + last_received_version, + request_start_version, + request_end_version, + ); + // Update the local stream notification tracker - self.next_stream_version = request_end_version + self.next_stream_version = last_received_version .checked_add(1) .ok_or_else(|| Error::IntegerOverflow("Next stream version has overflown!".into()))?; // Check if the stream is complete - if request_end_version == stream_end_version { + if last_received_version >= stream_end_version { self.stream_is_complete = true; } @@ -1269,6 +1699,7 @@ impl DataStreamEngine for TransactionStreamEngine { &mut self, max_number_of_requests: u64, global_data_summary: &GlobalDataSummary, + _unique_id_generator: Arc, ) -> Result, Error> { let (request_end_version, optimal_chunk_sizes) = match &self.request { StreamRequest::GetAllTransactions(request) => ( @@ -1293,7 +1724,7 @@ impl DataStreamEngine for TransactionStreamEngine { }; // Create the client requests - let client_requests = create_data_client_requests( + let client_requests = create_data_client_request_batch( self.next_request_version, request_end_version, max_number_of_requests, @@ -1335,42 +1766,45 @@ impl DataStreamEngine for TransactionStreamEngine { client_response_payload: ResponsePayload, notification_id_generator: Arc, ) -> Result, Error> { - match &self.request { + // Update the metrics for the number of received items + update_response_chunk_size_metrics(client_request, &client_response_payload); + + // Identify the 
version information of the stream and client requests + let (stream_end_version, request_start_version, request_end_version) = match &self.request { StreamRequest::GetAllTransactions(stream_request) => match client_request { - TransactionsWithProof(request) => { - let stream_end_version = stream_request.end_version; - self.update_stream_version( - request.start_version, - request.end_version, - stream_end_version, - )?; - }, + TransactionsWithProof(request) => ( + stream_request.end_version, + request.start_version, + request.end_version, + ), request => invalid_client_request!(request, self), }, StreamRequest::GetAllTransactionOutputs(stream_request) => match client_request { - TransactionOutputsWithProof(request) => { - let stream_end_version = stream_request.end_version; - self.update_stream_version( - request.start_version, - request.end_version, - stream_end_version, - )?; - }, + TransactionOutputsWithProof(request) => ( + stream_request.end_version, + request.start_version, + request.end_version, + ), request => invalid_client_request!(request, self), }, StreamRequest::GetAllTransactionsOrOutputs(stream_request) => match client_request { - TransactionsOrOutputsWithProof(request) => { - let stream_end_version = stream_request.end_version; - self.update_stream_version( - request.start_version, - request.end_version, - stream_end_version, - )?; - }, + TransactionsOrOutputsWithProof(request) => ( + stream_request.end_version, + request.start_version, + request.end_version, + ), request => invalid_client_request!(request, self), }, request => invalid_stream_request!(request), - } + }; + + // Update the stream version + self.update_stream_version( + request_start_version, + request_end_version, + stream_end_version, + &client_response_payload, + )?; // Create a new data notification let data_notification = create_data_notification( @@ -1383,6 +1817,85 @@ impl DataStreamEngine for TransactionStreamEngine { } } +/// A simple struct that tracks the state of a subscription stream. +#[derive(Clone, Debug)] +struct SubscriptionStream { + known_version_at_stream_start: u64, // The highest known transaction version at stream start + known_epoch_at_stream_start: u64, // The highest known epoch at stream start + subscription_stream_id: u64, // The unique id of the subscription stream + + next_subscription_stream_index: u64, // The next request index to send for the stream + max_subscription_stream_index: u64, // The maximum request index to send for the stream +} + +impl SubscriptionStream { + pub fn new( + data_streaming_config: DataStreamingServiceConfig, + unique_id_generator: Arc, + known_version_at_stream_start: u64, + known_epoch_at_stream_start: u64, + ) -> Self { + // Generate a new subscription stream ID + let subscription_stream_id = unique_id_generator.next(); + + // Log the creation of the subscription stream + debug!( + (LogSchema::new(LogEntry::CreatedSubscriptionStream).message(&format!( + "Created new subscription stream. 
Stream ID: {:?}", + subscription_stream_id + ))) + ); + + // Calculate the maximum subscription stream index + let max_subscription_stream_index = data_streaming_config + .max_num_consecutive_subscriptions + .saturating_sub(1); + + Self { + known_version_at_stream_start, + known_epoch_at_stream_start, + subscription_stream_id, + next_subscription_stream_index: 0, + max_subscription_stream_index, + } + } + + /// Returns the known version and epoch at stream start + pub fn get_known_version_and_epoch_at_stream_start(&self) -> (u64, u64) { + ( + self.known_version_at_stream_start, + self.known_epoch_at_stream_start, + ) + } + + /// Returns the maximum subscription stream index + pub fn get_max_subscription_stream_index(&self) -> u64 { + self.max_subscription_stream_index + } + + /// Returns the next subscription stream index + pub fn get_next_subscription_stream_index(&self) -> u64 { + self.next_subscription_stream_index + } + + /// Returns the subscription stream ID + pub fn get_subscription_stream_id(&self) -> u64 { + self.subscription_stream_id + } + + /// Increments the next subscription stream index + pub fn increment_subscription_stream_index(&mut self) { + self.next_subscription_stream_index += 1; + } +} + +/// Bounds the given number by the specified min and max values, inclusive. +/// If the number is less than the min, the min is returned. If the number is +/// greater than the max, the max is returned. Otherwise, the number is returned. +pub(crate) fn bound_by_range(number: u64, min: u64, max: u64) -> u64 { + number.clamp(min, max) +} + /// Verifies that the `expected_next_index` matches the `start_index` and that /// the `end_index` is greater than or equal to `expected_next_index`. fn verify_client_request_indices( @@ -1408,7 +1921,7 @@ fn verify_client_request_indices( } /// Creates a batch of data client requests for the given stream engine -fn create_data_client_requests( +fn create_data_client_request_batch( start_index: u64, end_index: u64, max_number_of_requests: u64, @@ -1624,3 +2137,62 @@ fn create_data_notification( data_payload, }) } + +/// Extracts the number of new versions and target +/// ledger info for the given client response payload. 
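/// (Illustrative note, not part of the upstream change: for new transaction
/// payloads the count is the number of transactions in the proof, and for new
/// transaction output payloads it is the number of transaction/output pairs;
/// empty payloads are rejected as invalid, as shown in the body below.)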
+fn extract_new_versions_and_target( + client_response_payload: &ResponsePayload, +) -> Result<(usize, LedgerInfoWithSignatures), Error> { + // Extract the number of new versions and the target ledger info + let (num_versions, target_ledger_info) = match &client_response_payload { + ResponsePayload::NewTransactionsWithProof(( + transactions_with_proof, + target_ledger_info, + )) => ( + transactions_with_proof.transactions.len(), + target_ledger_info.clone(), + ), + ResponsePayload::NewTransactionOutputsWithProof(( + outputs_with_proof, + target_ledger_info, + )) => ( + outputs_with_proof.transactions_and_outputs.len(), + target_ledger_info.clone(), + ), + response_payload => { + // TODO(joshlind): eventually we want to notify the data client of the bad response + return Err(Error::AptosDataClientResponseIsInvalid(format!( + "Expected new transactions or outputs but got: {:?}", + response_payload + ))); + }, + }; + + // Ensure that we have at least one data item + if num_versions == 0 { + // TODO(joshlind): eventually we want to notify the data client of the bad response + return Err(Error::AptosDataClientResponseIsInvalid( + "Received an empty transaction or output list!".into(), + )); + } + + Ok((num_versions, target_ledger_info)) +} + +/// Updates the response chunk size metrics for the given request and response +fn update_response_chunk_size_metrics( + client_request: &DataClientRequest, + client_response_payload: &ResponsePayload, +) { + metrics::observe_value( + &metrics::RECEIVED_DATA_RESPONSE_CHUNK_SIZE, + client_request.get_label(), + client_response_payload.get_label(), + client_response_payload.get_data_chunk_size() as u64, + ); +} + +/// Updates the metrics with a terminated subscription event and reason +fn update_terminated_subscription_metrics(termination_reason: &str) { + metrics::increment_counter(&metrics::TERMINATE_SUBSCRIPTION_STREAM, termination_reason); +} diff --git a/state-sync/state-sync-v2/data-streaming-service/src/streaming_client.rs b/state-sync/data-streaming-service/src/streaming_client.rs similarity index 100% rename from state-sync/state-sync-v2/data-streaming-service/src/streaming_client.rs rename to state-sync/data-streaming-service/src/streaming_client.rs diff --git a/state-sync/state-sync-v2/data-streaming-service/src/streaming_service.rs b/state-sync/data-streaming-service/src/streaming_service.rs similarity index 71% rename from state-sync/state-sync-v2/data-streaming-service/src/streaming_service.rs rename to state-sync/data-streaming-service/src/streaming_service.rs index b1c78c83bba06..1f684b0b725b9 100644 --- a/state-sync/state-sync-v2/data-streaming-service/src/streaming_service.rs +++ b/state-sync/data-streaming-service/src/streaming_service.rs @@ -410,109 +410,71 @@ mod streaming_service_tests { #[tokio::test(flavor = "multi_thread")] async fn test_drop_data_streams() { - // Create a new streaming service - let (_, mut streaming_service) = - tests::streaming_service::create_streaming_client_and_server(false, false, true); - - // Create multiple data streams - let num_data_streams = 10; - let mut stream_ids = vec![]; - for _ in 0..num_data_streams { - // Create a new data stream - let (new_stream_request, response_receiver) = create_new_stream_request(); - streaming_service.handle_stream_request_message(new_stream_request); - let data_stream_listener = response_receiver.now_or_never().unwrap().unwrap().unwrap(); - let data_stream_id = data_stream_listener.data_stream_id; - - // Remember the data stream id and drop the listener - 
stream_ids.push(data_stream_id); - } + for enable_subscription_stream in [false, true] { + // Create a new streaming service + let (_, mut streaming_service) = + tests::streaming_service::create_streaming_client_and_server( + None, + false, + false, + true, + enable_subscription_stream, + ); - // Verify the number of active data streams - assert_eq!( - streaming_service.get_all_data_stream_ids().len(), - num_data_streams - ); + // Create multiple data streams + let num_data_streams = 10; + let mut stream_ids = vec![]; + for _ in 0..num_data_streams { + // Create a new data stream + let (new_stream_request, response_receiver) = create_new_stream_request(); + streaming_service.handle_stream_request_message(new_stream_request); + let data_stream_listener = + response_receiver.now_or_never().unwrap().unwrap().unwrap(); + let data_stream_id = data_stream_listener.data_stream_id; - // Drive progress of the streaming service (the streaming service - // should detect the dropped listeners and remove the streams). - let timeout_deadline = Instant::now().add(Duration::from_secs(MAX_STREAM_WAIT_SECS)); - while Instant::now() < timeout_deadline { - streaming_service.check_progress_of_all_data_streams().await; - if streaming_service.get_all_data_stream_ids().is_empty() { - return; // All streams were dropped! + // Remember the data stream id and drop the listener + stream_ids.push(data_stream_id); } - tokio::time::sleep(Duration::from_millis(100)).await; - } - panic!("The streaming service failed to drop the data streams!"); - } - #[tokio::test(flavor = "multi_thread")] - async fn test_terminate_data_streams() { - // Create a new streaming service - let (_, mut streaming_service) = - tests::streaming_service::create_streaming_client_and_server(false, false, true); - - // Verify there are no data streams - assert!(streaming_service.get_all_data_stream_ids().is_empty()); - - // Create multiple data streams - let num_data_streams = 10; - let mut stream_ids_and_listeners = vec![]; - for _ in 0..num_data_streams { - // Create a new data stream - let (new_stream_request, response_receiver) = create_new_stream_request(); - streaming_service.handle_stream_request_message(new_stream_request); - let data_stream_listener = response_receiver.now_or_never().unwrap().unwrap().unwrap(); - let data_stream_id = data_stream_listener.data_stream_id; - - // Verify the data stream is actively held by the streaming service - let all_data_stream_ids = streaming_service.get_all_data_stream_ids(); - assert!(all_data_stream_ids.contains(&data_stream_id)); - - // Remember the data stream id and listener - stream_ids_and_listeners.push((data_stream_id, data_stream_listener)); - } + // Verify the number of active data streams + assert_eq!( + streaming_service.get_all_data_stream_ids().len(), + num_data_streams + ); - // Verify the number of active data streams - assert_eq!( - streaming_service.get_all_data_stream_ids().len(), - num_data_streams - ); + // Drive progress of the streaming service (the streaming service + // should detect the dropped listeners and remove the streams). + let timeout_deadline = Instant::now().add(Duration::from_secs(MAX_STREAM_WAIT_SECS)); + while Instant::now() < timeout_deadline { + streaming_service.check_progress_of_all_data_streams().await; + if streaming_service.get_all_data_stream_ids().is_empty() { + break; // All streams were dropped! + } + tokio::time::sleep(Duration::from_millis(100)).await; + } - // Try to terminate a data stream with an incorrect ID and verify - // an error is returned. 
- let terminate_stream_request = TerminateStreamRequest { - data_stream_id: 1919123, - notification_and_feedback: None, - }; - streaming_service - .process_terminate_stream_request(&terminate_stream_request) - .unwrap_err(); - - // Terminate all the streams and verify they're no longer held - for (data_stream_id, _) in stream_ids_and_listeners { - // Terminate the data stream (with no feedback) - let (terminate_stream_request, _) = - create_terminate_stream_request(data_stream_id, None); - streaming_service.handle_stream_request_message(terminate_stream_request); - - // Verify the stream has been removed - let all_data_stream_ids = streaming_service.get_all_data_stream_ids(); - assert!(!all_data_stream_ids.contains(&data_stream_id)); + // Verify the streams were terminated + if !streaming_service.get_all_data_stream_ids().is_empty() { + panic!("The streaming service failed to drop the data streams!"); + } } - - // Verify there are no data streams - assert!(streaming_service.get_all_data_stream_ids().is_empty()); } - #[tokio::test] - async fn test_terminate_data_streams_feedback() { - // Verify stream termination even if invalid feedback is given (i.e., id mismatch) - for invalid_feedback in [false, true] { + #[tokio::test(flavor = "multi_thread")] + async fn test_terminate_data_streams() { + for enable_subscription_stream in [false, true] { // Create a new streaming service let (_, mut streaming_service) = - tests::streaming_service::create_streaming_client_and_server(false, false, true); + tests::streaming_service::create_streaming_client_and_server( + None, + false, + false, + true, + enable_subscription_stream, + ); + + // Verify there are no data streams + assert!(streaming_service.get_all_data_stream_ids().is_empty()); // Create multiple data streams let num_data_streams = 10; @@ -525,44 +487,40 @@ mod streaming_service_tests { response_receiver.now_or_never().unwrap().unwrap().unwrap(); let data_stream_id = data_stream_listener.data_stream_id; + // Verify the data stream is actively held by the streaming service + let all_data_stream_ids = streaming_service.get_all_data_stream_ids(); + assert!(all_data_stream_ids.contains(&data_stream_id)); + // Remember the data stream id and listener stream_ids_and_listeners.push((data_stream_id, data_stream_listener)); } - // Fetch a notification from each data stream and terminate the stream - for (data_stream_id, data_stream_listener) in &mut stream_ids_and_listeners { - let timeout_deadline = - Instant::now().add(Duration::from_secs(MAX_STREAM_WAIT_SECS)); - while Instant::now() < timeout_deadline { - streaming_service.check_progress_of_all_data_streams().await; - if let Ok(data_notification) = timeout( - Duration::from_secs(1), - data_stream_listener.select_next_some(), - ) - .await - { - // Terminate the data stream - let notification_id = if invalid_feedback { - 10101010 // Invalid notification id - } else { - data_notification.notification_id - }; - let notification_and_feedback = Some(NotificationAndFeedback { - notification_id, - notification_feedback: NotificationFeedback::InvalidPayloadData, - }); - let (terminate_stream_request, _) = create_terminate_stream_request( - *data_stream_id, - notification_and_feedback, - ); - streaming_service.handle_stream_request_message(terminate_stream_request); - - // Verify the stream has been removed - let all_data_stream_ids = streaming_service.get_all_data_stream_ids(); - assert!(!all_data_stream_ids.contains(data_stream_id)); - break; - } - } + // Verify the number of active data streams + 
assert_eq!( + streaming_service.get_all_data_stream_ids().len(), + num_data_streams + ); + + // Try to terminate a data stream with an incorrect ID and verify + // an error is returned. + let terminate_stream_request = TerminateStreamRequest { + data_stream_id: 1919123, + notification_and_feedback: None, + }; + streaming_service + .process_terminate_stream_request(&terminate_stream_request) + .unwrap_err(); + + // Terminate all the streams and verify they're no longer held + for (data_stream_id, _) in stream_ids_and_listeners { + // Terminate the data stream (with no feedback) + let (terminate_stream_request, _) = + create_terminate_stream_request(data_stream_id, None); + streaming_service.handle_stream_request_message(terminate_stream_request); + + // Verify the stream has been removed + let all_data_stream_ids = streaming_service.get_all_data_stream_ids(); + assert!(!all_data_stream_ids.contains(&data_stream_id)); } // Verify there are no data streams @@ -570,6 +528,79 @@ mod streaming_service_tests { } } + #[tokio::test] + async fn test_terminate_data_streams_feedback() { + // Verify stream termination even if invalid feedback is given (i.e., id mismatch) + for enable_subscription_stream in [false, true] { + for invalid_feedback in [false, true] { + // Create a new streaming service + let (_, mut streaming_service) = + tests::streaming_service::create_streaming_client_and_server( + None, + false, + false, + true, + enable_subscription_stream, + ); + + // Create multiple data streams + let num_data_streams = 10; + let mut stream_ids_and_listeners = vec![]; + for _ in 0..num_data_streams { + // Create a new data stream + let (new_stream_request, response_receiver) = create_new_stream_request(); + streaming_service.handle_stream_request_message(new_stream_request); + let data_stream_listener = + response_receiver.now_or_never().unwrap().unwrap().unwrap(); + let data_stream_id = data_stream_listener.data_stream_id; + + // Remember the data stream id and listener + stream_ids_and_listeners.push((data_stream_id, data_stream_listener)); + } + + // Fetch a notification from each data stream and terminate the stream + for (data_stream_id, data_stream_listener) in &mut stream_ids_and_listeners { + let timeout_deadline = + Instant::now().add(Duration::from_secs(MAX_STREAM_WAIT_SECS)); + while Instant::now() < timeout_deadline { + streaming_service.check_progress_of_all_data_streams().await; + if let Ok(data_notification) = timeout( + Duration::from_secs(1), + data_stream_listener.select_next_some(), + ) + .await + { + // Terminate the data stream + let notification_id = if invalid_feedback { + 10101010 // Invalid notification id + } else { + data_notification.notification_id + }; + let notification_and_feedback = Some(NotificationAndFeedback { + notification_id, + notification_feedback: NotificationFeedback::InvalidPayloadData, + }); + let (terminate_stream_request, _) = create_terminate_stream_request( + *data_stream_id, + notification_and_feedback, + ); + streaming_service + .handle_stream_request_message(terminate_stream_request); + + // Verify the stream has been removed + let all_data_stream_ids = streaming_service.get_all_data_stream_ids(); + assert!(!all_data_stream_ids.contains(data_stream_id)); + break; + } + } + } + + // Verify there are no data streams + assert!(streaming_service.get_all_data_stream_ids().is_empty()); + } + } + } + /// Creates a new stream request message for state values fn create_new_stream_request() -> ( StreamRequestMessage, diff --git 
a/state-sync/state-sync-v2/data-streaming-service/src/tests/data_stream.rs b/state-sync/data-streaming-service/src/tests/data_stream.rs similarity index 66% rename from state-sync/state-sync-v2/data-streaming-service/src/tests/data_stream.rs rename to state-sync/data-streaming-service/src/tests/data_stream.rs index 6416981f4e9f8..dab0f75e8783a 100644 --- a/state-sync/state-sync-v2/data-streaming-service/src/tests/data_stream.rs +++ b/state-sync/data-streaming-service/src/tests/data_stream.rs @@ -6,8 +6,11 @@ use crate::{ data_notification::{ DataClientRequest, DataPayload, EpochEndingLedgerInfosRequest, NewTransactionOutputsWithProofRequest, NewTransactionsOrOutputsWithProofRequest, - NewTransactionsWithProofRequest, PendingClientResponse, TransactionOutputsWithProofRequest, - TransactionsOrOutputsWithProofRequest, TransactionsWithProofRequest, + NewTransactionsWithProofRequest, PendingClientResponse, + SubscribeTransactionOutputsWithProofRequest, + SubscribeTransactionsOrOutputsWithProofRequest, SubscribeTransactionsWithProofRequest, + TransactionOutputsWithProofRequest, TransactionsOrOutputsWithProofRequest, + TransactionsWithProofRequest, }, data_stream::{DataStream, DataStreamListener}, streaming_client::{ @@ -35,8 +38,13 @@ use aptos_id_generator::U64IdGenerator; use aptos_infallible::Mutex; use aptos_storage_service_types::responses::CompleteDataRange; use aptos_types::{ - ledger_info::LedgerInfoWithSignatures, proof::SparseMerkleRangeProof, - state_store::state_value::StateValueChunkWithProof, transaction::Version, + ledger_info::LedgerInfoWithSignatures, + proof::SparseMerkleRangeProof, + state_store::{ + state_key::StateKey, + state_value::{StateValue, StateValueChunkWithProof}, + }, + transaction::Version, }; use claims::{assert_err, assert_ge, assert_matches, assert_none, assert_ok}; use futures::{FutureExt, StreamExt}; @@ -69,13 +77,10 @@ async fn test_stream_blocked() { id: 0, response_callback: Box::new(NoopResponseCallback), }; - let pending_response = PendingClientResponse { - client_request: client_request.clone(), - client_response: Some(Ok(Response { - context, - payload: ResponsePayload::NumberOfStates(10), - })), - }; + let pending_response = PendingClientResponse::new_with_response( + client_request.clone(), + Ok(Response::new(context, ResponsePayload::NumberOfStates(10))), + ); insert_response_into_pending_queue(&mut data_stream, pending_response); // Process the data responses and force a data re-fetch @@ -196,12 +201,12 @@ async fn test_stream_data_error() { start_epoch: MIN_ADVERTISED_EPOCH_END, end_epoch: MIN_ADVERTISED_EPOCH_END + 1, }); - let pending_response = PendingClientResponse { - client_request: client_request.clone(), - client_response: Some(Err(aptos_data_client::error::Error::DataIsUnavailable( + let pending_response = PendingClientResponse::new_with_response( + client_request.clone(), + Err(aptos_data_client::error::Error::DataIsUnavailable( "Missing data!".into(), - ))), - }; + )), + ); insert_response_into_pending_queue(&mut data_stream, pending_response); // Process the responses and verify the data client request was resent to the network @@ -233,11 +238,10 @@ async fn test_stream_invalid_response() { id: 0, response_callback: Box::new(NoopResponseCallback), }; - let client_response = Response::new(context, ResponsePayload::NumberOfStates(10)); - let pending_response = PendingClientResponse { - client_request: client_request.clone(), - client_response: Some(Ok(client_response)), - }; + let pending_response = 
PendingClientResponse::new_with_response( + client_request.clone(), + Ok(Response::new(context, ResponsePayload::NumberOfStates(10))), + ); insert_response_into_pending_queue(&mut data_stream, pending_response); // Process the responses and verify the data client request was resent to the network @@ -349,12 +353,12 @@ async fn test_state_stream_out_of_order_responses() { ); // Set a response for the second request and verify no notifications - set_state_value_response_in_queue(&mut data_stream, 1); + set_state_value_response_in_queue(&mut data_stream, 1, 1, 1); process_data_responses(&mut data_stream, &global_data_summary).await; assert_none!(stream_listener.select_next_some().now_or_never()); // Set a response for the first request and verify two notifications - set_state_value_response_in_queue(&mut data_stream, 0); + set_state_value_response_in_queue(&mut data_stream, 0, 0, 0); process_data_responses(&mut data_stream, &global_data_summary).await; for _ in 0..2 { let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); @@ -366,8 +370,8 @@ async fn test_state_stream_out_of_order_responses() { assert_none!(stream_listener.select_next_some().now_or_never()); // Set the response for the first and third request and verify one notification sent - set_state_value_response_in_queue(&mut data_stream, 0); - set_state_value_response_in_queue(&mut data_stream, 2); + set_state_value_response_in_queue(&mut data_stream, 2, 2, 0); + set_state_value_response_in_queue(&mut data_stream, 4, 4, 2); process_data_responses(&mut data_stream, &global_data_summary).await; let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); assert_matches!( @@ -377,8 +381,8 @@ async fn test_state_stream_out_of_order_responses() { assert_none!(stream_listener.select_next_some().now_or_never()); // Set the response for the first and third request and verify three notifications sent - set_state_value_response_in_queue(&mut data_stream, 0); - set_state_value_response_in_queue(&mut data_stream, 2); + set_state_value_response_in_queue(&mut data_stream, 3, 3, 0); + set_state_value_response_in_queue(&mut data_stream, 5, 5, 2); process_data_responses(&mut data_stream, &global_data_summary).await; for _ in 0..3 { let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); @@ -401,7 +405,7 @@ async fn test_continuous_stream_epoch_change_retry() { ..Default::default() }; - // Test both types of continuous data streams + // Test all types of continuous data streams let (data_stream_1, _stream_listener_1) = create_continuous_transaction_stream( AptosDataClientConfig::default(), streaming_service_config, @@ -462,80 +466,43 @@ async fn test_continuous_stream_epoch_change_retry() { #[tokio::test] async fn test_continuous_stream_optimistic_fetch_retry() { - // Create a test streaming service config + // Create a test streaming service config with subscriptions disabled let max_request_retry = 3; let max_concurrent_requests = 3; let streaming_service_config = DataStreamingServiceConfig { + enable_subscription_streaming: false, max_concurrent_requests, max_request_retry, ..Default::default() }; - // Test both types of continuous data streams - let (data_stream_1, stream_listener_1) = create_continuous_transaction_stream( + // Test all types of continuous data streams + let continuous_data_streams = enumerate_continuous_data_streams( AptosDataClientConfig::default(), streaming_service_config, - MAX_ADVERTISED_TRANSACTION, - MAX_ADVERTISED_EPOCH_END, ); - let 
(data_stream_2, stream_listener_2) = create_continuous_transaction_output_stream( - AptosDataClientConfig::default(), - streaming_service_config, - MAX_ADVERTISED_TRANSACTION_OUTPUT, - MAX_ADVERTISED_EPOCH_END, - ); - let (data_stream_3, stream_listener_3) = create_continuous_transaction_or_output_stream( - AptosDataClientConfig::default(), - streaming_service_config, - MAX_ADVERTISED_TRANSACTION_OUTPUT, - MAX_ADVERTISED_EPOCH_END, - ); - for (mut data_stream, mut stream_listener, transactions_only, allow_transactions_or_outputs) in [ - (data_stream_1, stream_listener_1, true, false), - (data_stream_2, stream_listener_2, false, false), - (data_stream_3, stream_listener_3, false, true), - ] { + for (mut data_stream, mut stream_listener, transactions_only, allow_transactions_or_outputs) in + continuous_data_streams + { // Initialize the data stream let global_data_summary = create_global_data_summary(1); initialize_data_requests(&mut data_stream, &global_data_summary); - // Verify a single request is made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!(sent_requests.as_ref().unwrap().len(), 1); - - // Verify the request is for the correct data - let client_request = get_pending_client_request(&mut data_stream, 0); - let expected_request = if allow_transactions_or_outputs { - DataClientRequest::NewTransactionsOrOutputsWithProof( - NewTransactionsOrOutputsWithProofRequest { - known_version: MAX_ADVERTISED_TRANSACTION_OUTPUT, - known_epoch: MAX_ADVERTISED_EPOCH_END, - include_events: false, - }, - ) - } else if transactions_only { - DataClientRequest::NewTransactionsWithProof(NewTransactionsWithProofRequest { - known_version: MAX_ADVERTISED_TRANSACTION, - known_epoch: MAX_ADVERTISED_EPOCH_END, - include_events: false, - }) - } else { - DataClientRequest::NewTransactionOutputsWithProof( - NewTransactionOutputsWithProofRequest { - known_version: MAX_ADVERTISED_TRANSACTION_OUTPUT, - known_epoch: MAX_ADVERTISED_EPOCH_END, - }, - ) - }; - assert_eq!(client_request, expected_request); + // Verify a single request is made and that it contains the correct data + verify_pending_optimistic_fetch( + &mut data_stream, + transactions_only, + allow_transactions_or_outputs, + 0, + ); // Set a timeout response for the optimistic fetch request and process it set_timeout_response_in_queue(&mut data_stream, 0); process_data_responses(&mut data_stream, &global_data_summary).await; assert_none!(stream_listener.select_next_some().now_or_never()); - // Handle multiple timeouts and retries because no new data is known - // about, so the best we can do is send optimistic fetches + // Handle multiple timeouts and retries (because no new data is known) + let client_request = get_pending_client_request(&mut data_stream, 0); for _ in 0..max_request_retry * 3 { // Set a timeout response for the request and process it set_timeout_response_in_queue(&mut data_stream, 0); @@ -547,7 +514,7 @@ async fn test_continuous_stream_optimistic_fetch_retry() { } // Set an optimistic fetch response in the queue and process it - set_optimistic_fetch_response_in_queue( + set_new_data_response_in_queue( &mut data_stream, 0, MAX_ADVERTISED_TRANSACTION + 1, @@ -555,161 +522,286 @@ async fn test_continuous_stream_optimistic_fetch_retry() { ); process_data_responses(&mut data_stream, &global_data_summary).await; - // Verify another optimistic fetch request is now sent (for data beyond the previous target) - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - 
assert_eq!(sent_requests.as_ref().unwrap().len(), 1); - let client_request = get_pending_client_request(&mut data_stream, 0); - let expected_request = if allow_transactions_or_outputs { - DataClientRequest::NewTransactionsOrOutputsWithProof( - NewTransactionsOrOutputsWithProofRequest { - known_version: MAX_ADVERTISED_TRANSACTION_OUTPUT + 1, - known_epoch: MAX_ADVERTISED_EPOCH_END, - include_events: false, - }, - ) - } else if transactions_only { - DataClientRequest::NewTransactionsWithProof(NewTransactionsWithProofRequest { - known_version: MAX_ADVERTISED_TRANSACTION + 1, - known_epoch: MAX_ADVERTISED_EPOCH_END, - include_events: false, - }) - } else { - DataClientRequest::NewTransactionOutputsWithProof( - NewTransactionOutputsWithProofRequest { - known_version: MAX_ADVERTISED_TRANSACTION_OUTPUT + 1, - known_epoch: MAX_ADVERTISED_EPOCH_END, - }, - ) - }; - assert_eq!(client_request, expected_request); + // Verify another optimistic fetch request is now sent + verify_pending_optimistic_fetch( + &mut data_stream, + transactions_only, + allow_transactions_or_outputs, + 1, // Offset by 1 (for data beyond the previous target) + ); - // Set a timeout response for the optimistic fetch request and process it. + // Set an error response for the optimistic fetch request and process it. // This will cause the same request to be re-sent. - set_timeout_response_in_queue(&mut data_stream, 0); + set_failure_response_in_queue(&mut data_stream, 0); process_data_responses(&mut data_stream, &global_data_summary).await; - // Set a timeout response for the optimistic fetch request and process it, - // but this time the node knows about new data to fetch. - set_timeout_response_in_queue(&mut data_stream, 0); - let mut new_global_data_summary = global_data_summary.clone(); - let new_highest_synced_version = MAX_ADVERTISED_TRANSACTION + 1000; - new_global_data_summary.advertised_data.synced_ledger_infos = vec![create_ledger_info( - new_highest_synced_version, - MAX_ADVERTISED_EPOCH_END, - false, - )]; - process_data_responses(&mut data_stream, &new_global_data_summary).await; - - // Verify multiple data requests have now been sent to fetch the missing data - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!(sent_requests.as_ref().unwrap().len(), 3); - for i in 0..3 { - let client_request = get_pending_client_request(&mut data_stream, i); - let expected_version = MAX_ADVERTISED_TRANSACTION + 2 + i as u64; - let expected_request = if allow_transactions_or_outputs { - DataClientRequest::TransactionsOrOutputsWithProof( - TransactionsOrOutputsWithProofRequest { - start_version: expected_version, - end_version: expected_version, - proof_version: new_highest_synced_version, - include_events: false, - }, - ) - } else if transactions_only { - DataClientRequest::TransactionsWithProof(TransactionsWithProofRequest { - start_version: expected_version, - end_version: expected_version, - proof_version: new_highest_synced_version, - include_events: false, - }) - } else { - DataClientRequest::TransactionOutputsWithProof(TransactionOutputsWithProofRequest { - start_version: expected_version, - end_version: expected_version, - proof_version: new_highest_synced_version, - }) - }; - assert_eq!(client_request, expected_request); - } + // Advertise new data and verify the data is requested + advertise_new_data_and_verify_requests( + &mut data_stream, + global_data_summary, + transactions_only, + allow_transactions_or_outputs, + max_concurrent_requests, + ) + .await; } } #[tokio::test(flavor = 
"multi_thread")] async fn test_continuous_stream_optimistic_fetch_timeout() { // Create a test data client config - let optimistic_fetch_timeout_ms = 2022; let data_client_config = AptosDataClientConfig { - optimistic_fetch_timeout_ms, + optimistic_fetch_timeout_ms: 1005, ..Default::default() }; - // Test both types of continuous data streams - let (data_stream_1, stream_listener_1) = create_continuous_transaction_stream( - data_client_config, - DataStreamingServiceConfig::default(), - MAX_ADVERTISED_TRANSACTION, - MAX_ADVERTISED_EPOCH_END, - ); - let (data_stream_2, stream_listener_2) = create_continuous_transaction_output_stream( - data_client_config, - DataStreamingServiceConfig::default(), - MAX_ADVERTISED_TRANSACTION_OUTPUT, - MAX_ADVERTISED_EPOCH_END, - ); - let (data_stream_3, stream_listener_3) = create_continuous_transaction_or_output_stream( + // Create a test streaming service config with subscriptions disabled + let streaming_service_config = DataStreamingServiceConfig { + enable_subscription_streaming: false, + ..Default::default() + }; + + // Verify the timeouts of all continuous data streams + verify_continuous_stream_request_timeouts( data_client_config, - DataStreamingServiceConfig::default(), - MAX_ADVERTISED_TRANSACTION_OUTPUT, - MAX_ADVERTISED_EPOCH_END, + streaming_service_config, + 1, // Optimistic fetch requests are only sent one at a time + ) + .await; +} + +#[tokio::test] +async fn test_continuous_stream_subscription_failures() { + // Create a test streaming service config with subscriptions enabled + let max_request_retry = 3; + let max_concurrent_requests = 3; + let streaming_service_config = DataStreamingServiceConfig { + enable_subscription_streaming: true, + max_concurrent_requests, + max_request_retry, + ..Default::default() + }; + + // Test all types of continuous data streams + let continuous_data_streams = enumerate_continuous_data_streams( + AptosDataClientConfig::default(), + streaming_service_config, ); - for (mut data_stream, mut stream_listener, transactions_only, allow_transactions_or_outputs) in [ - (data_stream_1, stream_listener_1, true, false), - (data_stream_2, stream_listener_2, false, false), - (data_stream_3, stream_listener_3, false, true), - ] { + for (mut data_stream, mut stream_listener, transactions_only, allow_transactions_or_outputs) in + continuous_data_streams + { // Initialize the data stream let global_data_summary = create_global_data_summary(1); initialize_data_requests(&mut data_stream, &global_data_summary); - // Verify a single request is made - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - assert_eq!(sent_requests.as_ref().unwrap().len(), 1); + // Fetch the subscription stream ID from the first pending request + let mut subscription_stream_id = get_subscription_stream_id(&mut data_stream, 0); - // Wait until a notification is sent. The mock data client - // will verify the timeout. - wait_for_notification_and_verify( + // Verify the pending requests are for the correct data and correctly formed + verify_pending_subscription_requests( &mut data_stream, - &mut stream_listener, - transactions_only, + max_concurrent_requests, allow_transactions_or_outputs, - true, - &global_data_summary, - ) - .await; + transactions_only, + 0, + subscription_stream_id, + 0, + ); - // Handle multiple timeouts and retries because no new data is known - // about, so the best we can do is send optimistic fetch requests. 
- for _ in 0..3 { + // Set a failure response for the first subscription request and process it + set_failure_response_in_queue(&mut data_stream, 0); + process_data_responses(&mut data_stream, &global_data_summary).await; + assert_none!(stream_listener.select_next_some().now_or_never()); + + // Handle multiple timeouts and retries + for _ in 0..max_request_retry * 3 { + // Set a timeout response for the first request and process it set_timeout_response_in_queue(&mut data_stream, 0); process_data_responses(&mut data_stream, &global_data_summary).await; + + // Fetch the subscription stream ID from the first pending request + let next_subscription_stream_id = get_subscription_stream_id(&mut data_stream, 0); + + // Verify the next stream ID is different from the previous one + assert_ne!(subscription_stream_id, next_subscription_stream_id); + subscription_stream_id = next_subscription_stream_id; + + // Verify the pending requests are for the correct data and correctly formed + verify_pending_subscription_requests( + &mut data_stream, + max_concurrent_requests, + allow_transactions_or_outputs, + transactions_only, + 0, + subscription_stream_id, + 0, + ); } - // Wait until a notification is sent. The mock data client - // will verify the timeout. - wait_for_notification_and_verify( + // Set a failure response for the first request and process it + set_failure_response_in_queue(&mut data_stream, 0); + process_data_responses(&mut data_stream, &global_data_summary).await; + + // Fetch the next subscription stream ID from the first pending request + let next_subscription_stream_id = get_subscription_stream_id(&mut data_stream, 0); + + // Verify the next stream ID is different from the previous one + assert_ne!(subscription_stream_id, next_subscription_stream_id); + subscription_stream_id = next_subscription_stream_id; + + // Verify the pending requests are for the correct data and correctly formed + verify_pending_subscription_requests( &mut data_stream, - &mut stream_listener, + max_concurrent_requests, + allow_transactions_or_outputs, transactions_only, + 0, + subscription_stream_id, + 0, + ); + + // Set a subscription response in the queue and process it + set_new_data_response_in_queue( + &mut data_stream, + 0, + MAX_ADVERTISED_TRANSACTION + 1, + transactions_only, + ); + process_data_responses(&mut data_stream, &global_data_summary).await; + + // Verify the pending requests are for the correct data and correctly formed + verify_pending_subscription_requests( + &mut data_stream, + max_concurrent_requests, allow_transactions_or_outputs, - true, - &global_data_summary, + transactions_only, + 1, + subscription_stream_id, // The subscription stream ID should be the same + 0, + ); + + // Set a timeout response for the subscription request and process it. + // This will cause the same request to be re-sent. 
+ set_timeout_response_in_queue(&mut data_stream, 0); + process_data_responses(&mut data_stream, &global_data_summary).await; + + // Advertise new data and verify the data is requested + advertise_new_data_and_verify_requests( + &mut data_stream, + global_data_summary, + transactions_only, + allow_transactions_or_outputs, + max_concurrent_requests, ) .await; } } +#[tokio::test] +async fn test_continuous_stream_subscription_max() { + // Create a test streaming service config with subscriptions enabled + let max_concurrent_requests = 3; + let max_num_consecutive_subscriptions = 5; + let streaming_service_config = DataStreamingServiceConfig { + enable_subscription_streaming: true, + max_concurrent_requests, + max_num_consecutive_subscriptions, + ..Default::default() + }; + + // Test all types of continuous data streams + let continuous_data_streams = enumerate_continuous_data_streams( + AptosDataClientConfig::default(), + streaming_service_config, + ); + for (mut data_stream, _stream_listener, transactions_only, allow_transactions_or_outputs) in + continuous_data_streams + { + // Initialize the data stream + let global_data_summary = create_global_data_summary(1); + initialize_data_requests(&mut data_stream, &global_data_summary); + + // Iterate through several changes in subscription streams + let num_subscription_stream_changes = 5; + for stream_number in 0..num_subscription_stream_changes { + // Fetch the subscription stream ID from the first pending request + let subscription_stream_id = get_subscription_stream_id(&mut data_stream, 0); + + // Verify the pending requests are for the correct data and correctly formed + verify_pending_subscription_requests( + &mut data_stream, + max_concurrent_requests, + allow_transactions_or_outputs, + transactions_only, + 0, + subscription_stream_id, + stream_number * max_num_consecutive_subscriptions, + ); + + // Set valid responses for all pending requests and process the responses + for request_index in 0..max_concurrent_requests { + set_new_data_response_in_queue( + &mut data_stream, + request_index as usize, + MAX_ADVERTISED_TRANSACTION + request_index, + transactions_only, + ); + } + process_data_responses(&mut data_stream, &global_data_summary).await; + + // Verify the number of pending requests + let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); + assert_eq!( + sent_requests.as_ref().unwrap().len(), + (max_num_consecutive_subscriptions - max_concurrent_requests) as usize + ); + + // Set valid responses for all pending requests and process the responses + for request_index in 0..(max_num_consecutive_subscriptions - max_concurrent_requests) { + set_new_data_response_in_queue( + &mut data_stream, + request_index as usize, + MAX_ADVERTISED_TRANSACTION + request_index + max_concurrent_requests, + transactions_only, + ); + } + process_data_responses(&mut data_stream, &global_data_summary).await; + + // Fetch the next subscription stream ID from the first pending request + let next_subscription_stream_id = get_subscription_stream_id(&mut data_stream, 0); + + // Verify the subscription stream ID has changed (because we hit the max number of requests) + assert_ne!(subscription_stream_id, next_subscription_stream_id); + } + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_continuous_stream_subscription_timeout() { + // Create a test data client config + let data_client_config = AptosDataClientConfig { + subscription_response_timeout_ms: 2022, + ..Default::default() + }; + + // Create a test streaming service config 
with subscriptions enabled + let streaming_service_config = DataStreamingServiceConfig { + enable_subscription_streaming: true, + max_concurrent_requests: 7, + ..Default::default() + }; + + // Verify the timeouts of all continuous data streams + verify_continuous_stream_request_timeouts( + data_client_config, + streaming_service_config, + streaming_service_config.max_concurrent_requests, + ) + .await; +} + #[tokio::test(flavor = "multi_thread")] -async fn test_transactions_and_output_stream_timeout() { +async fn test_stream_timeouts() { // Create a test data client config let max_response_timeout_ms = 85; let response_timeout_ms = 7; @@ -764,8 +856,8 @@ async fn test_transactions_and_output_stream_timeout() { ); // Wait for the data client to satisfy all requests - for i in 0..max_concurrent_requests as usize { - wait_for_data_client_to_respond(&mut data_stream, i).await; + for request_index in 0..max_concurrent_requests as usize { + wait_for_data_client_to_respond(&mut data_stream, request_index).await; } // Handle multiple timeouts and retries on the first request @@ -787,8 +879,8 @@ async fn test_transactions_and_output_stream_timeout() { .await; // Wait for the data client to satisfy all requests - for i in 0..max_concurrent_requests as usize { - wait_for_data_client_to_respond(&mut data_stream, i).await; + for request_index in 0..max_concurrent_requests as usize { + wait_for_data_client_to_respond(&mut data_stream, request_index).await; } // Set a timeout on the second request @@ -796,14 +888,14 @@ async fn test_transactions_and_output_stream_timeout() { // Handle multiple invalid type responses on the first request for _ in 0..max_request_retry / 2 { - set_state_value_response_in_queue(&mut data_stream, 0); + set_state_value_response_in_queue(&mut data_stream, 0, 0, 0); process_data_responses(&mut data_stream, &global_data_summary).await; wait_for_data_client_to_respond(&mut data_stream, 0).await; } // Handle multiple invalid type responses on the third request for _ in 0..max_request_retry / 2 { - set_state_value_response_in_queue(&mut data_stream, 2); + set_state_value_response_in_queue(&mut data_stream, 2, 2, 2); process_data_responses(&mut data_stream, &global_data_summary).await; wait_for_data_client_to_respond(&mut data_stream, 2).await; } @@ -880,6 +972,66 @@ async fn test_stream_listener_dropped() { assert_eq!(sent_notifications.len(), 2); } +/// Advertises new data (beyond the highest advertised data) and verifies +/// that data client requests are sent to fetch the missing data. 
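+/// Note: after the newly advertised data is processed, exactly `max_concurrent_requests`
+/// pending client requests are expected in the sent-request queue.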
+async fn advertise_new_data_and_verify_requests( + data_stream: &mut DataStream, + global_data_summary: GlobalDataSummary, + transactions_only: bool, + allow_transactions_or_outputs: bool, + max_concurrent_requests: u64, +) { + // Advertise new data beyond the currently advertised data + let mut new_global_data_summary = global_data_summary.clone(); + let new_highest_synced_version = MAX_ADVERTISED_TRANSACTION + 1000; + new_global_data_summary.advertised_data.synced_ledger_infos = vec![create_ledger_info( + new_highest_synced_version, + MAX_ADVERTISED_EPOCH_END, + false, + )]; + + // Set a timeout response at the head of the queue and process the response + set_timeout_response_in_queue(data_stream, 0); + process_data_responses(data_stream, &new_global_data_summary).await; + + // Verify multiple data requests have now been sent to fetch the missing data + let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); + assert_eq!( + sent_requests.as_ref().unwrap().len(), + max_concurrent_requests as usize, + ); + + // Verify the pending requests are for the correct data and correctly formed + for request_index in 0..max_concurrent_requests { + let client_request = get_pending_client_request(data_stream, request_index as usize); + let expected_version = MAX_ADVERTISED_TRANSACTION + 2 + request_index; + let expected_request = if allow_transactions_or_outputs { + DataClientRequest::TransactionsOrOutputsWithProof( + TransactionsOrOutputsWithProofRequest { + start_version: expected_version, + end_version: expected_version, + proof_version: new_highest_synced_version, + include_events: false, + }, + ) + } else if transactions_only { + DataClientRequest::TransactionsWithProof(TransactionsWithProofRequest { + start_version: expected_version, + end_version: expected_version, + proof_version: new_highest_synced_version, + include_events: false, + }) + } else { + DataClientRequest::TransactionOutputsWithProof(TransactionOutputsWithProofRequest { + start_version: expected_version, + end_version: expected_version, + proof_version: new_highest_synced_version, + }) + }; + assert_eq!(client_request, expected_request); + } +} + /// Creates a state value stream for the given `version`. fn create_state_value_stream( data_client_config: AptosDataClientConfig, @@ -1083,6 +1235,71 @@ fn create_optimal_chunk_sizes(chunk_sizes: u64) -> OptimalChunkSizes { } } +/// A utility function that creates and returns all types of +/// continuous data streams. This is useful for tests that verify +/// all stream types. 
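+/// Each entry is a tuple of: (data stream, stream listener, transactions_only, allow_transactions_or_outputs).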
+fn enumerate_continuous_data_streams( + data_client_config: AptosDataClientConfig, + streaming_service_config: DataStreamingServiceConfig, +) -> Vec<( + DataStream, + DataStreamListener, + bool, + bool, +)> { + let mut continuous_data_streams = vec![]; + + // Create a continuous transaction stream + let transactions_only = true; + let allow_transactions_or_outputs = false; + let (data_stream, stream_listener) = create_continuous_transaction_stream( + data_client_config, + streaming_service_config, + MAX_ADVERTISED_TRANSACTION, + MAX_ADVERTISED_EPOCH_END, + ); + continuous_data_streams.push(( + data_stream, + stream_listener, + transactions_only, + allow_transactions_or_outputs, + )); + + // Create a continuous transaction output stream + let transactions_only = false; + let allow_transactions_or_outputs = false; + let (data_stream, stream_listener) = create_continuous_transaction_output_stream( + data_client_config, + streaming_service_config, + MAX_ADVERTISED_TRANSACTION_OUTPUT, + MAX_ADVERTISED_EPOCH_END, + ); + continuous_data_streams.push(( + data_stream, + stream_listener, + transactions_only, + allow_transactions_or_outputs, + )); + + // Create a continuous transaction or output stream + let transactions_only = false; + let allow_transactions_or_outputs = true; + let (data_stream, stream_listener) = create_continuous_transaction_or_output_stream( + data_client_config, + streaming_service_config, + MAX_ADVERTISED_TRANSACTION_OUTPUT, + MAX_ADVERTISED_EPOCH_END, + ); + continuous_data_streams.push(( + data_stream, + stream_listener, + transactions_only, + allow_transactions_or_outputs, + )); + + continuous_data_streams +} + /// Sets the client response at the index in the pending queue to contain an /// epoch ending data response. fn set_epoch_ending_response_in_queue( @@ -1120,17 +1337,23 @@ fn set_num_state_values_response_in_queue( /// state value data response. fn set_state_value_response_in_queue( data_stream: &mut DataStream, - index: usize, + first_state_value_index: u64, + last_state_value_index: u64, + request_index: usize, ) { let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - let pending_response = sent_requests.as_mut().unwrap().get_mut(index).unwrap(); + let pending_response = sent_requests + .as_mut() + .unwrap() + .get_mut(request_index) + .unwrap(); let client_response = Some(Ok(create_data_client_response( ResponsePayload::StateValuesWithProof(StateValueChunkWithProof { - first_index: 0, - last_index: 0, + first_index: first_state_value_index, + last_index: last_state_value_index, first_key: Default::default(), last_key: Default::default(), - raw_values: vec![], + raw_values: vec![(StateKey::raw(vec![]), StateValue::new_legacy(vec![].into()))], proof: SparseMerkleRangeProof::new(vec![]), root_hash: Default::default(), }), @@ -1138,9 +1361,9 @@ fn set_state_value_response_in_queue( pending_response.lock().client_response = client_response; } -/// Sets the client response at the index in the pending queue to contain -/// an optimistic fetch response. -fn set_optimistic_fetch_response_in_queue( +/// Sets the client response at the index in the pending +/// queue to contain new data. 
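+/// This helper is used for both optimistic fetch and subscription responses.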
+fn set_new_data_response_in_queue( data_stream: &mut DataStream, index: usize, single_data_version: u64, @@ -1166,15 +1389,37 @@ fn set_optimistic_fetch_response_in_queue( pending_response.lock().client_response = client_response; } +/// Sets the client response at the index in the pending queue to contain a failure +fn set_failure_response_in_queue(data_stream: &mut DataStream, index: usize) { + set_response_in_queue( + data_stream, + index, + aptos_data_client::error::Error::UnexpectedErrorEncountered("Oops!".into()), + ); +} + /// Sets the client response at the index in the pending queue to contain a /// timeout response. fn set_timeout_response_in_queue(data_stream: &mut DataStream, index: usize) { + set_response_in_queue( + data_stream, + index, + aptos_data_client::error::Error::TimeoutWaitingForResponse("Timed out!".into()), + ); +} + +/// Sets the given error response at the index in the pending queue +fn set_response_in_queue( + data_stream: &mut DataStream, + index: usize, + error_response: aptos_data_client::error::Error, +) { + // Get the pending response at the specified index let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); let pending_response = sent_requests.as_mut().unwrap().get_mut(index).unwrap(); - let client_response = Some(Err( - aptos_data_client::error::Error::TimeoutWaitingForResponse("Timed out!".into()), - )); - pending_response.lock().client_response = client_response; + + // Set the response + pending_response.lock().client_response = Some(Err(error_response)); } /// Waits for the data client to set the response at the index in the @@ -1221,10 +1466,10 @@ fn insert_response_into_pending_queue( pending_response: PendingClientResponse, ) { // Clear the queue - let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); - sent_requests.as_mut().unwrap().clear(); + data_stream.clear_sent_data_requests_queue(); // Insert the pending response + let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); let pending_response = Arc::new(Mutex::new(Box::new(pending_response))); sent_requests.as_mut().unwrap().push_front(pending_response); } @@ -1241,6 +1486,71 @@ fn verify_client_request_resubmitted( assert_none!(pending_response.lock().client_response.as_ref()); } +/// Verifies the timeouts of all continuous data stream requests +/// in the presence of RPC timeouts and failures. +async fn verify_continuous_stream_request_timeouts( + data_client_config: AptosDataClientConfig, + streaming_service_config: DataStreamingServiceConfig, + num_expected_requests: u64, +) { + // Test all types of continuous data streams + let continuous_data_streams = + enumerate_continuous_data_streams(data_client_config, streaming_service_config); + for (mut data_stream, mut stream_listener, transactions_only, allow_transactions_or_outputs) in + continuous_data_streams + { + // Initialize the data stream + let global_data_summary = create_global_data_summary(1); + initialize_data_requests(&mut data_stream, &global_data_summary); + + // Verify that the expected number of requests are made + let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); + assert_eq!( + sent_requests.as_ref().unwrap().len(), + num_expected_requests as usize + ); + + // Wait until a notification is sent. The mock data client + // will verify the timeout. 
+ wait_for_notification_and_verify( + &mut data_stream, + &mut stream_listener, + transactions_only, + allow_transactions_or_outputs, + true, + &global_data_summary, + ) + .await; + + // Handle multiple timeouts and retries because no new data is known, + // so the best we can do is resend the same requests. + for _ in 0..3 { + // Set a timeout response for the subscription request and process it + set_timeout_response_in_queue(&mut data_stream, 0); + process_data_responses(&mut data_stream, &global_data_summary).await; + + // Verify more requests are made + let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); + assert_eq!( + sent_requests.as_ref().unwrap().len(), + num_expected_requests as usize + ); + } + + // Wait until a notification is sent. The mock data client + // will verify the timeout. + wait_for_notification_and_verify( + &mut data_stream, + &mut stream_listener, + transactions_only, + allow_transactions_or_outputs, + true, + &global_data_summary, + ) + .await; + } +} + /// Verifies that a single epoch ending notification is received by the /// data listener and that it contains the `expected_ledger_info`. async fn verify_epoch_ending_notification( @@ -1291,23 +1601,137 @@ fn get_pending_client_request( client_request } -/// Waits for an optimistic fetch notification along the given -/// listener and continues to drive progress until one is received. -/// Verifies the notification when it is received. +/// Returns the subscription stream ID from the pending client request at the given index +fn get_subscription_stream_id( + data_stream: &mut DataStream, + index: usize, +) -> u64 { + // Get the pending client request + let client_request = get_pending_client_request(data_stream, index); + + // Extract the subscription stream ID from the request + match client_request { + DataClientRequest::SubscribeTransactionsOrOutputsWithProof(request) => { + request.subscription_stream_id + }, + DataClientRequest::SubscribeTransactionsWithProof(request) => { + request.subscription_stream_id + }, + DataClientRequest::SubscribeTransactionOutputsWithProof(request) => { + request.subscription_stream_id + }, + _ => panic!("Unexpected client request type found! {:?}", client_request), + } +} + +/// Verifies that a single pending optimistic fetch exists and +/// that it is for the correct data. 
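+/// The expected known version is the highest advertised version plus the given `known_version_offset`.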
+fn verify_pending_optimistic_fetch( + data_stream: &mut DataStream, + transactions_only: bool, + allow_transactions_or_outputs: bool, + known_version_offset: u64, +) { + // Verify a single request is pending + let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); + assert_eq!(sent_requests.as_ref().unwrap().len(), 1); + + // Verify the request is for the correct data + let client_request = get_pending_client_request(data_stream, 0); + let expected_request = if allow_transactions_or_outputs { + DataClientRequest::NewTransactionsOrOutputsWithProof( + NewTransactionsOrOutputsWithProofRequest { + known_version: MAX_ADVERTISED_TRANSACTION_OUTPUT + known_version_offset, + known_epoch: MAX_ADVERTISED_EPOCH_END, + include_events: false, + }, + ) + } else if transactions_only { + DataClientRequest::NewTransactionsWithProof(NewTransactionsWithProofRequest { + known_version: MAX_ADVERTISED_TRANSACTION + known_version_offset, + known_epoch: MAX_ADVERTISED_EPOCH_END, + include_events: false, + }) + } else { + DataClientRequest::NewTransactionOutputsWithProof(NewTransactionOutputsWithProofRequest { + known_version: MAX_ADVERTISED_TRANSACTION_OUTPUT + known_version_offset, + known_epoch: MAX_ADVERTISED_EPOCH_END, + }) + }; + assert_eq!(client_request, expected_request); +} + +/// Verifies that the pending subscription requests are well formed +/// and for the correct data. +fn verify_pending_subscription_requests( + data_stream: &mut DataStream, + max_concurrent_requests: u64, + allow_transactions_or_outputs: bool, + transactions_only: bool, + starting_stream_index: u64, + subscription_stream_id: u64, + known_version_offset: u64, +) { + // Verify the correct number of pending requests + let (sent_requests, _) = data_stream.get_sent_requests_and_notifications(); + assert_eq!( + sent_requests.as_ref().unwrap().len(), + max_concurrent_requests as usize + ); + + // Verify the pending requests are for the correct data and correctly formed + for request_index in 0..max_concurrent_requests { + let client_request = get_pending_client_request(data_stream, request_index as usize); + let expected_request = if allow_transactions_or_outputs { + DataClientRequest::SubscribeTransactionsOrOutputsWithProof( + SubscribeTransactionsOrOutputsWithProofRequest { + known_version: MAX_ADVERTISED_TRANSACTION_OUTPUT + known_version_offset, + known_epoch: MAX_ADVERTISED_EPOCH_END, + subscription_stream_index: starting_stream_index + request_index, + include_events: false, + subscription_stream_id, + }, + ) + } else if transactions_only { + DataClientRequest::SubscribeTransactionsWithProof( + SubscribeTransactionsWithProofRequest { + known_version: MAX_ADVERTISED_TRANSACTION + known_version_offset, + known_epoch: MAX_ADVERTISED_EPOCH_END, + subscription_stream_index: starting_stream_index + request_index, + include_events: false, + subscription_stream_id, + }, + ) + } else { + DataClientRequest::SubscribeTransactionOutputsWithProof( + SubscribeTransactionOutputsWithProofRequest { + known_version: MAX_ADVERTISED_TRANSACTION_OUTPUT + known_version_offset, + known_epoch: MAX_ADVERTISED_EPOCH_END, + subscription_stream_index: starting_stream_index + request_index, + subscription_stream_id, + }, + ) + }; + assert_eq!(client_request, expected_request); + } +} + +/// Verifies a notification along the given listener and +/// continues to drive progress until one is received. 
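+/// If `new_data_notification` is true, the received payload must contain new (continuous) data.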
async fn wait_for_notification_and_verify( data_stream: &mut DataStream, stream_listener: &mut DataStreamListener, transaction_syncing: bool, allow_transactions_or_outputs: bool, - optimistic_fetch_notification: bool, + new_data_notification: bool, global_data_summary: &GlobalDataSummary, ) { loop { if let Ok(data_notification) = timeout(Duration::from_secs(1), stream_listener.select_next_some()).await { - if optimistic_fetch_notification { - // Verify we got the correct optimistic fetch data + if new_data_notification { + // Verify we got the correct new data match data_notification.data_payload { DataPayload::ContinuousTransactionsWithProof(..) => { assert!(allow_transactions_or_outputs || transaction_syncing); diff --git a/state-sync/data-streaming-service/src/tests/missing_data.rs b/state-sync/data-streaming-service/src/tests/missing_data.rs new file mode 100644 index 0000000000000..de1b7781e2031 --- /dev/null +++ b/state-sync/data-streaming-service/src/tests/missing_data.rs @@ -0,0 +1,871 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + data_notification::{ + DataClientRequest, DataPayload, EpochEndingLedgerInfosRequest, + NewTransactionOutputsWithProofRequest, NewTransactionsOrOutputsWithProofRequest, + NewTransactionsWithProofRequest, NumberOfStatesRequest, StateValuesWithProofRequest, + SubscribeTransactionOutputsWithProofRequest, + SubscribeTransactionsOrOutputsWithProofRequest, SubscribeTransactionsWithProofRequest, + TransactionOutputsWithProofRequest, TransactionsOrOutputsWithProofRequest, + TransactionsWithProofRequest, + }, + data_stream::create_missing_data_request, + stream_engine::{bound_by_range, DataStreamEngine, StreamEngine}, + streaming_client::{ + ContinuouslyStreamTransactionOutputsRequest, GetAllEpochEndingLedgerInfosRequest, + GetAllStatesRequest, GetAllTransactionsRequest, StreamRequest, + }, + tests::utils::{create_ledger_info, create_transaction_list_with_proof}, +}; +use aptos_config::config::DataStreamingServiceConfig; +use aptos_crypto::HashValue; +use aptos_data_client::{global_summary::GlobalDataSummary, interface::ResponsePayload}; +use aptos_id_generator::U64IdGenerator; +use aptos_storage_service_types::responses::CompleteDataRange; +use aptos_types::{ + proof::{SparseMerkleRangeProof, TransactionInfoListWithProof}, + state_store::{ + state_key::StateKey, + state_value::{StateValue, StateValueChunkWithProof}, + }, + transaction::{ + Transaction, TransactionListWithProof, TransactionOutput, TransactionOutputListWithProof, + TransactionStatus, + }, + write_set::WriteSet, +}; +use std::sync::Arc; + +#[test] +fn test_bound_by_range() { + // Test numbers beyond max + assert_eq!(bound_by_range(11, 5, 10), 10); + assert_eq!(bound_by_range(100, 10, 10), 10); + assert_eq!(bound_by_range(1000, 12, 15), 15); + + // Test numbers below min + assert_eq!(bound_by_range(4, 5, 10), 5); + assert_eq!(bound_by_range(0, 10, 10), 10); + assert_eq!(bound_by_range(11, 12, 15), 12); + + // Test numbers within the range + assert_eq!(bound_by_range(9, 5, 10), 9); + assert_eq!(bound_by_range(14, 5, 15), 14); + assert_eq!(bound_by_range(20, 0, 20), 20); + assert_eq!(bound_by_range(10, 10, 15), 10); + assert_eq!(bound_by_range(13, 12, 15), 13); +} + +#[test] +fn create_missing_data_request_trivial_request_types() { + // Enumerate all data request types that are trivially satisfied + let trivial_client_requests = vec![ + DataClientRequest::NewTransactionOutputsWithProof(NewTransactionOutputsWithProofRequest { + known_version: 0, + 
known_epoch: 0, + }), + DataClientRequest::NewTransactionsWithProof(NewTransactionsWithProofRequest { + known_version: 0, + known_epoch: 0, + include_events: false, + }), + DataClientRequest::NewTransactionsOrOutputsWithProof( + NewTransactionsOrOutputsWithProofRequest { + known_version: 0, + known_epoch: 0, + include_events: false, + }, + ), + DataClientRequest::NumberOfStates(NumberOfStatesRequest { version: 0 }), + DataClientRequest::SubscribeTransactionOutputsWithProof( + SubscribeTransactionOutputsWithProofRequest { + known_version: 0, + known_epoch: 0, + subscription_stream_id: 0, + subscription_stream_index: 0, + }, + ), + DataClientRequest::SubscribeTransactionsWithProof(SubscribeTransactionsWithProofRequest { + known_version: 0, + known_epoch: 0, + include_events: false, + subscription_stream_id: 0, + subscription_stream_index: 0, + }), + DataClientRequest::SubscribeTransactionsOrOutputsWithProof( + SubscribeTransactionsOrOutputsWithProofRequest { + known_version: 0, + known_epoch: 0, + subscription_stream_id: 0, + subscription_stream_index: 0, + include_events: false, + }, + ), + ]; + + // Verify that the missing data request is empty + for data_client_request in trivial_client_requests { + let missing_data_request = + create_missing_data_request(&data_client_request, &ResponsePayload::NumberOfStates(0)) + .unwrap(); + assert!(missing_data_request.is_none()); + } +} + +#[test] +fn create_missing_data_request_epoch_ending_ledger_infos() { + // Create the data client request + let start_epoch = 10; + let end_epoch = 15; + let data_client_request = + DataClientRequest::EpochEndingLedgerInfos(EpochEndingLedgerInfosRequest { + start_epoch, + end_epoch, + }); + + // Create the partial response payload + let last_response_epoch = end_epoch - 1; + let epoch_ending_ledger_infos = (start_epoch..last_response_epoch + 1) + .map(|epoch| create_ledger_info(epoch * 100, epoch, true)) + .collect::>(); + let response_payload = ResponsePayload::EpochEndingLedgerInfos(epoch_ending_ledger_infos); + + // Create the missing data request and verify that it's valid + let missing_data_request = + create_missing_data_request(&data_client_request, &response_payload).unwrap(); + let expected_missing_data_request = + DataClientRequest::EpochEndingLedgerInfos(EpochEndingLedgerInfosRequest { + start_epoch: last_response_epoch + 1, + end_epoch, + }); + assert_eq!(missing_data_request.unwrap(), expected_missing_data_request); + + // Create a complete response payload + let last_response_epoch = end_epoch; + let epoch_ending_ledger_infos = (start_epoch..last_response_epoch + 1) + .map(|epoch| create_ledger_info(epoch * 100, epoch, true)) + .collect::>(); + let response_payload = ResponsePayload::EpochEndingLedgerInfos(epoch_ending_ledger_infos); + + // Create the missing data request and verify that it's empty + let missing_data_request = + create_missing_data_request(&data_client_request, &response_payload).unwrap(); + assert!(missing_data_request.is_none()); +} + +#[test] +fn create_missing_data_request_state_values() { + // Create the data client request + let version = 10; + let start_index = 100; + let end_index = 200; + let data_client_request = + DataClientRequest::StateValuesWithProof(StateValuesWithProofRequest { + version, + start_index, + end_index, + }); + + // Create the partial response payload + let last_response_index = end_index - 1; + let raw_values = (start_index..last_response_index + 1) + .map(|_| (StateKey::raw(vec![]), StateValue::new_legacy(vec![].into()))) + .collect::>(); + let 
response_payload = ResponsePayload::StateValuesWithProof(StateValueChunkWithProof { + first_index: start_index, + last_index: last_response_index, + first_key: HashValue::zero(), + last_key: HashValue::zero(), + raw_values, + proof: SparseMerkleRangeProof::new(vec![]), + root_hash: HashValue::zero(), + }); + + // Create the missing data request and verify that it's valid + let missing_data_request = + create_missing_data_request(&data_client_request, &response_payload).unwrap(); + let expected_missing_data_request = + DataClientRequest::StateValuesWithProof(StateValuesWithProofRequest { + version, + start_index: last_response_index + 1, + end_index, + }); + assert_eq!(missing_data_request.unwrap(), expected_missing_data_request); + + // Create a complete response payload + let last_response_index = end_index; + let raw_values = (start_index..last_response_index + 1) + .map(|_| (StateKey::raw(vec![]), StateValue::new_legacy(vec![].into()))) + .collect::>(); + let response_payload = ResponsePayload::StateValuesWithProof(StateValueChunkWithProof { + first_index: start_index, + last_index: last_response_index, + first_key: HashValue::zero(), + last_key: HashValue::zero(), + raw_values, + proof: SparseMerkleRangeProof::new(vec![]), + root_hash: HashValue::zero(), + }); + + // Create the missing data request and verify that it's empty + let missing_data_request = + create_missing_data_request(&data_client_request, &response_payload).unwrap(); + assert!(missing_data_request.is_none()); +} + +#[test] +fn create_missing_data_request_transactions() { + // Create the data client request + let start_version = 100; + let end_version = 200; + let data_client_request = + DataClientRequest::TransactionsWithProof(TransactionsWithProofRequest { + start_version, + end_version, + proof_version: end_version, + include_events: true, + }); + + // Create the partial response payload + let last_response_version = end_version - 50; + let transactions = (start_version..last_response_version + 1) + .map(|_| create_test_transaction()) + .collect::>(); + let response_payload = ResponsePayload::TransactionsWithProof(TransactionListWithProof { + transactions, + events: None, + first_transaction_version: Some(start_version), + proof: TransactionInfoListWithProof::new_empty(), + }); + + // Create the missing data request and verify that it's valid + let missing_data_request = + create_missing_data_request(&data_client_request, &response_payload).unwrap(); + let expected_missing_data_request = + DataClientRequest::TransactionsWithProof(TransactionsWithProofRequest { + start_version: last_response_version + 1, + end_version, + proof_version: end_version, + include_events: true, + }); + assert_eq!(missing_data_request.unwrap(), expected_missing_data_request); + + // Create a complete response payload + let last_response_version = end_version; + let transactions = (start_version..last_response_version + 1) + .map(|_| create_test_transaction()) + .collect::>(); + let response_payload = ResponsePayload::TransactionsWithProof(TransactionListWithProof { + transactions, + events: None, + first_transaction_version: Some(start_version), + proof: TransactionInfoListWithProof::new_empty(), + }); + + // Create the missing data request and verify that it's empty + let missing_data_request = + create_missing_data_request(&data_client_request, &response_payload).unwrap(); + assert!(missing_data_request.is_none()); +} + +#[test] +fn create_missing_data_request_transaction_outputs() { + // Create the data client request + let start_version = 
1000; + let end_version = 2000; + let data_client_request = + DataClientRequest::TransactionOutputsWithProof(TransactionOutputsWithProofRequest { + start_version, + end_version, + proof_version: end_version, + }); + + // Create the partial response payload + let last_response_version = end_version - 1000; + let transactions_and_outputs = (start_version..last_response_version + 1) + .map(|_| (create_test_transaction(), create_test_transaction_output())) + .collect::>(); + let response_payload = + ResponsePayload::TransactionOutputsWithProof(TransactionOutputListWithProof { + transactions_and_outputs, + proof: TransactionInfoListWithProof::new_empty(), + first_transaction_output_version: Some(start_version), + }); + + // Create the missing data request and verify that it's valid + let missing_data_request = + create_missing_data_request(&data_client_request, &response_payload).unwrap(); + let expected_missing_data_request = + DataClientRequest::TransactionOutputsWithProof(TransactionOutputsWithProofRequest { + start_version: last_response_version + 1, + end_version, + proof_version: end_version, + }); + assert_eq!(missing_data_request.unwrap(), expected_missing_data_request); + + // Create a complete response payload + let last_response_version = end_version; + let transactions_and_outputs = (start_version..last_response_version + 1) + .map(|_| (create_test_transaction(), create_test_transaction_output())) + .collect::>(); + let response_payload = + ResponsePayload::TransactionOutputsWithProof(TransactionOutputListWithProof { + transactions_and_outputs, + proof: TransactionInfoListWithProof::new_empty(), + first_transaction_output_version: Some(start_version), + }); + + // Create the missing data request and verify that it's empty + let missing_data_request = + create_missing_data_request(&data_client_request, &response_payload).unwrap(); + assert!(missing_data_request.is_none()); +} + +#[test] +fn create_missing_data_request_transactions_or_outputs() { + // Create the data client request + let start_version = 0; + let end_version = 2000; + let data_client_request = + DataClientRequest::TransactionsOrOutputsWithProof(TransactionsOrOutputsWithProofRequest { + start_version, + end_version, + include_events: true, + proof_version: end_version, + }); + + // Create a partial response payload with transactions + let last_response_version = end_version - 500; + let transactions = (start_version..last_response_version + 1) + .map(|_| create_test_transaction()) + .collect::>(); + let response_payload_with_transactions = + ResponsePayload::TransactionsWithProof(TransactionListWithProof { + transactions, + events: None, + first_transaction_version: Some(start_version), + proof: TransactionInfoListWithProof::new_empty(), + }); + + // Create a partial response payload with transaction outputs + let transactions_and_outputs = (start_version..last_response_version + 1) + .map(|_| (create_test_transaction(), create_test_transaction_output())) + .collect::>(); + let response_payload_with_transaction_outputs = + ResponsePayload::TransactionOutputsWithProof(TransactionOutputListWithProof { + transactions_and_outputs, + proof: TransactionInfoListWithProof::new_empty(), + first_transaction_output_version: Some(start_version), + }); + + // Create the missing data requests and verify that they are valid + for response_payload in [ + response_payload_with_transactions, + response_payload_with_transaction_outputs, + ] { + let missing_data_request = + create_missing_data_request(&data_client_request, 
&response_payload).unwrap(); + let expected_missing_data_request = DataClientRequest::TransactionsOrOutputsWithProof( + TransactionsOrOutputsWithProofRequest { + start_version: last_response_version + 1, + end_version, + proof_version: end_version, + include_events: true, + }, + ); + assert_eq!(missing_data_request.unwrap(), expected_missing_data_request); + } + + // Create a complete response payload with transactions + let last_response_version = end_version; + let transactions = (start_version..last_response_version + 1) + .map(|_| create_test_transaction()) + .collect::>(); + let response_payload_with_transactions = + ResponsePayload::TransactionsWithProof(TransactionListWithProof { + transactions, + events: None, + first_transaction_version: Some(start_version), + proof: TransactionInfoListWithProof::new_empty(), + }); + + // Create a complete response payload with transaction outputs + let transactions_and_outputs = (start_version..last_response_version + 1) + .map(|_| (create_test_transaction(), create_test_transaction_output())) + .collect::>(); + let response_payload_with_transaction_outputs = + ResponsePayload::TransactionOutputsWithProof(TransactionOutputListWithProof { + transactions_and_outputs, + proof: TransactionInfoListWithProof::new_empty(), + first_transaction_output_version: Some(start_version), + }); + + // Create the missing data requests and verify that they are empty + for response_payload in [ + response_payload_with_transactions, + response_payload_with_transaction_outputs, + ] { + let missing_data_request = + create_missing_data_request(&data_client_request, &response_payload).unwrap(); + assert!(missing_data_request.is_none()); + } +} + +#[test] +fn transform_epoch_ending_stream_notifications() { + // Create an epoch ending stream request + let start_epoch = 100; + let stream_request = + StreamRequest::GetAllEpochEndingLedgerInfos(GetAllEpochEndingLedgerInfosRequest { + start_epoch, + }); + + // Create a global data summary with a single epoch range + let end_epoch = 199; + let mut global_data_summary = GlobalDataSummary::empty(); + global_data_summary + .advertised_data + .epoch_ending_ledger_infos = vec![CompleteDataRange::new(start_epoch, end_epoch).unwrap()]; + global_data_summary.optimal_chunk_sizes.epoch_chunk_size = 100; + + // Create a new epoch ending stream engine + let mut stream_engine = match StreamEngine::new( + DataStreamingServiceConfig::default(), + &stream_request, + &global_data_summary.advertised_data, + ) + .unwrap() + { + StreamEngine::EpochEndingStreamEngine(stream_engine) => stream_engine, + unexpected_engine => { + panic!( + "Expected epoch ending stream engine but got {:?}", + unexpected_engine + ); + }, + }; + + // Verify the tracked stream indices + assert_eq!(stream_engine.next_stream_epoch, start_epoch); + assert_eq!(stream_engine.next_request_epoch, start_epoch); + + // Create a single data client request + let notification_id_generator = create_notification_id_generator(); + let data_client_request = stream_engine + .create_data_client_requests(1, &global_data_summary, notification_id_generator.clone()) + .unwrap(); + assert_eq!(data_client_request.len(), 1); + + // Create an empty client response + let client_response_payload = ResponsePayload::EpochEndingLedgerInfos(vec![]); + + // Transform the client response into a notification and verify an error is returned + let _ = stream_engine + .transform_client_response_into_notification( + &data_client_request[0].clone(), + client_response_payload, + 
notification_id_generator.clone(), + ) + .unwrap_err(); + + // Create a client response with an invalid epoch + let invalid_ledger_infos = vec![create_ledger_info(0, start_epoch - 1, true)]; + let client_response_payload = + ResponsePayload::EpochEndingLedgerInfos(invalid_ledger_infos.clone()); + + // Transform the client response into a notification and verify the notification + let data_notification = stream_engine + .transform_client_response_into_notification( + &data_client_request[0].clone(), + client_response_payload, + notification_id_generator.clone(), + ) + .unwrap(); + assert_eq!( + data_notification.unwrap().data_payload, + DataPayload::EpochEndingLedgerInfos(invalid_ledger_infos) + ); + + // Verify the tracked stream indices + assert_eq!(stream_engine.next_stream_epoch, start_epoch + 1); + assert_eq!(stream_engine.next_request_epoch, end_epoch + 1); + + // Create a partial client response + let partial_ledger_infos = (start_epoch + 1..end_epoch) + .map(|epoch| create_ledger_info(epoch * 100, epoch, true)) + .collect::<Vec<_>>(); + let client_response_payload = + ResponsePayload::EpochEndingLedgerInfos(partial_ledger_infos.clone()); + + // Transform the client response into a notification + let data_client_request = + DataClientRequest::EpochEndingLedgerInfos(EpochEndingLedgerInfosRequest { + start_epoch: start_epoch + 1, + end_epoch, + }); + let _ = stream_engine + .transform_client_response_into_notification( + &data_client_request, + client_response_payload, + notification_id_generator, + ) + .unwrap(); + + // Verify the tracked stream indices + assert_eq!(stream_engine.next_stream_epoch, end_epoch); + assert_eq!(stream_engine.next_request_epoch, end_epoch + 1); +} + +#[test] +fn transform_state_values_stream_notifications() { + // Create a state values stream request + let version = 100; + let start_index = 1000; + let stream_request = StreamRequest::GetAllStates(GetAllStatesRequest { + version, + start_index, + }); + + // Create a global data summary with a single state range + let mut global_data_summary = GlobalDataSummary::empty(); + global_data_summary.advertised_data.states = + vec![CompleteDataRange::new(start_index, start_index).unwrap()]; + global_data_summary.optimal_chunk_sizes.state_chunk_size = 20_000; + + // Create a new state values stream engine + let mut stream_engine = match StreamEngine::new( + DataStreamingServiceConfig::default(), + &stream_request, + &global_data_summary.advertised_data, + ) + .unwrap() + { + StreamEngine::StateStreamEngine(stream_engine) => stream_engine, + unexpected_engine => { + panic!( + "Expected state values stream engine but got {:?}", + unexpected_engine + ); + }, + }; + + // Update the number of states for the stream + let number_of_states = 10_000; + stream_engine.number_of_states = Some(number_of_states); + + // Verify the tracked stream indices + assert_eq!(stream_engine.next_stream_index, start_index); + assert_eq!(stream_engine.next_request_index, start_index); + + // Create a single data client request + let notification_id_generator = create_notification_id_generator(); + let data_client_request = stream_engine + .create_data_client_requests(1, &global_data_summary, notification_id_generator.clone()) + .unwrap(); + assert_eq!(data_client_request.len(), 1); + + // Create an empty client response + let client_response_payload = ResponsePayload::StateValuesWithProof(create_state_value_chunk( + start_index, + start_index - 1, + 0, + )); + + // Transform the client response into a notification and verify an error is returned + let 
_ = stream_engine + .transform_client_response_into_notification( + &data_client_request[0].clone(), + client_response_payload, + notification_id_generator.clone(), + ) + .unwrap_err(); + + // Create a client response with an invalid last index + let state_value_chunk_with_proof = create_state_value_chunk(start_index, start_index - 1, 1); + let client_response_payload = + ResponsePayload::StateValuesWithProof(state_value_chunk_with_proof.clone()); + + // Transform the client response into a notification and verify the notification + let data_notification = stream_engine + .transform_client_response_into_notification( + &data_client_request[0].clone(), + client_response_payload, + notification_id_generator.clone(), + ) + .unwrap(); + assert_eq!( + data_notification.unwrap().data_payload, + DataPayload::StateValuesWithProof(state_value_chunk_with_proof) + ); + + // Verify the tracked stream indices + assert_eq!(stream_engine.next_stream_index, start_index + 1); + assert_eq!(stream_engine.next_request_index, number_of_states); + + // Create a partial client response + let last_index = number_of_states - 500; + let state_value_chunk_with_proof = + create_state_value_chunk(start_index, last_index, last_index - start_index); + let client_response_payload = + ResponsePayload::StateValuesWithProof(state_value_chunk_with_proof.clone()); + + // Transform the client response into a notification + let data_client_request = + DataClientRequest::StateValuesWithProof(StateValuesWithProofRequest { + version, + start_index: start_index + 1, + end_index: number_of_states - 1, + }); + let _ = stream_engine + .transform_client_response_into_notification( + &data_client_request, + client_response_payload, + notification_id_generator, + ) + .unwrap(); + + // Verify the tracked stream indices + assert_eq!(stream_engine.next_stream_index, last_index + 1); + assert_eq!(stream_engine.next_request_index, number_of_states); +} + +#[test] +fn transform_transactions_stream_notifications() { + // Create a transactions stream request + let start_version = 100; + let end_version = 200; + let stream_request = StreamRequest::GetAllTransactions(GetAllTransactionsRequest { + start_version, + end_version, + proof_version: end_version, + include_events: true, + }); + + // Create a global data summary with a single transaction range + let mut global_data_summary = GlobalDataSummary::empty(); + global_data_summary.advertised_data.transactions = + vec![CompleteDataRange::new(start_version, end_version).unwrap()]; + global_data_summary + .optimal_chunk_sizes + .transaction_chunk_size = 10_000; + + // Create a new transactions stream engine + let mut stream_engine = match StreamEngine::new( + DataStreamingServiceConfig::default(), + &stream_request, + &global_data_summary.advertised_data, + ) + .unwrap() + { + StreamEngine::TransactionStreamEngine(stream_engine) => stream_engine, + unexpected_engine => { + panic!( + "Expected transactions stream engine but got {:?}", + unexpected_engine + ); + }, + }; + + // Verify the tracked stream indices + assert_eq!(stream_engine.next_stream_version, start_version); + assert_eq!(stream_engine.next_request_version, start_version); + + // Create a single data client request + let notification_id_generator = create_notification_id_generator(); + let data_client_request = stream_engine + .create_data_client_requests(1, &global_data_summary, notification_id_generator.clone()) + .unwrap(); + assert_eq!(data_client_request.len(), 1); + + // Create an empty client response + let client_response_payload 
= + ResponsePayload::TransactionsWithProof(TransactionListWithProof::new_empty()); + + // Transform the client response into a notification and verify an error is returned + let _ = stream_engine + .transform_client_response_into_notification( + &data_client_request[0].clone(), + client_response_payload, + notification_id_generator.clone(), + ) + .unwrap_err(); + + // Create a partial client response + let last_version = end_version - 50; + let transactions_with_proof = + create_transaction_list_with_proof(start_version, last_version, true); + let client_response_payload = + ResponsePayload::TransactionsWithProof(transactions_with_proof.clone()); + + // Transform the client response into a notification + let data_client_request = + DataClientRequest::TransactionsWithProof(TransactionsWithProofRequest { + start_version, + end_version, + proof_version: end_version, + include_events: true, + }); + let _ = stream_engine + .transform_client_response_into_notification( + &data_client_request, + client_response_payload, + notification_id_generator, + ) + .unwrap(); + + // Verify the tracked stream indices + assert_eq!(stream_engine.next_stream_version, last_version + 1); + assert_eq!(stream_engine.next_request_version, end_version + 1); +} + +#[test] +fn transform_continuous_outputs_stream_notifications() { + // Create a continuous outputs stream request + let known_version = 1000; + let known_epoch = 10; + let stream_request = StreamRequest::ContinuouslyStreamTransactionOutputs( + ContinuouslyStreamTransactionOutputsRequest { + known_version, + known_epoch, + target: None, + }, + ); + + // Create a global data summary with a single transaction range + let mut global_data_summary = GlobalDataSummary::empty(); + global_data_summary.advertised_data.transaction_outputs = + vec![CompleteDataRange::new(known_version, known_version).unwrap()]; + global_data_summary + .optimal_chunk_sizes + .transaction_output_chunk_size = 10_000; + + // Create a new continuous outputs stream engine + let mut stream_engine = match StreamEngine::new( + DataStreamingServiceConfig::default(), + &stream_request, + &global_data_summary.advertised_data, + ) + .unwrap() + { + StreamEngine::ContinuousTransactionStreamEngine(stream_engine) => stream_engine, + unexpected_engine => { + panic!( + "Expected continuous outputs stream engine but got {:?}", + unexpected_engine + ); + }, + }; + + // Set the target ledger info for the stream + let target_version = known_version + 1000; + stream_engine.current_target_ledger_info = + Some(create_ledger_info(target_version, known_epoch, false)); + + // Verify the tracked stream indices + assert_eq!( + stream_engine.next_request_version_and_epoch, + (known_version + 1, known_epoch) + ); + assert_eq!( + stream_engine.next_stream_version_and_epoch, + (known_version + 1, known_epoch) + ); + + // Create a single data client request + let notification_id_generator = create_notification_id_generator(); + let data_client_request = stream_engine + .create_data_client_requests(1, &global_data_summary, notification_id_generator.clone()) + .unwrap(); + assert_eq!(data_client_request.len(), 1); + + // Create an empty client response + let client_response_payload = + ResponsePayload::TransactionOutputsWithProof(TransactionOutputListWithProof::new_empty()); + + // Transform the client response into a notification and verify an error is returned + let _ = stream_engine + .transform_client_response_into_notification( + &data_client_request[0].clone(), + client_response_payload, + 
notification_id_generator.clone(), + ) + .unwrap_err(); + + // Create a partial client response + let last_version = target_version - 10; + let transactions_and_outputs = (known_version..last_version + 1) + .map(|_| (create_test_transaction(), create_test_transaction_output())) + .collect::<Vec<_>>(); + let transaction_outputs_with_proof = TransactionOutputListWithProof { + transactions_and_outputs, + proof: TransactionInfoListWithProof::new_empty(), + first_transaction_output_version: Some(known_version), + }; + let client_response_payload = + ResponsePayload::TransactionOutputsWithProof(transaction_outputs_with_proof.clone()); + + // Transform the client response into a notification + let data_client_request = + DataClientRequest::TransactionOutputsWithProof(TransactionOutputsWithProofRequest { + start_version: known_version + 1, + end_version: last_version, + proof_version: target_version, + }); + let _ = stream_engine + .transform_client_response_into_notification( + &data_client_request, + client_response_payload, + notification_id_generator, + ) + .unwrap(); + + // Verify the tracked stream indices + assert_eq!( + stream_engine.next_stream_version_and_epoch, + (last_version + 1, known_epoch) + ); + assert_eq!( + stream_engine.next_request_version_and_epoch, + (target_version + 1, known_epoch) + ); +} + +/// Returns a simple notification ID generator for testing purposes +fn create_notification_id_generator() -> Arc<U64IdGenerator> { + Arc::new(U64IdGenerator::new()) +} + +/// Returns a state value chunk with proof for testing purposes +fn create_state_value_chunk( + first_index: u64, + last_index: u64, + num_values: u64, +) -> StateValueChunkWithProof { + // Create the raw values + let raw_values = (0..num_values) + .map(|_| (StateKey::raw(vec![]), StateValue::new_legacy(vec![].into()))) + .collect::<Vec<_>>(); + + // Create the chunk of state values + StateValueChunkWithProof { + first_index, + last_index, + first_key: HashValue::zero(), + last_key: HashValue::zero(), + raw_values, + proof: SparseMerkleRangeProof::new(vec![]), + root_hash: HashValue::zero(), + } +} + +/// Returns a dummy transaction for testing purposes +fn create_test_transaction() -> Transaction { + Transaction::StateCheckpoint(HashValue::zero()) +} + +/// Returns a dummy transaction output for testing purposes +fn create_test_transaction_output() -> TransactionOutput { + TransactionOutput::new(WriteSet::default(), vec![], 0, TransactionStatus::Retry) +} diff --git a/state-sync/state-sync-v2/data-streaming-service/src/tests/mod.rs b/state-sync/data-streaming-service/src/tests/mod.rs similarity index 93% rename from state-sync/state-sync-v2/data-streaming-service/src/tests/mod.rs rename to state-sync/data-streaming-service/src/tests/mod.rs index bb6a9c3aa2115..08313eed6217e 100644 --- a/state-sync/state-sync-v2/data-streaming-service/src/tests/mod.rs +++ b/state-sync/data-streaming-service/src/tests/mod.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 mod data_stream; +mod missing_data; mod stream_engine; mod streaming_client; pub mod streaming_service; diff --git a/state-sync/state-sync-v2/data-streaming-service/src/tests/stream_engine.rs b/state-sync/data-streaming-service/src/tests/stream_engine.rs similarity index 78% rename from state-sync/state-sync-v2/data-streaming-service/src/tests/stream_engine.rs rename to state-sync/data-streaming-service/src/tests/stream_engine.rs index f6a0f77b45d91..d16878ac9dc91 100644 --- a/state-sync/state-sync-v2/data-streaming-service/src/tests/stream_engine.rs +++ 
b/state-sync/data-streaming-service/src/tests/stream_engine.rs @@ -7,8 +7,12 @@ use crate::{ error::Error, stream_engine::{DataStreamEngine, EpochEndingStreamEngine, StreamEngine}, streaming_client::{GetAllEpochEndingLedgerInfosRequest, StreamRequest}, - tests::utils::initialize_logger, + tests::{ + utils, + utils::{create_ledger_info, initialize_logger}, + }, }; +use aptos_config::config::DataStreamingServiceConfig; use aptos_data_client::{ global_summary::{GlobalDataSummary, OptimalChunkSizes}, interface::ResponsePayload, @@ -24,7 +28,11 @@ fn test_create_epoch_ending_requests() { let highest_ending_epoch = 900; let mut stream_engine = create_epoch_ending_stream_engine(0, highest_ending_epoch); let client_requests = stream_engine - .create_data_client_requests(5, &create_epoch_ending_chunk_sizes(10000)) + .create_data_client_requests( + 5, + &create_epoch_ending_chunk_sizes(10000), + create_notification_id_generator(), + ) .unwrap(); let expected_requests = vec![DataClientRequest::EpochEndingLedgerInfos( EpochEndingLedgerInfosRequest { @@ -38,7 +46,11 @@ fn test_create_epoch_ending_requests() { let mut stream_engine = create_epoch_ending_stream_engine(0, highest_ending_epoch); let chunk_size = 50; let client_requests = stream_engine - .create_data_client_requests(3, &create_epoch_ending_chunk_sizes(chunk_size)) + .create_data_client_requests( + 3, + &create_epoch_ending_chunk_sizes(chunk_size), + create_notification_id_generator(), + ) .unwrap(); for (i, client_request) in client_requests.iter().enumerate() { let i = i as u64; @@ -54,7 +66,11 @@ fn test_create_epoch_ending_requests() { let mut stream_engine = create_epoch_ending_stream_engine(0, highest_ending_epoch); let chunk_size = 14; let client_requests = stream_engine - .create_data_client_requests(100, &create_epoch_ending_chunk_sizes(chunk_size)) + .create_data_client_requests( + 100, + &create_epoch_ending_chunk_sizes(chunk_size), + create_notification_id_generator(), + ) .unwrap(); for (i, client_request) in client_requests.iter().enumerate() { let i = i as u64; @@ -78,7 +94,11 @@ fn test_create_epoch_ending_requests_dynamic() { // Create a batch of client requests and verify the result let client_requests = stream_engine - .create_data_client_requests(5, &create_epoch_ending_chunk_sizes(700)) + .create_data_client_requests( + 5, + &create_epoch_ending_chunk_sizes(700), + create_notification_id_generator(), + ) .unwrap(); let expected_requests = vec![ DataClientRequest::EpochEndingLedgerInfos(EpochEndingLedgerInfosRequest { @@ -98,7 +118,11 @@ fn test_create_epoch_ending_requests_dynamic() { // Create a batch of client requests and verify the result let chunk_size = 50; let client_requests = stream_engine - .create_data_client_requests(10, &create_epoch_ending_chunk_sizes(chunk_size)) + .create_data_client_requests( + 10, + &create_epoch_ending_chunk_sizes(chunk_size), + create_notification_id_generator(), + ) .unwrap(); for (i, client_request) in client_requests.iter().enumerate() { let i = i as u64; @@ -115,7 +139,11 @@ fn test_create_epoch_ending_requests_dynamic() { // Create a batch of client requests and verify the result let client_requests = stream_engine - .create_data_client_requests(5, &create_epoch_ending_chunk_sizes(700)) + .create_data_client_requests( + 5, + &create_epoch_ending_chunk_sizes(700), + create_notification_id_generator(), + ) .unwrap(); let expected_requests = vec![DataClientRequest::EpochEndingLedgerInfos( EpochEndingLedgerInfosRequest { @@ -129,8 +157,11 @@ fn 
test_create_epoch_ending_requests_dynamic() { stream_engine.next_request_epoch = highest_ending_epoch; // Create a batch of client requests and verify no error - let client_requests = - stream_engine.create_data_client_requests(10, &create_epoch_ending_chunk_sizes(50)); + let client_requests = stream_engine.create_data_client_requests( + 10, + &create_epoch_ending_chunk_sizes(50), + create_notification_id_generator(), + ); assert_ok!(client_requests); } @@ -144,7 +175,12 @@ fn test_epoch_ending_stream_engine() { // Try to create a stream engine where there is no advertised data // and verify an error is returned. - let result = StreamEngine::new(&stream_request, &GlobalDataSummary::empty().advertised_data); + let data_streaming_config = DataStreamingServiceConfig::default(); + let result = StreamEngine::new( + data_streaming_config, + &stream_request, + &GlobalDataSummary::empty().advertised_data, + ); assert_matches!(result, Err(Error::DataIsUnavailable(_))); // Create a data summary with various advertised epoch ranges (highest is one) @@ -158,7 +194,11 @@ fn test_epoch_ending_stream_engine() { ]; // Try to create a stream engine where the highest epoch is one - let result = StreamEngine::new(&stream_request, &global_data_summary.advertised_data); + let result = StreamEngine::new( + data_streaming_config, + &stream_request, + &global_data_summary.advertised_data, + ); assert_ok!(result); // Create a global data summary with non-zero advertised epoch ranges @@ -173,7 +213,13 @@ fn test_epoch_ending_stream_engine() { ]; // Create a new data stream engine and verify the highest epoch is chosen - match StreamEngine::new(&stream_request, &global_data_summary.advertised_data).unwrap() { + match StreamEngine::new( + data_streaming_config, + &stream_request, + &global_data_summary.advertised_data, + ) + .unwrap() + { StreamEngine::EpochEndingStreamEngine(stream_engine) => { assert_eq!(stream_engine.end_epoch, 1000); }, @@ -201,7 +247,7 @@ fn test_update_epoch_ending_stream_progress() { start_epoch, end_epoch, }), - create_empty_client_response_payload(), + create_epoch_ending_ledger_info_payload(start_epoch, end_epoch), create_notification_id_generator(), ) .unwrap(); @@ -218,13 +264,15 @@ fn test_update_epoch_ending_stream_panic() { let mut stream_engine = create_epoch_ending_stream_engine(0, 1000); // Update the engine with a valid notification + let client_response_payload = + ResponsePayload::EpochEndingLedgerInfos(vec![create_ledger_info(0, 0, true)]); let _ = stream_engine .transform_client_response_into_notification( &DataClientRequest::EpochEndingLedgerInfos(EpochEndingLedgerInfosRequest { start_epoch: 0, end_epoch: 100, }), - create_empty_client_response_payload(), + client_response_payload, create_notification_id_generator(), ) .unwrap(); @@ -258,7 +306,14 @@ fn create_epoch_ending_stream_engine(start_epoch: u64, end_epoch: u64) -> EpochE .epoch_ending_ledger_infos = vec![CompleteDataRange::new(start_epoch, end_epoch).unwrap()]; // Create a new epoch ending stream engine - match StreamEngine::new(&stream_request, &global_data_summary.advertised_data).unwrap() { + let data_streaming_config = DataStreamingServiceConfig::default(); + match StreamEngine::new( + data_streaming_config, + &stream_request, + &global_data_summary.advertised_data, + ) + .unwrap() + { StreamEngine::EpochEndingStreamEngine(stream_engine) => stream_engine, unexpected_engine => { panic!( @@ -286,3 +341,11 @@ fn create_notification_id_generator() -> Arc { fn create_empty_client_response_payload() -> ResponsePayload { 
ResponsePayload::EpochEndingLedgerInfos(vec![]) } + +/// Creates a response payload with the given number of epoch ending ledger infos +fn create_epoch_ending_ledger_info_payload(start_epoch: u64, end_epoch: u64) -> ResponsePayload { + let epoch_ending_ledger_infos = (start_epoch..end_epoch + 1) + .map(|i| utils::create_ledger_info(i, i, true)) + .collect(); + ResponsePayload::EpochEndingLedgerInfos(epoch_ending_ledger_infos) +} diff --git a/state-sync/state-sync-v2/data-streaming-service/src/tests/streaming_client.rs b/state-sync/data-streaming-service/src/tests/streaming_client.rs similarity index 100% rename from state-sync/state-sync-v2/data-streaming-service/src/tests/streaming_client.rs rename to state-sync/data-streaming-service/src/tests/streaming_client.rs diff --git a/state-sync/state-sync-v2/data-streaming-service/src/tests/streaming_service.rs b/state-sync/data-streaming-service/src/tests/streaming_service.rs similarity index 54% rename from state-sync/state-sync-v2/data-streaming-service/src/tests/streaming_service.rs rename to state-sync/data-streaming-service/src/tests/streaming_service.rs index fc6054b91277d..8f275a0b13254 100644 --- a/state-sync/state-sync-v2/data-streaming-service/src/tests/streaming_service.rs +++ b/state-sync/data-streaming-service/src/tests/streaming_service.rs @@ -4,6 +4,7 @@ use crate::{ data_notification::DataPayload, + data_stream::DataStreamListener, error::Error, streaming_client::{ new_streaming_service_client_listener_pair, DataStreamingClient, NotificationAndFeedback, @@ -19,6 +20,10 @@ use crate::{ }, }; use aptos_config::config::{AptosDataClientConfig, DataStreamingServiceConfig}; +use aptos_types::{ + ledger_info::LedgerInfoWithSignatures, + transaction::{TransactionListWithProof, TransactionOutputListWithProof}, +}; use claims::{assert_le, assert_matches, assert_ok, assert_some}; macro_rules! 
unexpected_payload_type { @@ -38,91 +43,23 @@ async fn test_notifications_state_values() { .await .unwrap(); - // Read the data notifications from the stream and verify index ordering - let mut next_expected_index = 0; - loop { - let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); - match data_notification.data_payload { - DataPayload::StateValuesWithProof(state_values_with_proof) => { - // Verify the start index matches the expected index - assert_eq!(state_values_with_proof.first_index, next_expected_index); - - // Verify the last index matches the state value list length - let num_state_values = state_values_with_proof.raw_values.len() as u64; - assert_eq!( - state_values_with_proof.last_index, - next_expected_index + num_state_values - 1, - ); - - // Verify the number of state values is as expected - assert_eq!( - state_values_with_proof.raw_values.len() as u64, - num_state_values - ); - - next_expected_index += num_state_values; - }, - DataPayload::EndOfStream => { - return assert_eq!(next_expected_index, TOTAL_NUM_STATE_VALUES) - }, - data_payload => unexpected_payload_type!(data_payload), - } - } + // Verify that the stream listener receives all state value notifications + verify_continuous_state_value_notifications(&mut stream_listener).await } #[tokio::test(flavor = "multi_thread")] async fn test_notifications_state_values_limited_chunks() { - // Create a new streaming client and service + // Create a new streaming client and service where chunks may be truncated let streaming_client = create_streaming_client_and_service_with_chunk_limits(); // Request a new state value stream starting at the next expected index - let mut next_expected_index = 0; let mut stream_listener = streaming_client - .get_all_state_values(MAX_ADVERTISED_STATES, Some(next_expected_index)) + .get_all_state_values(MAX_ADVERTISED_STATES, Some(0)) .await .unwrap(); - // Terminate and request streams when the chunks are no longer contiguous - loop { - let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); - let reset_stream = match data_notification.data_payload { - DataPayload::StateValuesWithProof(state_values_with_proof) => { - if state_values_with_proof.first_index == next_expected_index { - next_expected_index += state_values_with_proof.raw_values.len() as u64; - false - } else { - true // We hit a non-contiguous chunk - } - }, - DataPayload::EndOfStream => { - if next_expected_index != TOTAL_NUM_STATE_VALUES { - true // The stream thought it had completed, but the chunk was incomplete - } else { - return; // All data was received! 
- } - }, - data_payload => unexpected_payload_type!(data_payload), - }; - - if reset_stream { - // Terminate the stream and fetch a new one (we hit non-contiguous data) - streaming_client - .terminate_stream_with_feedback( - stream_listener.data_stream_id, - Some(NotificationAndFeedback::new( - data_notification.notification_id, - NotificationFeedback::InvalidPayloadData, - )), - ) - .await - .unwrap(); - - stream_listener = streaming_client - .get_all_state_values(MAX_ADVERTISED_STATES, Some(next_expected_index)) - .await - .unwrap(); - } - } + // Verify that the stream listener receives all state value notifications + verify_continuous_state_value_notifications(&mut stream_listener).await } #[tokio::test(flavor = "multi_thread")] @@ -144,8 +81,11 @@ async fn test_notifications_state_values_multiple_streams() { DataPayload::StateValuesWithProof(state_values_with_proof) => { // Verify the indices assert_eq!(state_values_with_proof.first_index, next_expected_index); + + // Update the next expected index next_expected_index += state_values_with_proof.raw_values.len() as u64; + // Terminate the stream if we haven't reached the end if next_expected_index < TOTAL_NUM_STATE_VALUES { // Terminate the stream streaming_client @@ -167,7 +107,8 @@ async fn test_notifications_state_values_multiple_streams() { } }, DataPayload::EndOfStream => { - return assert_eq!(next_expected_index, TOTAL_NUM_STATE_VALUES) + assert_eq!(next_expected_index, TOTAL_NUM_STATE_VALUES); + return; // We've reached the end }, data_payload => unexpected_payload_type!(data_payload), } @@ -189,40 +130,27 @@ async fn test_notifications_continuous_outputs() { .await .unwrap(); - // Read the data notifications from the stream and verify the payloads - let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; - let mut next_expected_version = MIN_ADVERTISED_TRANSACTION_OUTPUT; - loop { - if let Ok(data_notification) = get_data_notification(&mut stream_listener).await { - match data_notification.data_payload { - DataPayload::ContinuousTransactionOutputsWithProof( - ledger_info_with_sigs, - outputs_with_proofs, - ) => { - let ledger_info = ledger_info_with_sigs.ledger_info(); - // Verify the epoch of the ledger info - assert_eq!(ledger_info.epoch(), next_expected_epoch); + // Verify that the stream listener receives all output notifications + verify_continuous_output_notifications(&mut stream_listener, false).await +} - // Verify the output start version matches the expected version - let first_output_version = outputs_with_proofs.first_transaction_output_version; - assert_eq!(Some(next_expected_version), first_output_version); +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_continuous_outputs_limited_chunks() { + // Create a new streaming client and service where chunks may be truncated + let streaming_client = create_streaming_client_and_service_with_chunk_limits(); - let num_outputs = outputs_with_proofs.transactions_and_outputs.len() as u64; - next_expected_version += num_outputs; + // Request a continuous output stream starting at the next expected version + let mut stream_listener = streaming_client + .continuously_stream_transaction_outputs( + MIN_ADVERTISED_TRANSACTION_OUTPUT - 1, + MIN_ADVERTISED_EPOCH_END, + None, + ) + .await + .unwrap(); - // Update epochs if we've hit the epoch end - let last_output_version = first_output_version.unwrap() + num_outputs - 1; - if ledger_info.version() == last_output_version && ledger_info.ends_epoch() { - next_expected_epoch += 1; - } - }, - data_payload => 
unexpected_payload_type!(data_payload), - } - } else { - assert_eq!(next_expected_epoch, MAX_ADVERTISED_EPOCH_END + 1); - return assert_eq!(next_expected_version, MAX_ADVERTISED_TRANSACTION_OUTPUT + 1); - } - } + // Verify that the stream listener receives all output notifications + verify_continuous_output_notifications(&mut stream_listener, false).await } #[tokio::test(flavor = "multi_thread")] @@ -252,14 +180,15 @@ async fn test_notifications_continuous_outputs_target() { ledger_info_with_sigs, outputs_with_proofs, ) => { - let ledger_info = ledger_info_with_sigs.ledger_info(); // Verify the epoch of the ledger info + let ledger_info = ledger_info_with_sigs.ledger_info(); assert_eq!(ledger_info.epoch(), next_expected_epoch); // Verify the output start version matches the expected version let first_output_version = outputs_with_proofs.first_transaction_output_version; assert_eq!(Some(next_expected_version), first_output_version); + // Update the next expected version let num_outputs = outputs_with_proofs.transactions_and_outputs.len() as u64; next_expected_version += num_outputs; @@ -277,84 +206,6 @@ async fn test_notifications_continuous_outputs_target() { } } -#[tokio::test(flavor = "multi_thread")] -async fn test_notifications_continuous_outputs_limited_chunks() { - // Create a new streaming client and service - let streaming_client = create_streaming_client_and_service_with_chunk_limits(); - let end_epoch = MIN_ADVERTISED_EPOCH_END + 5; - - // Request a continuous output stream starting at the next expected version - let mut next_expected_version = MIN_ADVERTISED_TRANSACTION_OUTPUT; - let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; - let mut stream_listener = streaming_client - .continuously_stream_transaction_outputs( - next_expected_version - 1, - next_expected_epoch, - None, - ) - .await - .unwrap(); - - // Terminate and request new streams when the chunks are no longer contiguous - loop { - let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); - let reset_stream = match data_notification.data_payload { - DataPayload::ContinuousTransactionOutputsWithProof( - ledger_info_with_sigs, - outputs_with_proofs, - ) => { - let first_output_version = outputs_with_proofs - .first_transaction_output_version - .unwrap(); - let num_outputs = outputs_with_proofs.transactions_and_outputs.len() as u64; - let last_output_version = first_output_version + num_outputs - 1; - - if first_output_version == next_expected_version { - // Update the next version and epoch (if applicable) - next_expected_version += num_outputs; - let ledger_info = ledger_info_with_sigs.ledger_info(); - if ledger_info.version() == last_output_version && ledger_info.ends_epoch() { - next_expected_epoch += 1; - } - - // Check if we've hit the target epoch - if next_expected_epoch > end_epoch { - return; // All data was received! 
- } - - false - } else { - true // We hit a non-contiguous chunk - } - }, - data_payload => unexpected_payload_type!(data_payload), - }; - - if reset_stream { - // Terminate the stream and fetch a new one (we hit non-contiguous data) - streaming_client - .terminate_stream_with_feedback( - stream_listener.data_stream_id, - Some(NotificationAndFeedback::new( - data_notification.notification_id, - NotificationFeedback::InvalidPayloadData, - )), - ) - .await - .unwrap(); - - stream_listener = streaming_client - .continuously_stream_transaction_outputs( - next_expected_version - 1, - next_expected_epoch, - None, - ) - .await - .unwrap(); - } - } -} - #[tokio::test(flavor = "multi_thread")] async fn test_notifications_continuous_outputs_multiple_streams() { // Create a new streaming client and service @@ -381,17 +232,22 @@ async fn test_notifications_continuous_outputs_multiple_streams() { ledger_info_with_sigs, outputs_with_proofs, ) => { + // Verify the first output version let first_output_version = outputs_with_proofs.first_transaction_output_version; assert_eq!(Some(next_expected_version), first_output_version); + // Update the next expected version let num_outputs = outputs_with_proofs.transactions_and_outputs.len() as u64; next_expected_version += num_outputs; + // Update the next expected epoch if we've hit the epoch end let last_output_version = first_output_version.unwrap() + num_outputs - 1; let ledger_info = ledger_info_with_sigs.ledger_info(); if ledger_info.version() == last_output_version && ledger_info.ends_epoch() { next_expected_epoch += 1; } + + // Terminate the stream if we haven't reached the end if next_expected_version < MAX_ADVERTISED_TRANSACTION_OUTPUT { // Terminate the stream streaming_client @@ -415,6 +271,8 @@ async fn test_notifications_continuous_outputs_multiple_streams() { .await .unwrap(); } + + // Check if we've reached the end if next_expected_epoch > end_epoch { return; } @@ -440,126 +298,28 @@ async fn test_notifications_continuous_transactions() { .await .unwrap(); - // Read the data notifications from the stream and verify the payloads - let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; - let mut next_expected_version = MIN_ADVERTISED_TRANSACTION; - loop { - if let Ok(data_notification) = get_data_notification(&mut stream_listener).await { - match data_notification.data_payload { - DataPayload::ContinuousTransactionsWithProof( - ledger_info_with_sigs, - transactions_with_proof, - ) => { - let ledger_info = ledger_info_with_sigs.ledger_info(); - // Verify the epoch of the ledger info - assert_eq!(ledger_info.epoch(), next_expected_epoch); - - // Verify the transaction start version matches the expected version - let first_transaction_version = - transactions_with_proof.first_transaction_version; - assert_eq!(Some(next_expected_version), first_transaction_version); - - // Verify the payload contains events - assert_some!(transactions_with_proof.events); - - let num_transactions = transactions_with_proof.transactions.len() as u64; - next_expected_version += num_transactions; - - // Update epochs if we've hit the epoch end - let last_transaction_version = - first_transaction_version.unwrap() + num_transactions - 1; - if ledger_info.version() == last_transaction_version && ledger_info.ends_epoch() - { - next_expected_epoch += 1; - } - }, - data_payload => unexpected_payload_type!(data_payload), - } - } else { - assert_eq!(next_expected_epoch, MAX_ADVERTISED_EPOCH_END + 1); - return assert_eq!(next_expected_version, MAX_ADVERTISED_TRANSACTION + 1); - } - } 
+ // Verify that the stream listener receives all transaction notifications + verify_continuous_transaction_notifications(&mut stream_listener, false).await } #[tokio::test(flavor = "multi_thread")] async fn test_notifications_continuous_transactions_limited_chunks() { - // Create a new streaming client and service + // Create a new streaming client and service where chunks may be truncated let streaming_client = create_streaming_client_and_service_with_chunk_limits(); - let end_epoch = MIN_ADVERTISED_EPOCH_END + 5; // Request a continuous transaction stream and get a data stream listener - let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; - let mut next_expected_version = MIN_ADVERTISED_TRANSACTION; let mut stream_listener = streaming_client .continuously_stream_transactions( - next_expected_version - 1, - next_expected_epoch, + MIN_ADVERTISED_TRANSACTION - 1, + MIN_ADVERTISED_EPOCH_END, true, None, ) .await .unwrap(); - // Terminate and request new streams when the chunks are no longer contiguous - loop { - let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); - let reset_stream = match data_notification.data_payload { - DataPayload::ContinuousTransactionsWithProof( - ledger_info_with_sigs, - transactions_with_proofs, - ) => { - let first_transaction_version = - transactions_with_proofs.first_transaction_version.unwrap(); - let num_transactions = transactions_with_proofs.transactions.len() as u64; - let last_transaction_version = first_transaction_version + num_transactions - 1; - - if first_transaction_version == next_expected_version { - // Update the next version and epoch (if applicable) - next_expected_version += num_transactions; - let ledger_info = ledger_info_with_sigs.ledger_info(); - if ledger_info.version() == last_transaction_version && ledger_info.ends_epoch() - { - next_expected_epoch += 1; - } - - // Check if we've hit the target epoch - if next_expected_epoch > end_epoch { - return; // All data was received! 
- } - - false - } else { - true // We hit a non-contiguous chunk - } - }, - data_payload => unexpected_payload_type!(data_payload), - }; - - if reset_stream { - // Terminate the stream and fetch a new one (we hit non-contiguous data) - streaming_client - .terminate_stream_with_feedback( - stream_listener.data_stream_id, - Some(NotificationAndFeedback::new( - data_notification.notification_id, - NotificationFeedback::InvalidPayloadData, - )), - ) - .await - .unwrap(); - - stream_listener = streaming_client - .continuously_stream_transactions( - next_expected_version - 1, - next_expected_epoch, - true, - None, - ) - .await - .unwrap(); - } - } + // Verify that the stream listener receives all transaction notifications + verify_continuous_transaction_notifications(&mut stream_listener, false).await } #[tokio::test(flavor = "multi_thread")] @@ -590,8 +350,8 @@ async fn test_notifications_continuous_transactions_target() { ledger_info_with_sigs, transactions_with_proof, ) => { - let ledger_info = ledger_info_with_sigs.ledger_info(); // Verify the epoch of the ledger info + let ledger_info = ledger_info_with_sigs.ledger_info(); assert_eq!(ledger_info.epoch(), next_expected_epoch); // Verify the transaction start version matches the expected version @@ -601,6 +361,7 @@ async fn test_notifications_continuous_transactions_target() { // Verify the payload contains events assert_some!(transactions_with_proof.events); + // Update the next expected version let num_transactions = transactions_with_proof.transactions.len() as u64; next_expected_version += num_transactions; @@ -620,75 +381,281 @@ async fn test_notifications_continuous_transactions_target() { } #[tokio::test(flavor = "multi_thread")] -async fn test_notifications_epoch_ending() { +async fn test_notifications_continuous_transactions_multiple_streams() { // Create a new streaming client and service let streaming_client = create_streaming_client_and_service(); + let end_epoch = MIN_ADVERTISED_EPOCH_END + 5; - // Request an epoch ending stream and get a data stream listener + // Request a continuous transaction stream starting at the next expected version + let mut next_expected_version = MIN_ADVERTISED_TRANSACTION; + let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; let mut stream_listener = streaming_client - .get_all_epoch_ending_ledger_infos(MIN_ADVERTISED_EPOCH_END) + .continuously_stream_transactions( + next_expected_version - 1, + next_expected_epoch, + true, + None, + ) .await .unwrap(); - // Read the data notifications from the stream and verify epoch ordering - let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; + // Terminate and request new transaction streams at increasing versions loop { let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); match data_notification.data_payload { - DataPayload::EpochEndingLedgerInfos(ledger_infos_with_sigs) => { - // Verify the epochs of the ledger infos are contiguous - for ledger_info_with_sigs in ledger_infos_with_sigs { - let epoch = ledger_info_with_sigs.ledger_info().commit_info().epoch(); - assert_eq!(next_expected_epoch, epoch); - assert_le!(epoch, MAX_ADVERTISED_EPOCH_END); + DataPayload::ContinuousTransactionsWithProof( + ledger_info_with_sigs, + transactions_with_proofs, + ) => { + // Verify the first transaction version + let first_transaction_version = transactions_with_proofs.first_transaction_version; + assert_eq!(Some(next_expected_version), first_transaction_version); + + // Update the next expected version + let num_transactions = 
transactions_with_proofs.transactions.len() as u64; + next_expected_version += num_transactions; + + // Update the next expected epoch if we've hit the epoch end + let last_transaction_version = + first_transaction_version.unwrap() + num_transactions - 1; + let ledger_info = ledger_info_with_sigs.ledger_info(); + if ledger_info.version() == last_transaction_version && ledger_info.ends_epoch() { next_expected_epoch += 1; } - }, - DataPayload::EndOfStream => { - return assert_eq!(next_expected_epoch, MAX_ADVERTISED_EPOCH_END + 1) - }, - data_payload => unexpected_payload_type!(data_payload), - } - } -} - -#[tokio::test(flavor = "multi_thread")] -async fn test_notifications_epoch_ending_limited_chunks() { - // Create a new streaming client and service - let streaming_client = create_streaming_client_and_service_with_chunk_limits(); - // Request a new epoch ending stream starting at the next expected index. - let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; - let mut stream_listener = streaming_client - .get_all_epoch_ending_ledger_infos(next_expected_epoch) - .await - .unwrap(); + // Terminate the stream if we haven't reached the end + if next_expected_version < MAX_ADVERTISED_TRANSACTION_OUTPUT { + // Terminate the stream + streaming_client + .terminate_stream_with_feedback( + stream_listener.data_stream_id, + Some(NotificationAndFeedback::new( + data_notification.notification_id, + NotificationFeedback::InvalidPayloadData, + )), + ) + .await + .unwrap(); - // Terminate and request streams when the chunks are no longer contiguous - loop { - let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); - let reset_stream = match data_notification.data_payload { - DataPayload::EpochEndingLedgerInfos(ledger_infos_with_sigs) => { - let first_ledger_info_epoch = ledger_infos_with_sigs[0].ledger_info().epoch(); - if first_ledger_info_epoch == next_expected_epoch { - next_expected_epoch += ledger_infos_with_sigs.len() as u64; - false - } else { - true // We hit a non-contiguous chunk + // Fetch a new stream + stream_listener = streaming_client + .continuously_stream_transactions( + next_expected_version - 1, + next_expected_epoch, + true, + None, + ) + .await + .unwrap(); } - }, - DataPayload::EndOfStream => { - if next_expected_epoch != MAX_ADVERTISED_EPOCH_END + 1 { - true // The stream thought it had completed, but the chunk was incomplete - } else { - return; // All data was received! 
+ + // Check if we've reached the end + if next_expected_epoch > end_epoch { + return; } }, data_payload => unexpected_payload_type!(data_payload), + } + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_continuous_transactions_or_outputs() { + // Create a new streaming client and service + let streaming_client = create_streaming_client_and_service(); + + // Request a continuous transaction or output stream and get a data stream listener + let mut stream_listener = streaming_client + .continuously_stream_transactions_or_outputs( + MIN_ADVERTISED_TRANSACTION - 1, + MIN_ADVERTISED_EPOCH_END, + true, + None, + ) + .await + .unwrap(); + + // Verify that the stream listener receives all transaction or output notifications + verify_continuous_transaction_or_output_notifications(&mut stream_listener, false).await +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_continuous_transactions_or_outputs_limited_chunks() { + // Create a new streaming client and service where chunks may be truncated + let streaming_client = create_streaming_client_and_service_with_chunk_limits(); + + // Request a continuous transaction or output stream and get a data stream listener + let mut stream_listener = streaming_client + .continuously_stream_transactions_or_outputs( + MIN_ADVERTISED_TRANSACTION - 1, + MIN_ADVERTISED_EPOCH_END, + true, + None, + ) + .await + .unwrap(); + + // Verify that the stream listener receives all transaction or output notifications + verify_continuous_transaction_or_output_notifications(&mut stream_listener, false).await +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_continuous_transactions_or_outputs_target() { + // Create a new streaming client and service + let streaming_client = create_streaming_client_and_service(); + + // Request a continuous transaction or output stream and get a data stream listener + let target_version = MAX_ADVERTISED_TRANSACTION - 101; + let target = create_ledger_info(target_version, MAX_ADVERTISED_EPOCH_END, true); + let mut stream_listener = streaming_client + .continuously_stream_transactions_or_outputs( + MIN_ADVERTISED_TRANSACTION - 1, + MIN_ADVERTISED_EPOCH_END, + true, + Some(target), + ) + .await + .unwrap(); + + // Read the data notifications from the stream and verify the payloads + let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; + let mut next_expected_version = MIN_ADVERTISED_TRANSACTION; + loop { + let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); + + // Extract the ledger info and transactions or outputs with proof + let (ledger_info_with_sigs, transactions_with_proof, outputs_with_proof) = + match data_notification.data_payload { + DataPayload::ContinuousTransactionsWithProof( + ledger_info_with_sigs, + transactions_with_proof, + ) => (ledger_info_with_sigs, Some(transactions_with_proof), None), + DataPayload::ContinuousTransactionOutputsWithProof( + ledger_info_with_sigs, + outputs_with_proof, + ) => (ledger_info_with_sigs, None, Some(outputs_with_proof)), + DataPayload::EndOfStream => { + return assert_eq!(next_expected_version, target_version + 1) + }, + data_payload => unexpected_payload_type!(data_payload), + }; + + // Verify the epoch of the ledger info + let ledger_info = ledger_info_with_sigs.ledger_info(); + assert_eq!(ledger_info.epoch(), next_expected_epoch); + + // Verify the transactions or outputs start version matches the expected version + let first_version = if transactions_with_proof.is_some() { + 
transactions_with_proof + .clone() + .unwrap() + .first_transaction_version + } else { + outputs_with_proof + .clone() + .unwrap() + .first_transaction_output_version }; + assert_eq!(Some(next_expected_version), first_version); + + // Verify the payload contains events + if transactions_with_proof.is_some() { + assert_some!(transactions_with_proof.clone().unwrap().events); + } - if reset_stream { - // Terminate the stream and fetch a new one (we hit non-contiguous data) + // Update the next expected version + let num_transactions = if transactions_with_proof.is_some() { + transactions_with_proof.clone().unwrap().transactions.len() as u64 + } else { + outputs_with_proof + .clone() + .unwrap() + .transactions_and_outputs + .len() as u64 + }; + next_expected_version += num_transactions; + + // Update epochs if we've hit the epoch end + let last_transaction_version = first_version.unwrap() + num_transactions - 1; + if ledger_info.version() == last_transaction_version && ledger_info.ends_epoch() { + next_expected_epoch += 1; + } + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_continuous_transactions_or_outputs_multiple_streams() { + // Create a new streaming client and service + let streaming_client = create_streaming_client_and_service(); + let end_epoch = MIN_ADVERTISED_EPOCH_END + 5; + + // Request a continuous transaction or output stream starting at the next expected version + let mut next_expected_version = MIN_ADVERTISED_TRANSACTION; + let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; + let mut stream_listener = streaming_client + .continuously_stream_transactions_or_outputs( + next_expected_version - 1, + next_expected_epoch, + true, + None, + ) + .await + .unwrap(); + + // Terminate and request new transaction or output streams at increasing versions + loop { + let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); + + // Extract the ledger info and transactions or outputs with proof + let (ledger_info_with_sigs, transactions_with_proof, outputs_with_proof) = + match data_notification.data_payload { + DataPayload::ContinuousTransactionsWithProof( + ledger_info_with_sigs, + transactions_with_proof, + ) => (ledger_info_with_sigs, Some(transactions_with_proof), None), + DataPayload::ContinuousTransactionOutputsWithProof( + ledger_info_with_sigs, + outputs_with_proof, + ) => (ledger_info_with_sigs, None, Some(outputs_with_proof)), + data_payload => unexpected_payload_type!(data_payload), + }; + + // Verify the first transaction or output version + let first_version = if transactions_with_proof.is_some() { + transactions_with_proof + .clone() + .unwrap() + .first_transaction_version + } else { + outputs_with_proof + .clone() + .unwrap() + .first_transaction_output_version + }; + assert_eq!(Some(next_expected_version), first_version); + + // Update the next expected version + let num_transactions = if transactions_with_proof.is_some() { + transactions_with_proof.clone().unwrap().transactions.len() as u64 + } else { + outputs_with_proof + .clone() + .unwrap() + .transactions_and_outputs + .len() as u64 + }; + next_expected_version += num_transactions; + + // Update the next expected epoch if we've hit the epoch end + let last_transaction_version = first_version.unwrap() + num_transactions - 1; + let ledger_info = ledger_info_with_sigs.ledger_info(); + if ledger_info.version() == last_transaction_version && ledger_info.ends_epoch() { + next_expected_epoch += 1; + } + + // Terminate the stream if we haven't reached the end + if 
next_expected_version < MAX_ADVERTISED_TRANSACTION_OUTPUT { + // Terminate the stream streaming_client .terminate_stream_with_feedback( stream_listener.data_stream_id, @@ -700,14 +667,55 @@ async fn test_notifications_epoch_ending_limited_chunks() { .await .unwrap(); + // Fetch a new stream stream_listener = streaming_client - .get_all_epoch_ending_ledger_infos(next_expected_epoch) + .continuously_stream_transactions_or_outputs( + next_expected_version - 1, + next_expected_epoch, + true, + None, + ) .await .unwrap(); } + + // Check if we've reached the end + if next_expected_epoch > end_epoch { + return; + } } } +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_epoch_ending() { + // Create a new streaming client and service + let streaming_client = create_streaming_client_and_service(); + + // Request an epoch ending stream and get a data stream listener + let mut stream_listener = streaming_client + .get_all_epoch_ending_ledger_infos(MIN_ADVERTISED_EPOCH_END) + .await + .unwrap(); + + // Verify that the stream listener receives all epoch ending notifications + verify_continuous_epoch_ending_notifications(&mut stream_listener).await +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_epoch_ending_limited_chunks() { + // Create a new streaming client and service where chunks may be truncated + let streaming_client = create_streaming_client_and_service_with_chunk_limits(); + + // Request a new epoch ending stream starting at the next expected index. + let mut stream_listener = streaming_client + .get_all_epoch_ending_ledger_infos(MIN_ADVERTISED_EPOCH_END) + .await + .unwrap(); + + // Verify that the stream listener receives all epoch ending notifications + verify_continuous_epoch_ending_notifications(&mut stream_listener).await +} + #[tokio::test(flavor = "multi_thread")] async fn test_notifications_epoch_ending_multiple_streams() { // Create a new streaming client and service @@ -725,7 +733,10 @@ async fn test_notifications_epoch_ending_multiple_streams() { let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); match data_notification.data_payload { DataPayload::EpochEndingLedgerInfos(ledger_infos_with_sigs) => { + // Update the next expected epoch next_expected_epoch += ledger_infos_with_sigs.len() as u64; + + // Terminate the stream if we haven't reached the end if next_expected_epoch < MAX_ADVERTISED_EPOCH_END { // Terminate the stream streaming_client @@ -747,7 +758,8 @@ async fn test_notifications_epoch_ending_multiple_streams() { } }, DataPayload::EndOfStream => { - return assert_eq!(next_expected_epoch, MAX_ADVERTISED_EPOCH_END + 1) + assert_eq!(next_expected_epoch, MAX_ADVERTISED_EPOCH_END + 1); + return; // We've reached the end }, data_payload => unexpected_payload_type!(data_payload), } @@ -757,7 +769,9 @@ async fn test_notifications_epoch_ending_multiple_streams() { #[tokio::test(flavor = "multi_thread")] async fn test_notifications_optimistic_fetch_outputs() { // Create a new streaming client and service - let streaming_client = create_streaming_client_and_service_with_data_delay(); + let enable_subscription_streaming = false; + let streaming_client = + create_streaming_client_and_service_with_data_delay(enable_subscription_streaming); // Request a continuous output stream and get a data stream listener let mut stream_listener = streaming_client @@ -769,46 +783,117 @@ async fn test_notifications_optimistic_fetch_outputs() { .await .unwrap(); - // Read the data notifications from the stream and verify the 
payloads - let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; - let mut next_expected_version = MIN_ADVERTISED_TRANSACTION_OUTPUT; - loop { - if let Ok(data_notification) = get_data_notification(&mut stream_listener).await { - match data_notification.data_payload { - DataPayload::ContinuousTransactionOutputsWithProof( - ledger_info_with_sigs, - outputs_with_proofs, - ) => { - let ledger_info = ledger_info_with_sigs.ledger_info(); - // Verify the epoch of the ledger info - assert_eq!(ledger_info.epoch(), next_expected_epoch); + // Verify that the stream listener receives all output notifications + verify_continuous_output_notifications(&mut stream_listener, true).await; +} - // Verify the output start version matches the expected version - let first_output_version = outputs_with_proofs.first_transaction_output_version; - assert_eq!(Some(next_expected_version), first_output_version); +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_optimistic_fetch_transactions() { + // Create a new streaming client and service + let enable_subscription_streaming = false; + let streaming_client = + create_streaming_client_and_service_with_data_delay(enable_subscription_streaming); - let num_outputs = outputs_with_proofs.transactions_and_outputs.len() as u64; - next_expected_version += num_outputs; + // Request a continuous transaction stream and get a data stream listener + let mut stream_listener = streaming_client + .continuously_stream_transactions( + MIN_ADVERTISED_TRANSACTION - 1, + MIN_ADVERTISED_EPOCH_END, + false, + None, + ) + .await + .unwrap(); - // Update epochs if we've hit the epoch end - let last_output_version = first_output_version.unwrap() + num_outputs - 1; - if ledger_info.version() == last_output_version && ledger_info.ends_epoch() { - next_expected_epoch += 1; - } - }, - data_payload => unexpected_payload_type!(data_payload), - } - } else { - assert_eq!(next_expected_epoch, MAX_REAL_EPOCH_END + 1); - return assert_eq!(next_expected_version, MAX_REAL_TRANSACTION_OUTPUT + 1); - } - } + // Verify that the stream listener receives all transaction notifications + verify_continuous_transaction_notifications(&mut stream_listener, true).await; } #[tokio::test(flavor = "multi_thread")] -async fn test_notifications_optimistic_fetch_transactions() { +async fn test_notifications_optimistic_fetch_transactions_or_outputs() { + // Create a new streaming client and service + let enable_subscription_streaming = false; + let streaming_client = + create_streaming_client_and_service_with_data_delay(enable_subscription_streaming); + + // Request a continuous transaction or output stream and get a data stream listener + let mut stream_listener = streaming_client + .continuously_stream_transactions_or_outputs( + MIN_ADVERTISED_TRANSACTION - 1, + MIN_ADVERTISED_EPOCH_END, + false, + None, + ) + .await + .unwrap(); + + // Verify that the stream listener receives all transaction or output notifications + verify_continuous_transaction_or_output_notifications(&mut stream_listener, true).await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_subscribe_outputs() { // Create a new streaming client and service - let streaming_client = create_streaming_client_and_service_with_data_delay(); + let enable_subscription_streaming = true; + let streaming_client = + create_streaming_client_and_service_with_data_delay(enable_subscription_streaming); + + // Request a continuous output stream and get a data stream listener + let mut stream_listener = streaming_client + 
.continuously_stream_transaction_outputs( + MIN_ADVERTISED_TRANSACTION_OUTPUT - 1, + MIN_ADVERTISED_EPOCH_END, + None, + ) + .await + .unwrap(); + + // Verify that the stream listener receives all output notifications + verify_continuous_output_notifications(&mut stream_listener, true).await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_subscribe_outputs_small_max() { + // Create a data streaming service config with subscription + // syncing enabled and a small max consecutive subscriptions. + let enable_subscription_streaming = true; + let streaming_service_config = DataStreamingServiceConfig { + enable_subscription_streaming, + max_num_consecutive_subscriptions: 2, + ..Default::default() + }; + + // Create a new streaming client and service + let data_beyond_highest_advertised = false; + let streaming_client = create_streaming_client_and_spawn_server( + Some(streaming_service_config), + data_beyond_highest_advertised, + false, + false, + enable_subscription_streaming, + ); + + // Request a continuous output stream and get a data stream listener + let mut stream_listener = streaming_client + .continuously_stream_transaction_outputs( + MIN_ADVERTISED_TRANSACTION_OUTPUT - 1, + MIN_ADVERTISED_EPOCH_END, + None, + ) + .await + .unwrap(); + + // Verify that the stream listener receives all output notifications + verify_continuous_output_notifications(&mut stream_listener, data_beyond_highest_advertised) + .await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_subscribe_transactions() { + // Create a new streaming client and service + let enable_subscription_streaming = true; + let streaming_client = + create_streaming_client_and_service_with_data_delay(enable_subscription_streaming); // Request a continuous transaction stream and get a data stream listener let mut stream_listener = streaming_client @@ -821,43 +906,110 @@ async fn test_notifications_optimistic_fetch_transactions() { .await .unwrap(); - // Read the data notifications from the stream and verify the payloads - let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; - let mut next_expected_version = MIN_ADVERTISED_TRANSACTION; - loop { - if let Ok(data_notification) = get_data_notification(&mut stream_listener).await { - match data_notification.data_payload { - DataPayload::ContinuousTransactionsWithProof( - ledger_info_with_sigs, - transactions_with_proofs, - ) => { - let ledger_info = ledger_info_with_sigs.ledger_info(); - // Verify the epoch of the ledger info - assert_eq!(ledger_info.epoch(), next_expected_epoch); - - // Verify the transaction start version matches the expected version - let first_transaction_version = - transactions_with_proofs.first_transaction_version; - assert_eq!(Some(next_expected_version), first_transaction_version); - - let num_transactions = transactions_with_proofs.transactions.len() as u64; - next_expected_version += num_transactions; - - // Update epochs if we've hit the epoch end - let last_transaction_version = - first_transaction_version.unwrap() + num_transactions - 1; - if ledger_info.version() == last_transaction_version && ledger_info.ends_epoch() - { - next_expected_epoch += 1; - } - }, - data_payload => unexpected_payload_type!(data_payload), - } - } else { - assert_eq!(next_expected_epoch, MAX_REAL_EPOCH_END + 1); - return assert_eq!(next_expected_version, MAX_REAL_TRANSACTION + 1); - } - } + // Verify that the stream listener receives all transaction notifications + verify_continuous_transaction_notifications(&mut stream_listener, 
true).await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_subscribe_transactions_small_max() { + // Create a data streaming service config with subscription + // syncing enabled and a small max consecutive subscriptions. + let enable_subscription_streaming = true; + let streaming_service_config = DataStreamingServiceConfig { + enable_subscription_streaming, + max_num_consecutive_subscriptions: 2, + ..Default::default() + }; + + // Create a new streaming client and service + let data_beyond_highest_advertised = true; + let streaming_client = create_streaming_client_and_spawn_server( + Some(streaming_service_config), + true, + false, + false, + enable_subscription_streaming, + ); + + // Request a continuous transaction stream and get a data stream listener + let mut stream_listener = streaming_client + .continuously_stream_transactions( + MIN_ADVERTISED_TRANSACTION - 1, + MIN_ADVERTISED_EPOCH_END, + false, + None, + ) + .await + .unwrap(); + + // Verify that the stream listener receives all transaction notifications + verify_continuous_transaction_notifications( + &mut stream_listener, + data_beyond_highest_advertised, + ) + .await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_subscribe_transactions_or_outputs() { + // Create a new streaming client and service + let enable_subscription_streaming = true; + let streaming_client = + create_streaming_client_and_service_with_data_delay(enable_subscription_streaming); + + // Request a continuous transaction or output stream and get a data stream listener + let mut stream_listener = streaming_client + .continuously_stream_transactions_or_outputs( + MIN_ADVERTISED_TRANSACTION - 1, + MIN_ADVERTISED_EPOCH_END, + false, + None, + ) + .await + .unwrap(); + + // Verify that the stream listener receives all transaction or output notifications + verify_continuous_transaction_or_output_notifications(&mut stream_listener, true).await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_subscribe_transactions_or_outputs_small_max() { + // Create a data streaming service config with subscription + // syncing enabled and a small max consecutive subscriptions. 
+ let enable_subscription_streaming = true; + let streaming_service_config = DataStreamingServiceConfig { + enable_subscription_streaming, + max_num_consecutive_subscriptions: 2, + ..Default::default() + }; + + // Create a new streaming client and service + let data_beyond_highest_advertised = true; + let streaming_client = create_streaming_client_and_spawn_server( + Some(streaming_service_config), + data_beyond_highest_advertised, + false, + false, + enable_subscription_streaming, + ); + + // Request a continuous transaction or output stream and get a data stream listener + let mut stream_listener = streaming_client + .continuously_stream_transactions_or_outputs( + MIN_ADVERTISED_TRANSACTION - 1, + MIN_ADVERTISED_EPOCH_END, + false, + None, + ) + .await + .unwrap(); + + // Verify that the stream listener receives all transaction or output notifications + verify_continuous_transaction_or_output_notifications( + &mut stream_listener, + data_beyond_highest_advertised, + ) + .await; } #[tokio::test(flavor = "multi_thread")] @@ -866,41 +1018,53 @@ async fn test_notifications_transaction_outputs() { let streaming_client = create_streaming_client_and_service(); // Request a transaction output stream and get a data stream listener - let mut next_expected_version = MIN_ADVERTISED_TRANSACTION_OUTPUT; let mut stream_listener = streaming_client .get_all_transaction_outputs( - next_expected_version, + MIN_ADVERTISED_TRANSACTION_OUTPUT, MAX_ADVERTISED_TRANSACTION_OUTPUT, MAX_ADVERTISED_TRANSACTION_OUTPUT, ) .await .unwrap(); - // Read the data notifications from the stream and verify the payloads - loop { - let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); - match data_notification.data_payload { - DataPayload::TransactionOutputsWithProof(outputs_with_proof) => { - // Verify the transaction output start version matches the expected version - let first_output_version = outputs_with_proof.first_transaction_output_version; - assert_eq!(Some(next_expected_version), first_output_version); - - let num_outputs = outputs_with_proof.transactions_and_outputs.len(); - next_expected_version += num_outputs as u64; - }, - DataPayload::EndOfStream => { - return assert_eq!(next_expected_version, MAX_ADVERTISED_TRANSACTION_OUTPUT + 1) - }, - data_payload => unexpected_payload_type!(data_payload), - } - } + // Verify that the stream listener receives all output notifications + verify_output_notifications( + &mut stream_listener, + MIN_ADVERTISED_TRANSACTION_OUTPUT, + MAX_ADVERTISED_TRANSACTION_OUTPUT, + ) + .await; } #[tokio::test(flavor = "multi_thread")] async fn test_notifications_transaction_outputs_limited_chunks() { - // Create a new streaming client and service + // Create a new streaming client and service where chunks may be truncated let streaming_client = create_streaming_client_and_service_with_chunk_limits(); + // Request a transaction output stream starting at the next expected version + let mut stream_listener = streaming_client + .get_all_transaction_outputs( + MIN_ADVERTISED_TRANSACTION_OUTPUT, + MAX_ADVERTISED_TRANSACTION_OUTPUT, + MAX_ADVERTISED_TRANSACTION_OUTPUT, + ) + .await + .unwrap(); + + // Verify that the stream listener receives all output notifications + verify_output_notifications( + &mut stream_listener, + MIN_ADVERTISED_TRANSACTION_OUTPUT, + MAX_ADVERTISED_TRANSACTION_OUTPUT, + ) + .await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_notifications_transaction_outputs_multiple_streams() { + // Create a new streaming client and service + let 
streaming_client = create_streaming_client_and_service(); + // Request a transaction output stream starting at the next expected version let mut next_expected_version = MIN_ADVERTISED_TRANSACTION_OUTPUT; let mut stream_listener = streaming_client @@ -912,52 +1076,48 @@ async fn test_notifications_transaction_outputs_limited_chunks() { .await .unwrap(); - // Terminate and request streams when the chunks are no longer contiguous + // Terminate and request new output streams at increasing versions loop { let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); - let reset_stream = match data_notification.data_payload { + match data_notification.data_payload { DataPayload::TransactionOutputsWithProof(outputs_with_proof) => { - let first_output_version = - outputs_with_proof.first_transaction_output_version.unwrap(); - if first_output_version == next_expected_version { - next_expected_version += - outputs_with_proof.transactions_and_outputs.len() as u64; - false - } else { - true // We hit a non-contiguous chunk + // Verify the first transaction output version + let first_output_version = outputs_with_proof.first_transaction_output_version; + assert_eq!(Some(next_expected_version), first_output_version); + + // Update the next expected version + next_expected_version += outputs_with_proof.transactions_and_outputs.len() as u64; + + // Terminate the stream if we haven't reached the end + if next_expected_version < MAX_ADVERTISED_TRANSACTION_OUTPUT { + // Terminate the stream + streaming_client + .terminate_stream_with_feedback( + stream_listener.data_stream_id, + Some(NotificationAndFeedback::new( + data_notification.notification_id, + NotificationFeedback::InvalidPayloadData, + )), + ) + .await + .unwrap(); + + // Fetch a new stream + stream_listener = streaming_client + .get_all_transaction_outputs( + next_expected_version, + MAX_ADVERTISED_TRANSACTION_OUTPUT, + MAX_ADVERTISED_TRANSACTION_OUTPUT, + ) + .await + .unwrap(); } }, DataPayload::EndOfStream => { - if next_expected_version != MAX_ADVERTISED_TRANSACTION_OUTPUT + 1 { - true // The stream thought it had completed, but the chunk was incomplete - } else { - return; // All data was received! 
- } + assert_eq!(next_expected_version, MAX_ADVERTISED_TRANSACTION_OUTPUT + 1); + return; // We've reached the end }, data_payload => unexpected_payload_type!(data_payload), - }; - - if reset_stream { - // Terminate the stream and fetch a new one (we hit non-contiguous data) - streaming_client - .terminate_stream_with_feedback( - stream_listener.data_stream_id, - Some(NotificationAndFeedback::new( - data_notification.notification_id, - NotificationFeedback::InvalidPayloadData, - )), - ) - .await - .unwrap(); - - stream_listener = streaming_client - .get_all_transaction_outputs( - next_expected_version, - MAX_ADVERTISED_TRANSACTION_OUTPUT, - MAX_ADVERTISED_TRANSACTION_OUTPUT, - ) - .await - .unwrap(); } } } @@ -978,95 +1138,38 @@ async fn test_notifications_transactions() { .await .unwrap(); - // Read the data notifications from the stream and verify the payloads - let mut next_expected_transaction = MIN_ADVERTISED_TRANSACTION; - loop { - let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); - match data_notification.data_payload { - DataPayload::TransactionsWithProof(transactions_with_proof) => { - // Verify the transaction start version matches the expected version - let first_transaction_version = transactions_with_proof.first_transaction_version; - assert_eq!(Some(next_expected_transaction), first_transaction_version); - - // Verify the payload contains events - assert_some!(transactions_with_proof.events); - - let num_transactions = transactions_with_proof.transactions.len(); - next_expected_transaction += num_transactions as u64; - }, - DataPayload::EndOfStream => { - return assert_eq!(next_expected_transaction, MAX_ADVERTISED_TRANSACTION + 1) - }, - data_payload => unexpected_payload_type!(data_payload), - } - } + // Verify that the stream listener receives all transaction notifications + verify_transaction_notifications( + &mut stream_listener, + MIN_ADVERTISED_TRANSACTION, + MAX_ADVERTISED_TRANSACTION, + ) + .await; } #[tokio::test(flavor = "multi_thread")] async fn test_notifications_transactions_limited_chunks() { - // Create a new streaming client and service + // Create a new streaming client and service where chunks may be truncated let streaming_client = create_streaming_client_and_service_with_chunk_limits(); - // Request a transaction stream starting at the next expected version - let mut next_expected_version = MIN_ADVERTISED_TRANSACTION; + // Request a transaction stream (without events) and get a data stream listener let mut stream_listener = streaming_client .get_all_transactions( - next_expected_version, + MIN_ADVERTISED_TRANSACTION, MAX_ADVERTISED_TRANSACTION, MAX_ADVERTISED_TRANSACTION, - true, + false, ) .await .unwrap(); - // Terminate and request streams when the chunks are no longer contiguous - loop { - let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); - let reset_stream = match data_notification.data_payload { - DataPayload::TransactionsWithProof(transactions_with_proof) => { - let first_transaction_version = - transactions_with_proof.first_transaction_version.unwrap(); - if first_transaction_version == next_expected_version { - next_expected_version += transactions_with_proof.transactions.len() as u64; - false - } else { - true // We hit a non-contiguous chunk - } - }, - DataPayload::EndOfStream => { - if next_expected_version != MAX_ADVERTISED_TRANSACTION + 1 { - true // The stream thought it had completed, but the chunk was incomplete - } else { - return; // All data was received! 
- } - }, - data_payload => unexpected_payload_type!(data_payload), - }; - - if reset_stream { - // Terminate the stream and fetch a new one (we hit non-contiguous data) - streaming_client - .terminate_stream_with_feedback( - stream_listener.data_stream_id, - Some(NotificationAndFeedback::new( - data_notification.notification_id, - NotificationFeedback::InvalidPayloadData, - )), - ) - .await - .unwrap(); - - stream_listener = streaming_client - .get_all_transactions( - next_expected_version, - MAX_ADVERTISED_TRANSACTION, - MAX_ADVERTISED_TRANSACTION, - true, - ) - .await - .unwrap(); - } - } + // Verify that the stream listener receives all transaction notifications + verify_transaction_notifications( + &mut stream_listener, + MIN_ADVERTISED_TRANSACTION, + MAX_ADVERTISED_TRANSACTION, + ) + .await; } #[tokio::test(flavor = "multi_thread")] @@ -1091,10 +1194,14 @@ async fn test_notifications_transactions_multiple_streams() { let data_notification = get_data_notification(&mut stream_listener).await.unwrap(); match data_notification.data_payload { DataPayload::TransactionsWithProof(transactions_with_proof) => { + // Verify the first transaction version let first_transaction_version = transactions_with_proof.first_transaction_version; assert_eq!(Some(next_expected_version), first_transaction_version); + // Update the next expected version next_expected_version += transactions_with_proof.transactions.len() as u64; + + // Terminate the stream if we haven't reached the end if next_expected_version < MAX_ADVERTISED_TRANSACTION { // Terminate the stream streaming_client @@ -1121,7 +1228,8 @@ async fn test_notifications_transactions_multiple_streams() { } }, DataPayload::EndOfStream => { - return assert_eq!(next_expected_version, MAX_ADVERTISED_TRANSACTION + 1) + assert_eq!(next_expected_version, MAX_ADVERTISED_TRANSACTION + 1); + return; // We've reached the end }, data_payload => unexpected_payload_type!(data_payload), } @@ -1481,35 +1589,49 @@ async fn test_terminate_stream() { } fn create_streaming_client_and_service() -> StreamingServiceClient { - create_streaming_client_and_spawn_server(false, false, false) + create_streaming_client_and_spawn_server(None, false, false, false, false) } -fn create_streaming_client_and_service_with_data_delay() -> StreamingServiceClient { - create_streaming_client_and_spawn_server(true, false, false) +fn create_streaming_client_and_service_with_data_delay( + enable_subscription_streaming: bool, +) -> StreamingServiceClient { + create_streaming_client_and_spawn_server( + None, + true, + false, + false, + enable_subscription_streaming, + ) } fn create_streaming_client_and_service_with_chunk_limits() -> StreamingServiceClient { - create_streaming_client_and_spawn_server(false, true, true) + create_streaming_client_and_spawn_server(None, false, true, true, false) } fn create_streaming_client_and_spawn_server( + data_streaming_service_config: Option, data_beyond_highest_advertised: bool, limit_chunk_sizes: bool, skip_emulate_network_latencies: bool, + enable_subscription_streaming: bool, ) -> StreamingServiceClient { let (client, service) = create_streaming_client_and_server( + data_streaming_service_config, data_beyond_highest_advertised, limit_chunk_sizes, skip_emulate_network_latencies, + enable_subscription_streaming, ); tokio::spawn(service.start_service()); client } pub fn create_streaming_client_and_server( + data_streaming_service_config: Option, data_beyond_highest_advertised: bool, limit_chunk_sizes: bool, skip_emulate_network_latencies: bool, + 
enable_subscription_streaming: bool, ) -> ( StreamingServiceClient, DataStreamingService, @@ -1531,11 +1653,13 @@ pub fn create_streaming_client_and_server( ); // Create the data streaming service config - let data_streaming_service_config = DataStreamingServiceConfig { - max_concurrent_requests: 3, - max_concurrent_state_requests: 6, - ..Default::default() - }; + let data_streaming_service_config = + data_streaming_service_config.unwrap_or(DataStreamingServiceConfig { + enable_subscription_streaming, + max_concurrent_requests: 3, + max_concurrent_state_requests: 6, + ..Default::default() + }); // Create the streaming service and connect it to the listener let streaming_service = DataStreamingService::new( @@ -1547,3 +1671,353 @@ pub fn create_streaming_client_and_server( (streaming_client, streaming_service) } + +/// Verifies that the stream listener receives all epoch ending +/// notifications and that the payloads are contiguous. +async fn verify_continuous_epoch_ending_notifications(stream_listener: &mut DataStreamListener) { + let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; + + // Read notifications until we reach the end of the stream + loop { + let data_notification = get_data_notification(stream_listener).await.unwrap(); + match data_notification.data_payload { + DataPayload::EpochEndingLedgerInfos(ledger_infos_with_sigs) => { + // Verify the epochs of the ledger infos are contiguous + for ledger_info_with_sigs in ledger_infos_with_sigs { + let ledger_info = ledger_info_with_sigs.ledger_info(); + let epoch = ledger_info.commit_info().epoch(); + assert!(ledger_info.ends_epoch()); + assert_eq!(next_expected_epoch, epoch); + assert_le!(epoch, MAX_ADVERTISED_EPOCH_END); + next_expected_epoch += 1; + } + }, + DataPayload::EndOfStream => { + assert_eq!(next_expected_epoch, MAX_ADVERTISED_EPOCH_END + 1); + return; // We've reached the end of the stream + }, + data_payload => unexpected_payload_type!(data_payload), + } + } +} + +/// Verifies that the stream listener receives all state value +/// notifications and that the payloads are contiguous. +async fn verify_continuous_state_value_notifications(stream_listener: &mut DataStreamListener) { + let mut next_expected_index = 0; + + // Read notifications until we reach the end of the stream + loop { + let data_notification = get_data_notification(stream_listener).await.unwrap(); + match data_notification.data_payload { + DataPayload::StateValuesWithProof(state_values_with_proof) => { + // Verify the start index matches the expected index + assert_eq!(state_values_with_proof.first_index, next_expected_index); + + // Verify the last index matches the state value list length + let num_state_values = state_values_with_proof.raw_values.len() as u64; + assert_eq!( + state_values_with_proof.last_index, + next_expected_index + num_state_values - 1, + ); + + // Verify the number of state values is as expected + assert_eq!( + state_values_with_proof.raw_values.len() as u64, + num_state_values + ); + + next_expected_index += num_state_values; + }, + DataPayload::EndOfStream => { + assert_eq!(next_expected_index, TOTAL_NUM_STATE_VALUES); + return; // We've reached the end of the stream + }, + data_payload => unexpected_payload_type!(data_payload), + } + } +} + +/// Verifies that the stream listener receives all transaction +/// output notifications and that the payloads are contiguous. 
+async fn verify_continuous_output_notifications( + stream_listener: &mut DataStreamListener, + data_beyond_advertised: bool, +) { + let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; + let mut next_expected_version = MIN_ADVERTISED_TRANSACTION_OUTPUT; + + // Read notifications until we reach the end of the stream + loop { + if let Ok(data_notification) = get_data_notification(stream_listener).await { + match data_notification.data_payload { + DataPayload::ContinuousTransactionOutputsWithProof( + ledger_info_with_sigs, + outputs_with_proofs, + ) => { + // Verify the continuous outputs payload + let (new_expected_version, new_expected_epoch) = + verify_continuous_outputs_with_proof( + next_expected_epoch, + next_expected_version, + ledger_info_with_sigs, + outputs_with_proofs, + ); + + // Update the next expected version and epoch + next_expected_version = new_expected_version; + next_expected_epoch = new_expected_epoch; + }, + data_payload => unexpected_payload_type!(data_payload), + } + } else { + // Verify the next expected version and epoch depending on data availability + if data_beyond_advertised { + assert_eq!(next_expected_epoch, MAX_REAL_EPOCH_END + 1); + assert_eq!(next_expected_version, MAX_REAL_TRANSACTION_OUTPUT + 1); + } else { + assert_eq!(next_expected_epoch, MAX_ADVERTISED_EPOCH_END + 1); + assert_eq!(next_expected_version, MAX_ADVERTISED_TRANSACTION_OUTPUT + 1); + } + + return; // We've reached the end of the stream + } + } +} + +/// Verifies that the stream listener receives all transaction +/// notifications and that the payloads are contiguous. +async fn verify_continuous_transaction_notifications( + stream_listener: &mut DataStreamListener, + data_beyond_advertised: bool, +) { + let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; + let mut next_expected_version = MIN_ADVERTISED_TRANSACTION; + + // Read notifications until we reach the end of the stream + loop { + if let Ok(data_notification) = get_data_notification(stream_listener).await { + match data_notification.data_payload { + DataPayload::ContinuousTransactionsWithProof( + ledger_info_with_sigs, + transactions_with_proofs, + ) => { + // Verify the continuous transactions payload + let (new_expected_version, new_expected_epoch) = + verify_continuous_transactions_with_proof( + next_expected_epoch, + next_expected_version, + ledger_info_with_sigs, + transactions_with_proofs, + ); + + // Update the next expected version and epoch + next_expected_version = new_expected_version; + next_expected_epoch = new_expected_epoch; + }, + data_payload => unexpected_payload_type!(data_payload), + } + } else { + // Verify the next expected version and epoch depending on data availability + if data_beyond_advertised { + assert_eq!(next_expected_epoch, MAX_REAL_EPOCH_END + 1); + assert_eq!(next_expected_version, MAX_REAL_TRANSACTION + 1); + } else { + assert_eq!(next_expected_epoch, MAX_ADVERTISED_EPOCH_END + 1); + assert_eq!(next_expected_version, MAX_ADVERTISED_TRANSACTION + 1); + } + + return; // We've reached the end of the stream + } + } +} + +/// Verifies that the stream listener receives all transaction +/// or output notifications and that the payloads are contiguous. 
+async fn verify_continuous_transaction_or_output_notifications( + stream_listener: &mut DataStreamListener, + data_beyond_advertised: bool, +) { + let mut next_expected_epoch = MIN_ADVERTISED_EPOCH_END; + let mut next_expected_version = MIN_ADVERTISED_TRANSACTION; + + // Read notifications until we reach the end of the stream + loop { + if let Ok(data_notification) = get_data_notification(stream_listener).await { + match data_notification.data_payload { + DataPayload::ContinuousTransactionsWithProof( + ledger_info_with_sigs, + transactions_with_proofs, + ) => { + // Verify the continuous transactions payload + let (new_expected_version, new_expected_epoch) = + verify_continuous_transactions_with_proof( + next_expected_epoch, + next_expected_version, + ledger_info_with_sigs, + transactions_with_proofs, + ); + + // Update the next expected version and epoch + next_expected_version = new_expected_version; + next_expected_epoch = new_expected_epoch; + }, + DataPayload::ContinuousTransactionOutputsWithProof( + ledger_info_with_sigs, + outputs_with_proofs, + ) => { + // Verify the continuous outputs payload + let (new_expected_version, new_expected_epoch) = + verify_continuous_outputs_with_proof( + next_expected_epoch, + next_expected_version, + ledger_info_with_sigs, + outputs_with_proofs, + ); + + // Update the next expected version and epoch + next_expected_version = new_expected_version; + next_expected_epoch = new_expected_epoch; + }, + data_payload => unexpected_payload_type!(data_payload), + } + } else { + // Verify the next expected version and epoch depending on data availability + if data_beyond_advertised { + assert_eq!(next_expected_epoch, MAX_REAL_EPOCH_END + 1); + assert_eq!(next_expected_version, MAX_REAL_TRANSACTION + 1); + } else { + assert_eq!(next_expected_epoch, MAX_ADVERTISED_EPOCH_END + 1); + assert_eq!(next_expected_version, MAX_ADVERTISED_TRANSACTION + 1); + } + + return; // We've reached the end of the stream + } + } +} + +/// Verifies the continuous transaction outputs payload +/// and returns the new expected version and epoch. +fn verify_continuous_outputs_with_proof( + expected_epoch: u64, + expected_version: u64, + ledger_info_with_sigs: LedgerInfoWithSignatures, + outputs_with_proofs: TransactionOutputListWithProof, +) -> (u64, u64) { + // Verify the ledger info epoch matches the expected epoch + let ledger_info = ledger_info_with_sigs.ledger_info(); + assert_eq!(ledger_info.epoch(), expected_epoch); + + // Verify the output start version matches the expected version + let first_output_version = outputs_with_proofs.first_transaction_output_version; + assert_eq!(Some(expected_version), first_output_version); + + // Calculate the next expected version + let num_outputs = outputs_with_proofs.transactions_and_outputs.len() as u64; + let next_expected_version = expected_version + num_outputs; + + // Update epochs if we've hit the epoch end + let last_output_version = first_output_version.unwrap() + num_outputs - 1; + let next_expected_epoch = + if ledger_info.version() == last_output_version && ledger_info.ends_epoch() { + expected_epoch + 1 + } else { + expected_epoch + }; + + // Return the new expected epoch and version + (next_expected_version, next_expected_epoch) +} + +/// Verifies the continuous transaction payload +/// and returns the new expected version and epoch. 
+fn verify_continuous_transactions_with_proof( + expected_epoch: u64, + expected_version: u64, + ledger_info_with_sigs: LedgerInfoWithSignatures, + transactions_with_proofs: TransactionListWithProof, +) -> (u64, u64) { + // Verify the ledger info epoch matches the expected epoch + let ledger_info = ledger_info_with_sigs.ledger_info(); + assert_eq!(ledger_info.epoch(), expected_epoch); + + // Verify the transaction start version matches the expected version + let first_transaction_version = transactions_with_proofs.first_transaction_version; + assert_eq!(Some(expected_version), first_transaction_version); + + // Calculate the next expected version + let num_transactions = transactions_with_proofs.transactions.len() as u64; + let next_expected_version = expected_version + num_transactions; + + // Update epochs if we've hit the epoch end + let last_transaction_version = first_transaction_version.unwrap() + num_transactions - 1; + let next_expected_epoch = + if ledger_info.version() == last_transaction_version && ledger_info.ends_epoch() { + expected_epoch + 1 + } else { + expected_epoch + }; + + // Return the new expected epoch and version + (next_expected_version, next_expected_epoch) +} + +/// Verifies that the stream listener receives all output notifications +/// (for the specified range) and that the payloads are contiguous. +async fn verify_output_notifications( + stream_listener: &mut DataStreamListener, + first_output_version: u64, + last_output_version: u64, +) { + let mut next_expected_version = first_output_version; + + // Read notifications until we reach the end of the stream + loop { + let data_notification = get_data_notification(stream_listener).await.unwrap(); + match data_notification.data_payload { + DataPayload::TransactionOutputsWithProof(outputs_with_proof) => { + // Verify the transaction output start version matches the expected version + let first_output_version = outputs_with_proof.first_transaction_output_version; + assert_eq!(Some(next_expected_version), first_output_version); + + // Calculate the next expected version + let num_outputs = outputs_with_proof.transactions_and_outputs.len(); + next_expected_version += num_outputs as u64; + }, + DataPayload::EndOfStream => { + return assert_eq!(next_expected_version, last_output_version + 1) + }, + data_payload => unexpected_payload_type!(data_payload), + } + } +} + +/// Verifies that the stream listener receives all transaction notifications +/// (for the specified range) and that the payloads are contiguous. 
+async fn verify_transaction_notifications( + stream_listener: &mut DataStreamListener, + first_transaction_version: u64, + last_transaction_version: u64, +) { + let mut next_expected_version = first_transaction_version; + + // Read notifications until we reach the end of the stream + loop { + let data_notification = get_data_notification(stream_listener).await.unwrap(); + match data_notification.data_payload { + DataPayload::TransactionsWithProof(transactions_with_proof) => { + // Verify the transaction start version matches the expected version + let first_transaction_version = transactions_with_proof.first_transaction_version; + assert_eq!(Some(next_expected_version), first_transaction_version); + + // Calculate the next expected version + let num_transactions = transactions_with_proof.transactions.len(); + next_expected_version += num_transactions as u64; + }, + DataPayload::EndOfStream => { + return assert_eq!(next_expected_version, last_transaction_version + 1) + }, + data_payload => unexpected_payload_type!(data_payload), + } + } +} diff --git a/state-sync/state-sync-v2/data-streaming-service/src/tests/utils.rs b/state-sync/data-streaming-service/src/tests/utils.rs similarity index 57% rename from state-sync/state-sync-v2/data-streaming-service/src/tests/utils.rs rename to state-sync/data-streaming-service/src/tests/utils.rs index 46550cb98e467..0f4467c660f04 100644 --- a/state-sync/state-sync-v2/data-streaming-service/src/tests/utils.rs +++ b/state-sync/data-streaming-service/src/tests/utils.rs @@ -9,6 +9,7 @@ use aptos_data_client::{ global_summary::{AdvertisedData, GlobalDataSummary, OptimalChunkSizes}, interface::{ AptosDataClientInterface, Response, ResponseCallback, ResponseContext, ResponseError, + SubscriptionRequestMetadata, }, }; use aptos_infallible::Mutex; @@ -17,7 +18,9 @@ use aptos_storage_service_types::{ requests::{ DataRequest, EpochEndingLedgerInfoRequest, NewTransactionOutputsWithProofRequest, NewTransactionsOrOutputsWithProofRequest, NewTransactionsWithProofRequest, - StateValuesWithProofRequest, TransactionOutputsWithProofRequest, + StateValuesWithProofRequest, SubscribeTransactionOutputsWithProofRequest, + SubscribeTransactionsOrOutputsWithProofRequest, SubscribeTransactionsWithProofRequest, + SubscriptionStreamMetadata, TransactionOutputsWithProofRequest, TransactionsOrOutputsWithProofRequest, TransactionsWithProofRequest, }, responses::{CompleteDataRange, TransactionOrOutputListWithProof}, @@ -45,13 +48,21 @@ use aptos_types::{ use async_trait::async_trait; use futures::StreamExt; use rand::{rngs::OsRng, Rng}; -use std::{cmp::min, collections::HashMap, ops::DerefMut, sync::Arc, thread, time::Duration}; +use std::{ + cmp::min, + collections::{BTreeMap, HashMap}, + ops::DerefMut, + sync::Arc, + time::Duration, +}; use tokio::time::timeout; // TODO(joshlind): provide a better way to mock the data client. // Especially around verifying timeouts! 
-/// The number of state values held at any version +/// Generic test constants +pub const MAX_NOTIFICATION_TIMEOUT_SECS: u64 = 10; +pub const MAX_RESPONSE_ID: u64 = 100000; pub const TOTAL_NUM_STATE_VALUES: u64 = 2000; /// Test constants for advertised data @@ -63,23 +74,21 @@ pub const MIN_ADVERTISED_TRANSACTION: u64 = 1000; pub const MAX_ADVERTISED_TRANSACTION: u64 = 10000; pub const MIN_ADVERTISED_TRANSACTION_OUTPUT: u64 = 1000; pub const MAX_ADVERTISED_TRANSACTION_OUTPUT: u64 = 10000; + +/// Test constants for data beyond the highest advertised pub const MAX_REAL_EPOCH_END: u64 = MAX_ADVERTISED_EPOCH_END + 2; pub const MAX_REAL_TRANSACTION: u64 = MAX_ADVERTISED_TRANSACTION + 10; pub const MAX_REAL_TRANSACTION_OUTPUT: u64 = MAX_REAL_TRANSACTION; -pub const MAX_RESPONSE_ID: u64 = 100000; - -/// Test timeout constant -pub const MAX_NOTIFICATION_TIMEOUT_SECS: u64 = 40; /// A simple mock of the Aptos Data Client #[derive(Clone, Debug)] pub struct MockAptosDataClient { pub aptos_data_client_config: AptosDataClientConfig, - pub advertised_epoch_ending_ledger_infos: HashMap, + pub advertised_epoch_ending_ledger_infos: BTreeMap, pub advertised_synced_ledger_infos: Vec, pub data_beyond_highest_advertised: bool, // If true, data exists beyond the highest advertised pub data_request_counter: Arc>>, // Tracks the number of times the same data request was made - pub highest_epoch_ending_ledger_infos: HashMap, + pub highest_epoch_ending_ledger_infos: BTreeMap, pub limit_chunk_sizes: bool, // If true, responses will be truncated to emulate chunk and network limits pub skip_emulate_network_latencies: bool, // If true, skips network latency emulation pub skip_timeout_verification: bool, // If true, skips timeout verification for incoming requests @@ -132,24 +141,40 @@ impl MockAptosDataClient { } } - fn emulate_network_latencies(&self) { - if self.skip_emulate_network_latencies { - return; - } + /// Clones the mock data client without timeout verification + fn clone_without_timeout_verification(&self) -> Self { + // Clone the mock data client and skip timeout verification + let mut aptos_data_client = self.clone(); + aptos_data_client.skip_timeout_verification = true; + + aptos_data_client + } - // Sleep for 10 - 50 ms to emulate variance - thread::sleep(Duration::from_millis(create_range_random_u64(10, 50))); + /// Emulates network latencies by sleeping for 10 -> 50 ms + async fn emulate_network_latencies(&self) { + if !self.skip_emulate_network_latencies { + tokio::time::sleep(Duration::from_millis(create_range_random_u64(10, 50))).await; + } } - fn emulate_optimistic_fetch_expiration(&self) -> aptos_data_client::error::Error { - thread::sleep(Duration::from_secs(MAX_NOTIFICATION_TIMEOUT_SECS)); + /// Emulates a timeout by sleeping for a long time and returning a timeout error + async fn emulate_network_request_timeout(&self) -> aptos_data_client::error::Error { + // Sleep for a while + tokio::time::sleep(Duration::from_secs(MAX_NOTIFICATION_TIMEOUT_SECS)).await; + + // Return a timeout error aptos_data_client::error::Error::TimeoutWaitingForResponse("RPC timed out!".into()) } + /// Calculates the last index for the given start and end indices (with + /// respect to a chunk size limit). 
fn calculate_last_index(&self, start_index: u64, end_index: u64) -> u64 { if self.limit_chunk_sizes { - let num_items_requested = (end_index - start_index) + 1; + // Limit the chunk size by a random factor let chunk_reduction_factor = create_range_random_u64(2, 9); + + // Calculate the number of items to request + let num_items_requested = (end_index - start_index) + 1; let num_reduced_items_requested = num_items_requested / chunk_reduction_factor; if num_reduced_items_requested <= 1 { start_index // Limit the chunk to a single item @@ -161,41 +186,60 @@ impl MockAptosDataClient { } } - fn verify_request_timeout( + /// Returns the known epoch for the given version using the + /// highest epoch ending ledger infos. + fn find_known_epoch_for_version(&self, known_version: u64) -> u64 { + // Find the epoch for the given version using the highest epoch ending ledger infos + for (epoch, ledger_info) in self.highest_epoch_ending_ledger_infos.iter() { + let epoch_ending_ledger_version = ledger_info.ledger_info().version(); + if epoch_ending_ledger_version > known_version { + return *epoch; + } + } + + // Otherwise, return the max epoch + 1 + MAX_REAL_EPOCH_END + 1 + } + + /// Verifies the request timeout value for the given request type + fn verify_request_timeout_value( &self, request_timeout_ms: u64, is_optimistic_fetch_request: bool, + is_subscription_request: bool, data_request: DataRequest, ) { - if self.skip_timeout_verification { - return; - } - - // Verify the given timeout for the request - let expected_timeout = if is_optimistic_fetch_request { - self.aptos_data_client_config.optimistic_fetch_timeout_ms - } else { - let min_timeout = self.aptos_data_client_config.response_timeout_ms; - let max_timeout = self.aptos_data_client_config.max_response_timeout_ms; - - // Check how many times the given request has been made - // and update the request counter. - let mut data_request_counter_lock = self.data_request_counter.lock(); - let num_times_requested = *data_request_counter_lock.get(&data_request).unwrap_or(&0); - let _ = data_request_counter_lock - .deref_mut() - .insert(data_request, num_times_requested + 1); - drop(data_request_counter_lock); - - // Calculate the expected timeout based on exponential backoff - min( - max_timeout, - min_timeout * (u32::pow(2, num_times_requested as u32) as u64), - ) - }; + if !self.skip_timeout_verification { + // Verify the given timeout for the request + let expected_timeout = if is_optimistic_fetch_request { + self.aptos_data_client_config.optimistic_fetch_timeout_ms + } else if is_subscription_request { + self.aptos_data_client_config + .subscription_response_timeout_ms + } else { + let min_timeout = self.aptos_data_client_config.response_timeout_ms; + let max_timeout = self.aptos_data_client_config.max_response_timeout_ms; + + // Check how many times the given request has been made + // and update the request counter. 
+ let mut data_request_counter_lock = self.data_request_counter.lock(); + let num_times_requested = + *data_request_counter_lock.get(&data_request).unwrap_or(&0); + let _ = data_request_counter_lock + .deref_mut() + .insert(data_request, num_times_requested + 1); + drop(data_request_counter_lock); + + // Calculate the expected timeout based on exponential backoff + min( + max_timeout, + min_timeout * (u32::pow(2, num_times_requested as u32) as u64), + ) + }; - // Verify the request timeouts match - assert_eq!(request_timeout_ms, expected_timeout); + // Verify the request timeouts match + assert_eq!(request_timeout_ms, expected_timeout); + } } } @@ -245,16 +289,16 @@ impl AptosDataClientInterface for MockAptosDataClient { end_index: u64, request_timeout_ms: u64, ) -> Result, aptos_data_client::error::Error> { - self.verify_request_timeout( - request_timeout_ms, - false, - DataRequest::GetStateValuesWithProof(StateValuesWithProofRequest { - version, - start_index, - end_index, - }), - ); - self.emulate_network_latencies(); + // Verify the request timeout + let data_request = DataRequest::GetStateValuesWithProof(StateValuesWithProofRequest { + version, + start_index, + end_index, + }); + self.verify_request_timeout_value(request_timeout_ms, false, false, data_request); + + // Emulate network latencies + self.emulate_network_latencies().await; // Calculate the last index based on if we should limit the chunk size let end_index = self.calculate_last_index(start_index, end_index); @@ -278,6 +322,8 @@ impl AptosDataClientInterface for MockAptosDataClient { proof: SparseMerkleRangeProof::new(vec![]), root_hash: HashValue::zero(), }; + + // Create and send a data client response Ok(create_data_client_response(state_value_chunk_with_proof)) } @@ -287,15 +333,15 @@ impl AptosDataClientInterface for MockAptosDataClient { end_epoch: Epoch, request_timeout_ms: u64, ) -> Result>, aptos_data_client::error::Error> { - self.verify_request_timeout( - request_timeout_ms, - false, - DataRequest::GetEpochEndingLedgerInfos(EpochEndingLedgerInfoRequest { - start_epoch, - expected_end_epoch: end_epoch, - }), - ); - self.emulate_network_latencies(); + // Verify the request timeout + let data_request = DataRequest::GetEpochEndingLedgerInfos(EpochEndingLedgerInfoRequest { + start_epoch, + expected_end_epoch: end_epoch, + }); + self.verify_request_timeout_value(request_timeout_ms, false, false, data_request); + + // Emulate network latencies + self.emulate_network_latencies().await; // Calculate the last epoch based on if we should limit the chunk size let end_epoch = self.calculate_last_index(start_epoch, end_epoch); @@ -312,6 +358,8 @@ impl AptosDataClientInterface for MockAptosDataClient { }; epoch_ending_ledger_infos.push(ledger_info.clone()); } + + // Create and send a data client response Ok(create_data_client_response(epoch_ending_ledger_infos)) } @@ -324,34 +372,26 @@ impl AptosDataClientInterface for MockAptosDataClient { Response<(TransactionOutputListWithProof, LedgerInfoWithSignatures)>, aptos_data_client::error::Error, > { - self.verify_request_timeout( - request_timeout_ms, - true, + // Verify the request timeout + let data_request = DataRequest::GetNewTransactionOutputsWithProof(NewTransactionOutputsWithProofRequest { known_version, known_epoch, - }), - ); - self.emulate_network_latencies(); + }); + self.verify_request_timeout_value(request_timeout_ms, true, false, data_request); + + // Emulate network latencies + self.emulate_network_latencies().await; - // Attempt to fetch the new data + // Attempt to 
fetch and serve the new data if self.data_beyond_highest_advertised && known_version < MAX_REAL_TRANSACTION_OUTPUT { // Create a mock data client without timeout verification (to handle the internal requests) - let mut aptos_data_client = self.clone(); - aptos_data_client.skip_timeout_verification = true; - - let target_ledger_info = if known_epoch <= MAX_REAL_EPOCH_END { - // Fetch the epoch ending ledger info - aptos_data_client - .get_epoch_ending_ledger_infos(known_epoch, known_epoch, request_timeout_ms) - .await - .unwrap() - .payload[0] - .clone() - } else { - // Return a synced ledger info at the last version and highest epoch - create_ledger_info(MAX_REAL_TRANSACTION_OUTPUT, MAX_REAL_EPOCH_END + 1, false) - }; + let aptos_data_client = self.clone_without_timeout_verification(); + + // Determine the target ledger info + let target_ledger_info = + determine_target_ledger_info(known_epoch, request_timeout_ms, &aptos_data_client) + .await; // Fetch the new transaction outputs let outputs_with_proof = aptos_data_client @@ -365,14 +405,15 @@ impl AptosDataClientInterface for MockAptosDataClient { .unwrap() .payload; - // Return the new data - Ok(create_data_client_response(( + // Create and send a data client response + return Ok(create_data_client_response(( outputs_with_proof, target_ledger_info, - ))) - } else { - Err(self.emulate_optimistic_fetch_expiration()) + ))); } + + // Otherwise, emulate a network request timeout + Err(self.emulate_network_request_timeout().await) } async fn get_new_transactions_with_proof( @@ -385,35 +426,27 @@ impl AptosDataClientInterface for MockAptosDataClient { Response<(TransactionListWithProof, LedgerInfoWithSignatures)>, aptos_data_client::error::Error, > { - self.verify_request_timeout( - request_timeout_ms, - true, + // Verify the request timeout + let data_request = DataRequest::GetNewTransactionsWithProof(NewTransactionsWithProofRequest { known_version, known_epoch, include_events, - }), - ); - self.emulate_network_latencies(); + }); + self.verify_request_timeout_value(request_timeout_ms, true, false, data_request); - // Attempt to fetch the new data + // Emulate network latencies + self.emulate_network_latencies().await; + + // Attempt to fetch and serve the new data if self.data_beyond_highest_advertised && known_version < MAX_REAL_TRANSACTION { // Create a mock data client without timeout verification (to handle the internal requests) - let mut aptos_data_client = self.clone(); - aptos_data_client.skip_timeout_verification = true; - - let target_ledger_info = if known_epoch <= MAX_REAL_EPOCH_END { - // Fetch the epoch ending ledger info - aptos_data_client - .get_epoch_ending_ledger_infos(known_epoch, known_epoch, request_timeout_ms) - .await - .unwrap() - .payload[0] - .clone() - } else { - // Return a synced ledger info at the last version and highest epoch - create_ledger_info(MAX_REAL_TRANSACTION, MAX_REAL_EPOCH_END + 1, false) - }; + let aptos_data_client = self.clone_without_timeout_verification(); + + // Determine the target ledger info + let target_ledger_info = + determine_target_ledger_info(known_epoch, request_timeout_ms, &aptos_data_client) + .await; // Fetch the new transactions let transactions_with_proof = aptos_data_client @@ -428,14 +461,15 @@ impl AptosDataClientInterface for MockAptosDataClient { .unwrap() .payload; - // Return the new data - Ok(create_data_client_response(( + // Create and send a data client response + return Ok(create_data_client_response(( transactions_with_proof, target_ledger_info, - ))) - } else { - 
Err(self.emulate_optimistic_fetch_expiration()) + ))); } + + // Otherwise, emulate a network request timeout + Err(self.emulate_network_request_timeout().await) } async fn get_new_transactions_or_outputs_with_proof( @@ -447,23 +481,22 @@ impl AptosDataClientInterface for MockAptosDataClient { ) -> aptos_data_client::error::Result< Response<(TransactionOrOutputListWithProof, LedgerInfoWithSignatures)>, > { - self.verify_request_timeout( - request_timeout_ms, - true, - DataRequest::GetNewTransactionsOrOutputsWithProof( - NewTransactionsOrOutputsWithProofRequest { - known_version, - known_epoch, - include_events, - max_num_output_reductions: 3, - }, - ), + // Verify the request timeout + let data_request = DataRequest::GetNewTransactionsOrOutputsWithProof( + NewTransactionsOrOutputsWithProofRequest { + known_version, + known_epoch, + include_events, + max_num_output_reductions: 3, + }, ); - self.emulate_network_latencies(); + self.verify_request_timeout_value(request_timeout_ms, true, false, data_request); + + // Emulate network latencies + self.emulate_network_latencies().await; // Create a mock data client without timeout verification (to handle the internal requests) - let mut aptos_data_client = self.clone(); - aptos_data_client.skip_timeout_verification = true; + let aptos_data_client = self.clone_without_timeout_verification(); // Get the new transactions or outputs response let response = if return_transactions_instead_of_outputs() { @@ -488,6 +521,8 @@ impl AptosDataClientInterface for MockAptosDataClient { .payload; ((None, Some(outputs)), ledger_info) }; + + // Create and send a data client response Ok(create_data_client_response(response)) } @@ -496,13 +531,14 @@ impl AptosDataClientInterface for MockAptosDataClient { version: Version, request_timeout_ms: u64, ) -> Result, aptos_data_client::error::Error> { - self.verify_request_timeout( - request_timeout_ms, - false, - DataRequest::GetNumberOfStatesAtVersion(version), - ); - self.emulate_network_latencies(); + // Verify the request timeout + let data_request = DataRequest::GetNumberOfStatesAtVersion(version); + self.verify_request_timeout_value(request_timeout_ms, false, false, data_request); + // Emulate network latencies + self.emulate_network_latencies().await; + + // Create and send a data client response Ok(create_data_client_response(TOTAL_NUM_STATE_VALUES)) } @@ -513,22 +549,25 @@ impl AptosDataClientInterface for MockAptosDataClient { end_version: Version, request_timeout_ms: u64, ) -> Result, aptos_data_client::error::Error> { - self.verify_request_timeout( - request_timeout_ms, - false, + // Verify the request timeout + let data_request = DataRequest::GetTransactionOutputsWithProof(TransactionOutputsWithProofRequest { proof_version, start_version, end_version, - }), - ); - self.emulate_network_latencies(); + }); + self.verify_request_timeout_value(request_timeout_ms, false, false, data_request); + + // Emulate network latencies + self.emulate_network_latencies().await; // Calculate the last version based on if we should limit the chunk size let end_version = self.calculate_last_index(start_version, end_version); + // Create the output list with proof let output_list_with_proof = create_output_list_with_proof(start_version, end_version); + // Create and send a data client response Ok(create_data_client_response(output_list_with_proof)) } @@ -540,25 +579,26 @@ impl AptosDataClientInterface for MockAptosDataClient { include_events: bool, request_timeout_ms: u64, ) -> Result, aptos_data_client::error::Error> { - 
self.verify_request_timeout( - request_timeout_ms, - false, - DataRequest::GetTransactionsWithProof(TransactionsWithProofRequest { - proof_version, - start_version, - end_version, - include_events, - }), - ); - self.emulate_network_latencies(); + // Verify the request timeout + let data_request = DataRequest::GetTransactionsWithProof(TransactionsWithProofRequest { + proof_version, + start_version, + end_version, + include_events, + }); + self.verify_request_timeout_value(request_timeout_ms, false, false, data_request); + + // Emulate network latencies + self.emulate_network_latencies().await; // Calculate the last version based on if we should limit the chunk size let end_version = self.calculate_last_index(start_version, end_version); + // Create the transaction list with proof let transaction_list_with_proof = create_transaction_list_with_proof(start_version, end_version, include_events); - // Return the transaction list with proofs + // Create and send a data client response Ok(create_data_client_response(transaction_list_with_proof)) } @@ -570,22 +610,22 @@ impl AptosDataClientInterface for MockAptosDataClient { include_events: bool, request_timeout_ms: u64, ) -> aptos_data_client::error::Result> { - self.verify_request_timeout( - request_timeout_ms, - false, + // Verify the request timeout + let data_request = DataRequest::GetTransactionsOrOutputsWithProof(TransactionsOrOutputsWithProofRequest { proof_version, start_version, end_version, include_events, max_num_output_reductions: 3, - }), - ); - self.emulate_network_latencies(); + }); + self.verify_request_timeout_value(request_timeout_ms, false, false, data_request); + + // Emulate network latencies + self.emulate_network_latencies().await; // Create a mock data client without timeout verification (to handle the internal requests) - let mut aptos_data_client = self.clone(); - aptos_data_client.skip_timeout_verification = true; + let aptos_data_client = self.clone_without_timeout_verification(); // Get the transactions or outputs response let transactions_or_outputs = if return_transactions_instead_of_outputs() { @@ -610,8 +650,205 @@ impl AptosDataClientInterface for MockAptosDataClient { .await?; (None, Some(outputs_with_proof.payload)) }; + + // Create and send a data client response Ok(create_data_client_response(transactions_or_outputs)) } + + async fn subscribe_to_transaction_outputs_with_proof( + &self, + subscription_request_metadata: SubscriptionRequestMetadata, + request_timeout_ms: u64, + ) -> aptos_data_client::error::Result< + Response<(TransactionOutputListWithProof, LedgerInfoWithSignatures)>, + > { + // Extract the known version, known epoch and the subscription stream index + let known_version_at_stream_start = + subscription_request_metadata.known_version_at_stream_start; + let known_epoch_at_stream_start = subscription_request_metadata.known_epoch_at_stream_start; + let subscription_stream_index = subscription_request_metadata.subscription_stream_index; + + // Verify the request timeout + let data_request = DataRequest::SubscribeTransactionOutputsWithProof( + SubscribeTransactionOutputsWithProofRequest { + subscription_stream_metadata: SubscriptionStreamMetadata { + known_version_at_stream_start, + known_epoch_at_stream_start, + subscription_stream_id: subscription_request_metadata.subscription_stream_id, + }, + subscription_stream_index, + }, + ); + self.verify_request_timeout_value(request_timeout_ms, false, true, data_request); + + // Emulate network latencies + self.emulate_network_latencies().await; + + // 
Update the known version and epoch + let known_version = known_version_at_stream_start + subscription_stream_index; + + // Attempt to fetch and serve the new data + if self.data_beyond_highest_advertised && known_version < MAX_REAL_TRANSACTION_OUTPUT { + // Create a mock data client without timeout verification (to handle the internal requests) + let aptos_data_client = self.clone_without_timeout_verification(); + + // Determine the target ledger info + let known_epoch = self.find_known_epoch_for_version(known_version); + let target_ledger_info = + determine_target_ledger_info(known_epoch, request_timeout_ms, &aptos_data_client) + .await; + + // Fetch the new transaction outputs + let outputs_with_proof = aptos_data_client + .get_transaction_outputs_with_proof( + known_version + 1, + known_version + 1, + known_version + 1, + self.aptos_data_client_config.response_timeout_ms, + ) + .await + .unwrap() + .payload; + + // Create and send a data client response + return Ok(create_data_client_response(( + outputs_with_proof, + target_ledger_info, + ))); + } + + // Otherwise, emulate a network request timeout + Err(self.emulate_network_request_timeout().await) + } + + async fn subscribe_to_transactions_with_proof( + &self, + subscription_request_metadata: SubscriptionRequestMetadata, + include_events: bool, + request_timeout_ms: u64, + ) -> aptos_data_client::error::Result< + Response<(TransactionListWithProof, LedgerInfoWithSignatures)>, + > { + // Extract the known version, known epoch and the subscription stream index + let known_version_at_stream_start = + subscription_request_metadata.known_version_at_stream_start; + let known_epoch_at_stream_start = subscription_request_metadata.known_epoch_at_stream_start; + let subscription_stream_index = subscription_request_metadata.subscription_stream_index; + + // Verify the request timeout + let data_request = + DataRequest::SubscribeTransactionsWithProof(SubscribeTransactionsWithProofRequest { + subscription_stream_metadata: SubscriptionStreamMetadata { + known_version_at_stream_start, + known_epoch_at_stream_start, + subscription_stream_id: subscription_request_metadata.subscription_stream_id, + }, + subscription_stream_index, + include_events, + }); + self.verify_request_timeout_value(request_timeout_ms, false, true, data_request); + + // Emulate network latencies + self.emulate_network_latencies().await; + + // Update the known version and epoch + let known_version = known_version_at_stream_start + subscription_stream_index; + + // Attempt to fetch and serve the new data + if self.data_beyond_highest_advertised && known_version < MAX_REAL_TRANSACTION_OUTPUT { + // Create a mock data client without timeout verification (to handle the internal requests) + let aptos_data_client = self.clone_without_timeout_verification(); + + // Determine the target ledger info + let known_epoch = self.find_known_epoch_for_version(known_version); + let target_ledger_info = + determine_target_ledger_info(known_epoch, request_timeout_ms, &aptos_data_client) + .await; + + // Fetch the new transaction outputs + let outputs_with_proof = aptos_data_client + .get_transactions_with_proof( + known_version + 1, + known_version + 1, + known_version + 1, + include_events, + self.aptos_data_client_config.response_timeout_ms, + ) + .await + .unwrap() + .payload; + + // Create and send a data client response + return Ok(create_data_client_response(( + outputs_with_proof, + target_ledger_info, + ))); + } + + // Otherwise, emulate a network request timeout + 
Err(self.emulate_network_request_timeout().await) + } + + async fn subscribe_to_transactions_or_outputs_with_proof( + &self, + subscription_request_metadata: SubscriptionRequestMetadata, + include_events: bool, + request_timeout_ms: u64, + ) -> aptos_data_client::error::Result< + Response<(TransactionOrOutputListWithProof, LedgerInfoWithSignatures)>, + > { + // Extract the known version, known epoch and the subscription stream index + let known_version_at_stream_start = + subscription_request_metadata.known_version_at_stream_start; + let known_epoch_at_stream_start = subscription_request_metadata.known_epoch_at_stream_start; + let subscription_stream_index = subscription_request_metadata.subscription_stream_index; + + // Verify the request timeout + let data_request = DataRequest::SubscribeTransactionsOrOutputsWithProof( + SubscribeTransactionsOrOutputsWithProofRequest { + subscription_stream_metadata: SubscriptionStreamMetadata { + known_version_at_stream_start, + known_epoch_at_stream_start, + subscription_stream_id: subscription_request_metadata.subscription_stream_id, + }, + subscription_stream_index, + include_events, + max_num_output_reductions: 3, + }, + ); + self.verify_request_timeout_value(request_timeout_ms, false, true, data_request); + + // Emulate network latencies + self.emulate_network_latencies().await; + + // Create a mock data client without timeout verification (to handle the internal requests) + let aptos_data_client = self.clone_without_timeout_verification(); + + // Send the new transactions or outputs response + let response = if return_transactions_instead_of_outputs() { + let (transactions, ledger_info) = aptos_data_client + .subscribe_to_transactions_with_proof( + subscription_request_metadata, + include_events, + request_timeout_ms, + ) + .await? + .payload; + ((Some(transactions), None), ledger_info) + } else { + let (outputs, ledger_info) = aptos_data_client + .subscribe_to_transaction_outputs_with_proof( + subscription_request_metadata, + request_timeout_ms, + ) + .await? 
+ .payload; + ((None, Some(outputs)), ledger_info) + }; + + // Create and send a data client response + Ok(create_data_client_response(response)) + } } #[derive(Debug)] @@ -670,13 +907,13 @@ fn create_epoch_ending_ledger_infos( start_version: Version, end_epoch: Epoch, end_version: Version, -) -> HashMap { +) -> BTreeMap { let mut current_epoch = start_epoch; let mut current_version = start_version; // Populate the epoch ending ledger infos using random intervals let max_num_versions_in_epoch = (end_version - start_version) / ((end_epoch + 1) - start_epoch); - let mut epoch_ending_ledger_infos = HashMap::new(); + let mut epoch_ending_ledger_infos = BTreeMap::new(); while current_epoch < end_epoch + 1 { let num_versions_in_epoch = create_non_zero_random_u64(max_num_versions_in_epoch); current_version += num_versions_in_epoch; @@ -702,7 +939,7 @@ fn create_synced_ledger_infos( start_version: Version, end_epoch: Epoch, end_version: Version, - epoch_ending_ledger_infos: &HashMap, + epoch_ending_ledger_infos: &BTreeMap, ) -> Vec { let mut current_epoch = start_epoch; let mut current_version = start_version; @@ -779,6 +1016,26 @@ fn create_range_random_u64(min_value: u64, max_value: u64) -> u64 { rng.gen_range(min_value, max_value) } +/// Determines the target ledger info for the given known epoch +async fn determine_target_ledger_info( + known_epoch: Epoch, + request_timeout_ms: u64, + aptos_data_client: &MockAptosDataClient, +) -> LedgerInfoWithSignatures { + if known_epoch <= MAX_REAL_EPOCH_END { + // Fetch the epoch ending ledger info + aptos_data_client + .get_epoch_ending_ledger_infos(known_epoch, known_epoch, request_timeout_ms) + .await + .unwrap() + .payload[0] + .clone() + } else { + // Return a synced ledger info at the last version and highest epoch + create_ledger_info(MAX_REAL_TRANSACTION, MAX_REAL_EPOCH_END + 1, false) + } +} + /// Initializes the Aptos logger for tests pub fn initialize_logger() { aptos_logger::Logger::builder() diff --git a/state-sync/state-sync-v2/state-sync-driver/Cargo.toml b/state-sync/state-sync-driver/Cargo.toml similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/Cargo.toml rename to state-sync/state-sync-driver/Cargo.toml diff --git a/state-sync/state-sync-v2/state-sync-driver/src/bootstrapper.rs b/state-sync/state-sync-driver/src/bootstrapper.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/bootstrapper.rs rename to state-sync/state-sync-driver/src/bootstrapper.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/continuous_syncer.rs b/state-sync/state-sync-driver/src/continuous_syncer.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/continuous_syncer.rs rename to state-sync/state-sync-driver/src/continuous_syncer.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/driver.rs b/state-sync/state-sync-driver/src/driver.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/driver.rs rename to state-sync/state-sync-driver/src/driver.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/driver_client.rs b/state-sync/state-sync-driver/src/driver_client.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/driver_client.rs rename to state-sync/state-sync-driver/src/driver_client.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/driver_factory.rs b/state-sync/state-sync-driver/src/driver_factory.rs similarity index 100% rename from 
state-sync/state-sync-v2/state-sync-driver/src/driver_factory.rs rename to state-sync/state-sync-driver/src/driver_factory.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/error.rs b/state-sync/state-sync-driver/src/error.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/error.rs rename to state-sync/state-sync-driver/src/error.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/lib.rs b/state-sync/state-sync-driver/src/lib.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/lib.rs rename to state-sync/state-sync-driver/src/lib.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/logging.rs b/state-sync/state-sync-driver/src/logging.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/logging.rs rename to state-sync/state-sync-driver/src/logging.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/metadata_storage.rs b/state-sync/state-sync-driver/src/metadata_storage.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/metadata_storage.rs rename to state-sync/state-sync-driver/src/metadata_storage.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/metrics.rs b/state-sync/state-sync-driver/src/metrics.rs similarity index 92% rename from state-sync/state-sync-v2/state-sync-driver/src/metrics.rs rename to state-sync/state-sync-driver/src/metrics.rs index a91245ff4c9c7..c85519c3a95b1 100644 --- a/state-sync/state-sync-v2/state-sync-driver/src/metrics.rs +++ b/state-sync/state-sync-driver/src/metrics.rs @@ -58,8 +58,9 @@ impl StorageSynchronizerOperations { /// Histogram buckets for tracking chunk sizes const CHUNK_SIZE_BUCKETS: &[f64] = &[ - 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0, 2048.0, 4096.0, 8192.0, - 16384.0, + 1.0, 2.0, 4.0, 5.0, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, 750.0, 1000.0, 2500.0, 5000.0, + 7500.0, 10_000.0, 12_500.0, 15_000.0, 17_500.0, 20_000.0, 25_000.0, 30_000.0, 35_000.0, + 40_000.0, 45_000.0, 50_000.0, 75_000.0, 100_000.0, ]; /// Counter for state sync bootstrapper errors @@ -142,12 +143,19 @@ pub static STORAGE_SYNCHRONIZER_GAUGES: Lazy = Lazy::new(|| { .unwrap() }); +// Latency buckets for storage synchronizer operations +const STORAGE_SYNCHRONIZER_LATENCY_BUCKETS_SECS: &[f64] = &[ + 0.05, 0.1, 0.2, 0.3, 0.5, 0.75, 1.0, 1.5, 2.0, 3.0, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 40.0, + 60.0, 120.0, 180.0, 240.0, 300.0, +]; + /// Counter for tracking storage synchronizer latencies pub static STORAGE_SYNCHRONIZER_LATENCIES: Lazy = Lazy::new(|| { register_histogram_vec!( "aptos_state_sync_storage_synchronizer_latencies", "Counters related to the storage synchronizer latencies", - &["label"] + &["label"], + STORAGE_SYNCHRONIZER_LATENCY_BUCKETS_SECS.to_vec() ) .unwrap() }); diff --git a/state-sync/state-sync-v2/state-sync-driver/src/notification_handlers.rs b/state-sync/state-sync-driver/src/notification_handlers.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/notification_handlers.rs rename to state-sync/state-sync-driver/src/notification_handlers.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/storage_synchronizer.rs b/state-sync/state-sync-driver/src/storage_synchronizer.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/storage_synchronizer.rs rename to state-sync/state-sync-driver/src/storage_synchronizer.rs diff --git 
a/state-sync/state-sync-v2/state-sync-driver/src/tests/bootstrapper.rs b/state-sync/state-sync-driver/src/tests/bootstrapper.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/tests/bootstrapper.rs rename to state-sync/state-sync-driver/src/tests/bootstrapper.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/tests/continuous_syncer.rs b/state-sync/state-sync-driver/src/tests/continuous_syncer.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/tests/continuous_syncer.rs rename to state-sync/state-sync-driver/src/tests/continuous_syncer.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/tests/driver.rs b/state-sync/state-sync-driver/src/tests/driver.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/tests/driver.rs rename to state-sync/state-sync-driver/src/tests/driver.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/tests/driver_factory.rs b/state-sync/state-sync-driver/src/tests/driver_factory.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/tests/driver_factory.rs rename to state-sync/state-sync-driver/src/tests/driver_factory.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/tests/metadata_storage.rs b/state-sync/state-sync-driver/src/tests/metadata_storage.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/tests/metadata_storage.rs rename to state-sync/state-sync-driver/src/tests/metadata_storage.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/tests/mocks.rs b/state-sync/state-sync-driver/src/tests/mocks.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/tests/mocks.rs rename to state-sync/state-sync-driver/src/tests/mocks.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/tests/mod.rs b/state-sync/state-sync-driver/src/tests/mod.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/tests/mod.rs rename to state-sync/state-sync-driver/src/tests/mod.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/tests/storage_synchronizer.rs b/state-sync/state-sync-driver/src/tests/storage_synchronizer.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/tests/storage_synchronizer.rs rename to state-sync/state-sync-driver/src/tests/storage_synchronizer.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/tests/utils.rs b/state-sync/state-sync-driver/src/tests/utils.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/tests/utils.rs rename to state-sync/state-sync-driver/src/tests/utils.rs diff --git a/state-sync/state-sync-v2/state-sync-driver/src/utils.rs b/state-sync/state-sync-driver/src/utils.rs similarity index 100% rename from state-sync/state-sync-v2/state-sync-driver/src/utils.rs rename to state-sync/state-sync-driver/src/utils.rs diff --git a/state-sync/storage-service/server/src/handler.rs b/state-sync/storage-service/server/src/handler.rs index 658d659c25070..ed2e0c5417fe2 100644 --- a/state-sync/storage-service/server/src/handler.rs +++ b/state-sync/storage-service/server/src/handler.rs @@ -7,7 +7,7 @@ use crate::{ metrics, metrics::{ increment_counter, start_timer, LRU_CACHE_HIT, LRU_CACHE_PROBE, OPTIMISTIC_FETCH_ADD, - SUBSCRIPTION_ADD, SUBSCRIPTION_FAILURE, + SUBSCRIPTION_ADD, SUBSCRIPTION_FAILURE, SUBSCRIPTION_NEW_STREAM, }, moderator::RequestModerator, network::ResponseSender, @@ -32,9 +32,9 @@ use 
aptos_storage_service_types::{ use aptos_time_service::TimeService; use aptos_types::transaction::Version; use arc_swap::ArcSwap; -use dashmap::DashMap; +use dashmap::{mapref::entry::Entry, DashMap}; use lru::LruCache; -use std::{collections::HashMap, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; /// Storage server constants const INVALID_REQUEST_LOG_FREQUENCY_SECS: u64 = 5; // The frequency to log invalid requests (secs) @@ -51,7 +51,7 @@ pub struct Handler { lru_response_cache: Arc>>, request_moderator: Arc, storage: T, - subscriptions: Arc>>, + subscriptions: Arc>, time_service: TimeService, } @@ -62,7 +62,7 @@ impl Handler { lru_response_cache: Arc>>, request_moderator: Arc, storage: T, - subscriptions: Arc>>, + subscriptions: Arc>, time_service: TimeService, ) -> Self { Self { @@ -258,67 +258,94 @@ impl Handler { request: StorageServiceRequest, response_sender: ResponseSender, ) { - // Create a new subscription request + // Create a new subscription request and get the stream ID let subscription_request = SubscriptionRequest::new(request.clone(), response_sender, self.time_service.clone()); - - // Grab the lock on the active subscriptions map - let mut subscriptions = self.subscriptions.lock(); - - // Get the existing stream ID and the request stream ID - let existing_stream_id = - subscriptions - .get_mut(&peer_network_id) - .map(|subscription_stream_requests| { - subscription_stream_requests.subscription_stream_id() - }); let request_stream_id = subscription_request.subscription_stream_id(); - // If the stream already exists, add the request to the stream. Otherwise, create a new one. - if existing_stream_id == Some(request_stream_id) { - // Add the request to the existing stream (the stream IDs match) - if let Some(existing_stream) = subscriptions.get_mut(&peer_network_id) { - if let Err((error, subscription_request)) = existing_stream - .add_subscription_request(storage_service_config, subscription_request) - { - // Something went wrong when adding the request to the stream - sample!( - SampleRate::Duration(Duration::from_secs(INVALID_REQUEST_LOG_FREQUENCY_SECS)), - warn!(LogSchema::new(LogEntry::SubscriptionRequest) - .error(&error) - .peer_network_id(&peer_network_id) - .request(&request) - ); + // Update the subscription metrics with the new request + update_new_subscription_metrics(peer_network_id); + + // Get the subscription stream entry for the peer. Internally, this will + // lock the entry, to prevent other requests (for the same peer) from + // modifying the subscription stream entry. + let subscription_stream_entry = self.subscriptions.entry(peer_network_id); + + // If the entry is empty, or the stream ID does not match the request ID, + // create a new subscription stream for the peer. Otherwise, add the + // request to the existing stream (the stream IDs match!). + match subscription_stream_entry { + Entry::Occupied(mut occupied_entry) => { + // If the stream has a different ID than the request, replace the stream. + // Otherwise, add the request to the existing stream. 
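This replace-or-add branch relies on DashMap's entry API, which locks only the shard holding the peer's entry rather than the whole map. A minimal standalone sketch of the same pattern, with a hypothetical Stream type standing in for SubscriptionStreamRequests:

use dashmap::{mapref::entry::Entry, DashMap};

// Hypothetical stand-in for SubscriptionStreamRequests (illustration only)
struct Stream {
    stream_id: u64,
    pending_request_indices: Vec<u64>,
}

fn add_or_replace_stream(streams: &DashMap<u64, Stream>, peer: u64, stream_id: u64, index: u64) {
    match streams.entry(peer) {
        Entry::Occupied(mut occupied_entry) => {
            if occupied_entry.get().stream_id != stream_id {
                // A request with a new stream ID replaces the peer's existing stream
                occupied_entry.replace_entry(Stream {
                    stream_id,
                    pending_request_indices: vec![index],
                });
            } else {
                // The stream IDs match, so the request joins the existing stream
                occupied_entry.get_mut().pending_request_indices.push(index);
            }
        },
        Entry::Vacant(vacant_entry) => {
            // No stream exists for this peer, so create one
            vacant_entry.insert(Stream {
                stream_id,
                pending_request_indices: vec![index],
            });
        },
    }
}

fn main() {
    let streams = DashMap::new();
    add_or_replace_stream(&streams, 1, 100, 0); // creates a new stream for peer 1
    add_or_replace_stream(&streams, 1, 100, 1); // same stream ID: appends the request
    add_or_replace_stream(&streams, 1, 101, 0); // new stream ID: replaces the stream
    assert_eq!(streams.get(&1).unwrap().stream_id, 101);
}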
+ let existing_stream_id = occupied_entry.get().subscription_stream_id(); + if existing_stream_id != request_stream_id { + // Create a new subscription stream for the peer + let subscription_stream = SubscriptionStreamRequests::new( + subscription_request, + self.time_service.clone(), ); + occupied_entry.replace_entry(subscription_stream); // Update the subscription metrics - increment_counter( - &metrics::SUBSCRIPTION_EVENTS, - peer_network_id.network_id(), - SUBSCRIPTION_FAILURE.into(), - ); - - // Notify the client of the failure - self.send_response( - request, - Err(StorageServiceError::InvalidRequest(error.to_string())), - subscription_request.take_response_sender(), - ); - return; + update_created_stream_metrics(&peer_network_id); + } else { + // Add the request to the existing stream + if let Err((error, subscription_request)) = occupied_entry + .get_mut() + .add_subscription_request(storage_service_config, subscription_request) + { + // Handle the subscription failure + self.handle_subscription_request_failure( + peer_network_id, + request, + error, + subscription_request, + ); + } } - } - } else { - // Create a new stream (either no stream exists, or we have a new stream ID) - let subscription_stream_requests = - SubscriptionStreamRequests::new(subscription_request, self.time_service.clone()); - subscriptions.insert(peer_network_id, subscription_stream_requests); + }, + Entry::Vacant(vacant_entry) => { + // Create a new subscription stream for the peer + let subscription_stream = SubscriptionStreamRequests::new( + subscription_request, + self.time_service.clone(), + ); + vacant_entry.insert(subscription_stream); + + // Update the subscription metrics + update_created_stream_metrics(&peer_network_id); + }, } + } + + /// Handles a subscription request failure by logging the error, + /// updating the subscription metrics, and notifying the client. 
+ fn handle_subscription_request_failure( + &self, + peer_network_id: PeerNetworkId, + request: StorageServiceRequest, + error: Error, + subscription_request: SubscriptionRequest, + ) { + // Something went wrong when adding the request to the stream + sample!( + SampleRate::Duration(Duration::from_secs(INVALID_REQUEST_LOG_FREQUENCY_SECS)), + warn!(LogSchema::new(LogEntry::SubscriptionRequest) + .error(&error) + .peer_network_id(&peer_network_id) + .request(&request) + ); + ); // Update the subscription metrics - increment_counter( - &metrics::SUBSCRIPTION_EVENTS, - peer_network_id.network_id(), - SUBSCRIPTION_ADD.into(), + update_failed_subscription_metrics(peer_network_id); + + // Notify the client of the failure + self.send_response( + request, + Err(StorageServiceError::InvalidRequest(error.to_string())), + subscription_request.take_response_sender(), ); } @@ -477,6 +504,33 @@ impl Handler { } } +/// Updates the subscription metrics with a created subscription stream event +fn update_created_stream_metrics(peer_network_id: &PeerNetworkId) { + increment_counter( + &metrics::SUBSCRIPTION_EVENTS, + peer_network_id.network_id(), + SUBSCRIPTION_NEW_STREAM.into(), + ); +} + +/// Updates the subscription metrics with a failed stream request +fn update_failed_subscription_metrics(peer_network_id: PeerNetworkId) { + increment_counter( + &metrics::SUBSCRIPTION_EVENTS, + peer_network_id.network_id(), + SUBSCRIPTION_FAILURE.into(), + ); +} + +/// Updates the subscription metrics with a new stream request +fn update_new_subscription_metrics(peer_network_id: PeerNetworkId) { + increment_counter( + &metrics::SUBSCRIPTION_EVENTS, + peer_network_id.network_id(), + SUBSCRIPTION_ADD.into(), + ); +} + /// Logs the response sent by storage for a peer request fn log_storage_response( storage_request: StorageServiceRequest, diff --git a/state-sync/storage-service/server/src/lib.rs b/state-sync/storage-service/server/src/lib.rs index 0f6d4d225153e..ced37a863620f 100644 --- a/state-sync/storage-service/server/src/lib.rs +++ b/state-sync/storage-service/server/src/lib.rs @@ -32,7 +32,7 @@ use handler::Handler; use lru::LruCache; use moderator::RequestModerator; use optimistic_fetch::OptimisticFetchRequest; -use std::{collections::HashMap, ops::Deref, sync::Arc, time::Duration}; +use std::{ops::Deref, sync::Arc, time::Duration}; use storage::StorageReaderInterface; use thiserror::Error; use tokio::runtime::Handle; @@ -78,9 +78,8 @@ pub struct StorageServiceServer { // A set of active optimistic fetches for peers waiting for new data optimistic_fetches: Arc>, - // TODO: Reduce lock contention on the mutex. 
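The removed TODO is what the switch from Mutex<HashMap> to DashMap addresses: with a single map-wide mutex, every peer's request serializes on one lock, whereas DashMap only locks the shard that holds the touched entry. A rough sketch of the access-pattern difference, using std::sync::Mutex and plain u64 values as stand-ins for the real aptos_infallible::Mutex and SubscriptionStreamRequests:

use dashmap::DashMap;
use std::{
    collections::HashMap,
    sync::{Arc, Mutex},
};

// Before: every caller serializes on the single map-wide lock
fn bump_with_mutex(subscriptions: &Arc<Mutex<HashMap<u64, u64>>>, peer: u64) {
    let mut map = subscriptions.lock().unwrap();
    *map.entry(peer).or_insert(0) += 1;
}

// After: only the shard that holds this peer's entry is locked,
// so requests for different peers can proceed concurrently
fn bump_with_dashmap(subscriptions: &Arc<DashMap<u64, u64>>, peer: u64) {
    *subscriptions.entry(peer).or_insert(0) += 1;
}

fn main() {
    let with_mutex = Arc::new(Mutex::new(HashMap::new()));
    let with_dashmap = Arc::new(DashMap::new());
    bump_with_mutex(&with_mutex, 7);
    bump_with_dashmap(&with_dashmap, 7);
}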
// A set of active subscriptions for peers waiting for new data - subscriptions: Arc>>, + subscriptions: Arc>, // A moderator for incoming peer requests request_moderator: Arc, @@ -114,7 +113,7 @@ impl StorageServiceServer { let lru_response_cache = Arc::new(Mutex::new(LruCache::new( storage_service_config.max_lru_cache_size as usize, ))); - let subscriptions = Arc::new(Mutex::new(HashMap::new())); + let subscriptions = Arc::new(DashMap::new()); let request_moderator = Arc::new(RequestModerator::new( aptos_data_client_config, cached_storage_server_summary.clone(), @@ -463,7 +462,7 @@ impl StorageServiceServer { /// Returns a copy of the active subscriptions for test purposes pub(crate) fn get_subscriptions( &self, - ) -> Arc>> { + ) -> Arc> { self.subscriptions.clone() } } @@ -478,7 +477,7 @@ async fn handle_active_optimistic_fetches( lru_response_cache: Arc>>, request_moderator: Arc, storage: T, - subscriptions: Arc>>, + subscriptions: Arc>, time_service: TimeService, ) { if let Err(error) = optimistic_fetch::handle_active_optimistic_fetches( @@ -510,7 +509,7 @@ async fn handle_active_subscriptions( lru_response_cache: Arc>>, request_moderator: Arc, storage: T, - subscriptions: Arc>>, + subscriptions: Arc>, time_service: TimeService, ) { if let Err(error) = subscription::handle_active_subscriptions( diff --git a/state-sync/storage-service/server/src/metrics.rs b/state-sync/storage-service/server/src/metrics.rs index 5fec3643d5989..e8e76b7729f14 100644 --- a/state-sync/storage-service/server/src/metrics.rs +++ b/state-sync/storage-service/server/src/metrics.rs @@ -17,6 +17,7 @@ pub const OPTIMISTIC_FETCH_EXPIRE: &str = "optimistic_fetch_expire"; pub const SUBSCRIPTION_ADD: &str = "subscription_add"; pub const SUBSCRIPTION_EXPIRE: &str = "subscription_expire"; pub const SUBSCRIPTION_FAILURE: &str = "subscription_failure"; +pub const SUBSCRIPTION_NEW_STREAM: &str = "subscription_new_stream"; /// Gauge for tracking the number of actively ignored peers pub static IGNORED_PEER_COUNT: Lazy = Lazy::new(|| { diff --git a/state-sync/storage-service/server/src/moderator.rs b/state-sync/storage-service/server/src/moderator.rs index bf94228018538..31c30c3bcae17 100644 --- a/state-sync/storage-service/server/src/moderator.rs +++ b/state-sync/storage-service/server/src/moderator.rs @@ -151,7 +151,11 @@ impl RequestModerator { let storage_server_summary = self.cached_storage_server_summary.load(); // Verify the request is serviceable using the current storage server summary - if !storage_server_summary.can_service(&self.aptos_data_client_config, request) { + if !storage_server_summary.can_service( + &self.aptos_data_client_config, + self.time_service.clone(), + request, + ) { // Increment the invalid request count for the peer let mut unhealthy_peer_states = self.unhealthy_peer_states.write(); let unhealthy_peer_state = unhealthy_peer_states diff --git a/state-sync/storage-service/server/src/optimistic_fetch.rs b/state-sync/storage-service/server/src/optimistic_fetch.rs index f02832b337a0f..7fcc34fa0f19b 100644 --- a/state-sync/storage-service/server/src/optimistic_fetch.rs +++ b/state-sync/storage-service/server/src/optimistic_fetch.rs @@ -182,7 +182,7 @@ pub(crate) async fn handle_active_optimistic_fetches( lru_response_cache: Arc>>, request_moderator: Arc, storage: T, - subscriptions: Arc>>, + subscriptions: Arc>, time_service: TimeService, ) -> Result<(), Error> { // Update the number of active optimistic fetches @@ -230,7 +230,7 @@ async fn handle_ready_optimistic_fetches( lru_response_cache: Arc>>, 
request_moderator: Arc, storage: T, - subscriptions: Arc>>, + subscriptions: Arc>, time_service: TimeService, peers_with_ready_optimistic_fetches: Vec<(PeerNetworkId, LedgerInfoWithSignatures)>, ) { @@ -314,7 +314,7 @@ pub(crate) async fn get_peers_with_ready_optimistic_fetches>>, request_moderator: Arc, storage: T, - subscriptions: Arc>>, + subscriptions: Arc>, time_service: TimeService, ) -> aptos_storage_service_types::Result, Error> { // Fetch the latest storage summary and highest synced version @@ -367,7 +367,7 @@ async fn identify_expired_invalid_and_ready_fetches( config: StorageServiceConfig, cached_storage_server_summary: Arc>, optimistic_fetches: Arc>, - subscriptions: Arc>>, + subscriptions: Arc>, lru_response_cache: Arc>>, request_moderator: Arc, storage: T, @@ -436,7 +436,7 @@ async fn identify_ready_and_invalid_optimistic_fetches>, optimistic_fetches: Arc>, - subscriptions: Arc>>, + subscriptions: Arc>, lru_response_cache: Arc>>, request_moderator: Arc, storage: T, @@ -566,13 +566,18 @@ fn remove_invalid_optimistic_fetches( peers_with_invalid_optimistic_fetches: Vec, ) { for peer_network_id in peers_with_invalid_optimistic_fetches { - if let Some((_, optimistic_fetch)) = optimistic_fetches.remove(&peer_network_id) { + if let Some((peer_network_id, optimistic_fetch)) = + optimistic_fetches.remove(&peer_network_id) + { warn!(LogSchema::new(LogEntry::OptimisticFetchRefresh) .error(&Error::InvalidRequest( "Mismatch between known version and epoch!".into() )) .request(&optimistic_fetch.request) - .message("Dropping invalid optimistic fetch request!")); + .message(&format!( + "Dropping invalid optimistic fetch request for peer: {:?}!", + peer_network_id + ))); } } } diff --git a/state-sync/storage-service/server/src/subscription.rs b/state-sync/storage-service/server/src/subscription.rs index 9bb26e8fe2bcb..018682dd71081 100644 --- a/state-sync/storage-service/server/src/subscription.rs +++ b/state-sync/storage-service/server/src/subscription.rs @@ -515,7 +515,7 @@ pub(crate) async fn handle_active_subscriptions( lru_response_cache: Arc>>, request_moderator: Arc, storage: T, - subscriptions: Arc>>, + subscriptions: Arc>, time_service: TimeService, ) -> Result<(), Error> { // Continuously handle the subscriptions until we identify that @@ -570,7 +570,7 @@ async fn handle_ready_subscriptions( lru_response_cache: Arc>>, request_moderator: Arc, storage: T, - subscriptions: Arc>>, + subscriptions: Arc>, time_service: TimeService, peers_with_ready_subscriptions: Vec<(PeerNetworkId, LedgerInfoWithSignatures)>, ) { @@ -579,14 +579,14 @@ async fn handle_ready_subscriptions( for (peer_network_id, target_ledger_info) in peers_with_ready_subscriptions { // Remove the subscription from the active subscription stream let subscription_request_and_known_version = - subscriptions.clone().lock().get_mut(&peer_network_id).map( - |subscription_stream_requests| { + subscriptions + .get_mut(&peer_network_id) + .map(|mut subscription_stream_requests| { ( subscription_stream_requests.pop_first_pending_request(), subscription_stream_requests.highest_known_version, ) - }, - ); + }); // Handle the subscription if let Some((Some(subscription_request), known_version)) = @@ -640,8 +640,8 @@ async fn handle_ready_subscriptions( ) { Ok(data_response) => { // Update the streams known version and epoch - if let Some(subscription_stream_requests) = - subscriptions.lock().get_mut(&peer_network_id) + if let Some(mut subscription_stream_requests) = + subscriptions.get_mut(&peer_network_id) { // Update the known 
version and epoch for the stream subscription_stream_requests @@ -692,7 +692,7 @@ pub(crate) async fn get_peers_with_ready_subscriptions>>, request_moderator: Arc, storage: T, - subscriptions: Arc>>, + subscriptions: Arc>, time_service: TimeService, ) -> aptos_storage_service_types::Result, Error> { // Fetch the latest storage summary and highest synced version @@ -742,7 +742,7 @@ async fn identify_expired_invalid_and_ready_subscriptions>, optimistic_fetches: Arc>, - subscriptions: Arc>>, + subscriptions: Arc>, lru_response_cache: Arc>>, request_moderator: Arc, storage: T, @@ -759,7 +759,11 @@ async fn identify_expired_invalid_and_ready_subscriptions( bounded_executor: BoundedExecutor, cached_storage_server_summary: Arc>, optimistic_fetches: Arc>, - subscriptions: Arc>>, + subscriptions: Arc>, lru_response_cache: Arc>>, request_moderator: Arc, storage: T, @@ -919,11 +923,11 @@ async fn identify_ready_and_invalid_subscriptions( /// Removes the expired subscription streams from the active map fn remove_expired_subscriptions( - subscriptions: Arc>>, + subscriptions: Arc>, peers_with_expired_subscriptions: Vec, ) { for peer_network_id in peers_with_expired_subscriptions { - if subscriptions.lock().remove(&peer_network_id).is_some() { + if subscriptions.remove(&peer_network_id).is_some() { increment_counter( &metrics::SUBSCRIPTION_EVENTS, peer_network_id.network_id(), @@ -935,18 +939,21 @@ fn remove_expired_subscriptions( /// Removes the invalid subscription streams from the active map fn remove_invalid_subscriptions( - subscriptions: Arc>>, + subscriptions: Arc>, peers_with_invalid_subscriptions: Vec, ) { for peer_network_id in peers_with_invalid_subscriptions { - if let Some(subscription_stream_requests) = subscriptions.lock().remove(&peer_network_id) { + if let Some((peer_network_id, subscription_stream_requests)) = + subscriptions.remove(&peer_network_id) + { warn!(LogSchema::new(LogEntry::SubscriptionRefresh) .error(&Error::InvalidRequest( "Mismatch between known version and epoch!".into() )) .message(&format!( - "Dropping invalid subscription stream with ID: {:?}!", - subscription_stream_requests.subscription_stream_id() + "Dropping invalid subscription stream with ID: {:?}, for peer: {:?}!", + subscription_stream_requests.subscription_stream_id(), + peer_network_id ))); } } @@ -954,15 +961,15 @@ fn remove_invalid_subscriptions( /// Updates the active subscription metrics for each network fn update_active_subscription_metrics( - subscriptions: Arc>>, + subscriptions: Arc>, ) { // Calculate the total number of subscriptions for each network let mut num_validator_subscriptions = 0; let mut num_vfn_subscriptions = 0; let mut num_public_subscriptions = 0; - for subscription_stream_requests in subscriptions.lock().iter() { + for subscription in subscriptions.iter() { // Get the peer network ID - let peer_network_id = subscription_stream_requests.0; + let peer_network_id = *subscription.key(); // Increment the number of subscriptions for the peer's network match peer_network_id.network_id() { diff --git a/state-sync/storage-service/server/src/tests/optimistic_fetch.rs b/state-sync/storage-service/server/src/tests/optimistic_fetch.rs index 7b51375045e8d..e6ec7b20a8b59 100644 --- a/state-sync/storage-service/server/src/tests/optimistic_fetch.rs +++ b/state-sync/storage-service/server/src/tests/optimistic_fetch.rs @@ -30,7 +30,7 @@ use dashmap::DashMap; use futures::channel::oneshot; use lru::LruCache; use rand::{rngs::OsRng, Rng}; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; 
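Two DashMap API differences show up repeatedly in the refactor above: remove returns the owned (key, value) pair rather than just the value, and iteration yields shard-locked guards read through key() and value(). A small sketch under those assumptions:

use dashmap::DashMap;

fn main() {
    let subscriptions: DashMap<&'static str, u64> = DashMap::new();
    subscriptions.insert("validator_peer", 10);
    subscriptions.insert("vfn_peer", 20);

    // Iteration yields shard-locked guards; read them via key() / value()
    let mut total = 0;
    for entry in subscriptions.iter() {
        println!("peer = {}, highest_known_version = {}", entry.key(), entry.value());
        total += *entry.value();
    }
    assert_eq!(total, 30);

    // remove() returns the owned (key, value) pair, unlike HashMap::remove
    if let Some((peer, version)) = subscriptions.remove("vfn_peer") {
        println!("dropped {} at version {}", peer, version);
    }
    assert_eq!(subscriptions.len(), 1);
}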
use tokio::runtime::Handle; #[tokio::test] @@ -78,7 +78,7 @@ async fn test_peers_with_ready_optimistic_fetches() { storage_service_config, time_service.clone(), )); - let subscriptions = Arc::new(Mutex::new(HashMap::new())); + let subscriptions = Arc::new(DashMap::new()); // Verify that there are no peers with ready optimistic fetches let peers_with_ready_optimistic_fetches = @@ -175,7 +175,7 @@ async fn test_remove_expired_optimistic_fetches() { storage_service_config, time_service.clone(), )); - let subscriptions = Arc::new(Mutex::new(HashMap::new())); + let subscriptions = Arc::new(DashMap::new()); // Create the first batch of test optimistic fetches let num_optimistic_fetches_in_batch = 10; diff --git a/state-sync/storage-service/server/src/tests/state_values.rs b/state-sync/storage-service/server/src/tests/state_values.rs index 2f72ab2b5e763..237b889095b89 100644 --- a/state-sync/storage-service/server/src/tests/state_values.rs +++ b/state-sync/storage-service/server/src/tests/state_values.rs @@ -20,6 +20,7 @@ use aptos_types::{ state_value::{StateValue, StateValueChunkWithProof}, }, }; +use bytes::Bytes; use claims::assert_matches; use mockall::{predicate::eq, Sequence}; use rand::Rng; @@ -182,9 +183,10 @@ fn create_state_keys_and_values( ) -> Vec<(StateKey, StateValue)> { // Generate random bytes of the given size let mut rng = rand::thread_rng(); - let random_bytes: Vec = (0..min_bytes_per_key_value) + let random_bytes: Bytes = (0..min_bytes_per_key_value) .map(|_| rng.gen::()) - .collect(); + .collect::>() + .into(); // Create the requested keys and values (0..num_keys_and_values) diff --git a/state-sync/storage-service/server/src/tests/subscription.rs b/state-sync/storage-service/server/src/tests/subscription.rs index 1ccb793211323..e34276932dc0f 100644 --- a/state-sync/storage-service/server/src/tests/subscription.rs +++ b/state-sync/storage-service/server/src/tests/subscription.rs @@ -32,14 +32,14 @@ use claims::assert_matches; use dashmap::DashMap; use futures::channel::oneshot; use lru::LruCache; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; use tokio::runtime::Handle; #[tokio::test] async fn test_peers_with_ready_subscriptions() { // Create a mock time service and subscriptions map let time_service = TimeService::mock(); - let subscriptions = Arc::new(Mutex::new(HashMap::new())); + let subscriptions = Arc::new(DashMap::new()); // Create three peers with ready subscriptions let mut peer_network_ids = vec![]; @@ -56,9 +56,7 @@ async fn test_peers_with_ready_subscriptions() { Some(0), Some(0), ); - subscriptions - .lock() - .insert(peer_network_id, subscription_stream_requests); + subscriptions.insert(peer_network_id, subscription_stream_requests); } // Create epoch ending test data at version 9 @@ -130,7 +128,7 @@ async fn test_peers_with_ready_subscriptions() { )]); // Manually remove subscription 1 from the map - subscriptions.lock().remove(&peer_network_ids[0]); + subscriptions.remove(&peer_network_ids[0]); // Update the storage server summary so that there is new data (at version 8) let highest_synced_ledger_info = @@ -156,7 +154,7 @@ async fn test_peers_with_ready_subscriptions() { )]); // Manually remove subscription 2 from the map - subscriptions.lock().remove(&peer_network_ids[1]); + subscriptions.remove(&peer_network_ids[1]); // Update the storage server summary so that there is new data (at version 100) let _ = utils::update_storage_summary_cache(cached_storage_server_summary.clone(), 100, 2); @@ -179,7 +177,7 @@ async fn 
test_peers_with_ready_subscriptions() { assert_eq!(peers_with_ready_subscriptions, vec![]); // Verify that the subscriptions are now empty - assert!(subscriptions.lock().is_empty()); + assert!(subscriptions.is_empty()); } #[tokio::test] @@ -212,17 +210,15 @@ async fn test_remove_expired_subscriptions_no_new_data() { // Create the first batch of test subscriptions let num_subscriptions_in_batch = 10; - let subscriptions = Arc::new(Mutex::new(HashMap::new())); + let subscriptions = Arc::new(DashMap::new()); for _ in 0..num_subscriptions_in_batch { let subscription_stream_requests = create_subscription_stream_requests(time_service.clone(), Some(9), Some(9), None, None); - subscriptions - .lock() - .insert(PeerNetworkId::random(), subscription_stream_requests); + subscriptions.insert(PeerNetworkId::random(), subscription_stream_requests); } // Verify the number of active subscriptions - assert_eq!(subscriptions.lock().len(), num_subscriptions_in_batch); + assert_eq!(subscriptions.len(), num_subscriptions_in_batch); // Elapse a small amount of time (not enough to expire the subscriptions) utils::elapse_time(max_subscription_period_ms / 2, &time_service).await; @@ -245,19 +241,17 @@ async fn test_remove_expired_subscriptions_no_new_data() { .await .unwrap(); assert!(peers_with_ready_subscriptions.is_empty()); - assert_eq!(subscriptions.lock().len(), num_subscriptions_in_batch); + assert_eq!(subscriptions.len(), num_subscriptions_in_batch); // Create another batch of test subscriptions for _ in 0..num_subscriptions_in_batch { let subscription_stream_requests = create_subscription_stream_requests(time_service.clone(), Some(9), Some(9), None, None); - subscriptions - .lock() - .insert(PeerNetworkId::random(), subscription_stream_requests); + subscriptions.insert(PeerNetworkId::random(), subscription_stream_requests); } // Verify the new number of active subscriptions - assert_eq!(subscriptions.lock().len(), num_subscriptions_in_batch * 2); + assert_eq!(subscriptions.len(), num_subscriptions_in_batch * 2); // Elapse enough time to expire the first batch of subscriptions utils::elapse_time(max_subscription_period_ms, &time_service).await; @@ -277,7 +271,7 @@ async fn test_remove_expired_subscriptions_no_new_data() { .await .unwrap(); assert!(peers_with_ready_subscriptions.is_empty()); - assert_eq!(subscriptions.lock().len(), num_subscriptions_in_batch); + assert_eq!(subscriptions.len(), num_subscriptions_in_batch); // Elapse enough time to expire the second batch of subscriptions utils::elapse_time(max_subscription_period_ms, &time_service).await; @@ -297,7 +291,7 @@ async fn test_remove_expired_subscriptions_no_new_data() { .await .unwrap(); assert!(peers_with_ready_subscriptions.is_empty()); - assert!(subscriptions.lock().is_empty()); + assert!(subscriptions.is_empty()); } #[tokio::test] @@ -314,7 +308,7 @@ async fn test_remove_expired_subscriptions_blocked_stream() { // Create a batch of test subscriptions let num_subscriptions_in_batch = 10; - let subscriptions = Arc::new(Mutex::new(HashMap::new())); + let subscriptions = Arc::new(DashMap::new()); let mut peer_network_ids = vec![]; for i in 0..num_subscriptions_in_batch { // Create a new peer @@ -329,9 +323,7 @@ async fn test_remove_expired_subscriptions_blocked_stream() { Some(i as u64), Some(0), ); - subscriptions - .lock() - .insert(peer_network_id, subscription_stream_requests); + subscriptions.insert(peer_network_id, subscription_stream_requests); } // Create test data with an empty storage server summary @@ -372,11 +364,9 @@ async fn 
test_remove_expired_subscriptions_blocked_stream() { // Verify that all subscription streams are now empty because // the pending requests were sent. - assert_eq!(subscriptions.lock().len(), num_subscriptions_in_batch); - for (_, subscription_stream_requests) in subscriptions.lock().iter() { - assert!(subscription_stream_requests - .first_pending_request() - .is_none()); + assert_eq!(subscriptions.len(), num_subscriptions_in_batch); + for subscription in subscriptions.iter() { + assert!(subscription.value().first_pending_request().is_none()); } // Elapse enough time to expire the blocked streams @@ -407,8 +397,8 @@ async fn test_remove_expired_subscriptions_blocked_stream() { .await .unwrap(); assert!(peers_with_ready_subscriptions.is_empty()); - assert_eq!(subscriptions.lock().len(), 1); - assert!(subscriptions.lock().contains_key(&peer_network_ids[0])); + assert_eq!(subscriptions.len(), 1); + assert!(subscriptions.contains_key(&peer_network_ids[0])); } #[tokio::test] @@ -425,7 +415,7 @@ async fn test_remove_expired_subscriptions_blocked_stream_index() { // Create the first batch of test subscriptions let num_subscriptions_in_batch = 10; - let subscriptions = Arc::new(Mutex::new(HashMap::new())); + let subscriptions = Arc::new(DashMap::new()); for _ in 0..num_subscriptions_in_batch { let subscription_stream_requests = create_subscription_stream_requests( time_service.clone(), @@ -434,9 +424,7 @@ async fn test_remove_expired_subscriptions_blocked_stream_index() { None, Some(0), ); - subscriptions - .lock() - .insert(PeerNetworkId::random(), subscription_stream_requests); + subscriptions.insert(PeerNetworkId::random(), subscription_stream_requests); } // Create test data with an empty storage server summary @@ -498,7 +486,7 @@ async fn test_remove_expired_subscriptions_blocked_stream_index() { .await .unwrap(); assert!(peers_with_ready_subscriptions.is_empty()); - assert!(subscriptions.lock().is_empty()); + assert!(subscriptions.is_empty()); // Create another batch of test subscriptions (where the stream is // blocked on the next index to serve). 
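These expiration tests advance a mock clock instead of sleeping for real time. A minimal sketch of that pattern, assuming the aptos-time-service crate with its testing feature enabled:

use aptos_time_service::{TimeService, TimeServiceTrait};

fn main() {
    // Mock clock: time only moves when the test advances it
    let time_service = TimeService::mock();
    let start_usecs = time_service.now_unix_time().as_micros() as u64;

    // Advance the clock past a hypothetical expiry period without sleeping
    let max_subscription_period_secs = 30;
    time_service
        .clone()
        .into_mock()
        .advance_secs(max_subscription_period_secs);

    let elapsed_usecs = time_service.now_unix_time().as_micros() as u64 - start_usecs;
    assert_eq!(elapsed_usecs, max_subscription_period_secs * 1_000_000);
}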
@@ -516,13 +504,11 @@ async fn test_remove_expired_subscriptions_blocked_stream_index() { None, Some(i as u64 + 1), ); - subscriptions - .lock() - .insert(peer_network_id, subscription_stream_requests); + subscriptions.insert(peer_network_id, subscription_stream_requests); } // Verify the number of active subscriptions - assert_eq!(subscriptions.lock().len(), num_subscriptions_in_batch); + assert_eq!(subscriptions.len(), num_subscriptions_in_batch); // Verify that none of the subscriptions are ready to be served (they are blocked) let peers_with_ready_subscriptions = subscription::get_peers_with_ready_subscriptions( @@ -586,8 +572,8 @@ async fn test_remove_expired_subscriptions_blocked_stream_index() { ) .await .unwrap(); - assert_eq!(subscriptions.lock().len(), 1); - assert!(subscriptions.lock().contains_key(&peer_network_ids[0])); + assert_eq!(subscriptions.len(), 1); + assert!(subscriptions.contains_key(&peer_network_ids[0])); } #[tokio::test(flavor = "multi_thread")] @@ -596,7 +582,7 @@ async fn test_subscription_invalid_requests() { let time_service = TimeService::mock(); // Create a new batch of subscriptions that includes a single stream and request - let subscriptions = Arc::new(Mutex::new(HashMap::new())); + let subscriptions = Arc::new(DashMap::new()); let peer_network_id = PeerNetworkId::random(); let peer_known_version = 10; let peer_known_epoch = 1; @@ -608,9 +594,7 @@ async fn test_subscription_invalid_requests() { Some(subscription_stream_id), Some(0), ); - subscriptions - .lock() - .insert(peer_network_id, subscription_stream_requests); + subscriptions.insert(peer_network_id, subscription_stream_requests); // Add a request to the stream that is invalid (the stream id is incorrect) let subscription_request = create_subscription_request( @@ -662,10 +646,10 @@ async fn test_subscription_invalid_requests() { // Update the next index to serve for the stream let next_index_to_serve = 10; - let mut subscriptions_lock = subscriptions.lock(); - let subscription_stream_requests = subscriptions_lock.get_mut(&peer_network_id).unwrap(); + let mut subscription = subscriptions.get_mut(&peer_network_id).unwrap(); + let subscription_stream_requests = subscription.value_mut(); subscription_stream_requests.set_next_index_to_serve(next_index_to_serve); - drop(subscriptions_lock); + drop(subscription); // Add a request to the stream that is invalid (the stream index is less than the next index to serve) let subscription_request = create_subscription_request( @@ -993,11 +977,11 @@ async fn test_subscription_overwrite_streams() { /// Adds a subscription request to the subscription stream for the given peer fn add_subscription_request_to_stream( subscription_request: SubscriptionRequest, - subscriptions: Arc>>, + subscriptions: Arc>, peer_network_id: &PeerNetworkId, ) -> Result<(), (Error, SubscriptionRequest)> { - let mut subscriptions = subscriptions.lock(); - let subscription_stream_requests = subscriptions.get_mut(peer_network_id).unwrap(); + let mut subscription = subscriptions.get_mut(peer_network_id).unwrap(); + let subscription_stream_requests = subscription.value_mut(); subscription_stream_requests .add_subscription_request(StorageServiceConfig::default(), subscription_request) } @@ -1101,15 +1085,15 @@ fn create_subscription_stream_requests( /// Verifies that the pending subscription request indices are valid. /// Note the expected end indices are exclusive. 
fn verify_pending_subscription_request_indices( - active_subscriptions: Arc>>, + active_subscriptions: Arc>, peer_network_id: PeerNetworkId, expected_start_index: u64, expected_end_index: u64, ignored_end_index: u64, ) { // Get the pending subscription requests - let mut active_subscriptions = active_subscriptions.lock(); - let subscription_stream_requests = active_subscriptions.get_mut(&peer_network_id).unwrap(); + let mut subscription = active_subscriptions.get_mut(&peer_network_id).unwrap(); + let subscription_stream_requests = subscription.value_mut(); let pending_subscription_requests = subscription_stream_requests.get_pending_subscription_requests(); diff --git a/state-sync/storage-service/server/src/tests/utils.rs b/state-sync/storage-service/server/src/tests/utils.rs index 946e4dc438624..8884d45ff7713 100644 --- a/state-sync/storage-service/server/src/tests/utils.rs +++ b/state-sync/storage-service/server/src/tests/utils.rs @@ -13,7 +13,6 @@ use aptos_config::{ network_id::{NetworkId, PeerNetworkId}, }; use aptos_crypto::{ed25519::Ed25519PrivateKey, HashValue, PrivateKey, SigningKey, Uniform}; -use aptos_infallible::Mutex; use aptos_logger::Level; use aptos_network::protocols::network::RpcError; use aptos_storage_service_notifications::{ @@ -776,13 +775,13 @@ pub fn update_storage_summary_cache( /// Verifies that the peer has an active subscription stream /// and that the stream has the appropriate ID. pub fn verify_active_stream_id_for_peer( - active_subscriptions: Arc>>, + active_subscriptions: Arc>, peer_network_id: PeerNetworkId, new_stream_id: u64, ) { // Get the subscription stream requests for the peer - let mut active_subscriptions = active_subscriptions.lock(); - let subscription_stream_requests = active_subscriptions.get_mut(&peer_network_id).unwrap(); + let subscription = active_subscriptions.get(&peer_network_id).unwrap(); + let subscription_stream_requests = subscription.value(); // Verify the stream ID is correct assert_eq!( @@ -910,7 +909,7 @@ pub async fn verify_output_subscription_response( /// Verifies the state of an active subscription stream entry. /// This is useful for manually testing internal logic. pub fn verify_subscription_stream_entry( - active_subscriptions: Arc>>, + active_subscriptions: Arc>, peer_network_id: PeerNetworkId, num_requests_per_batch: u64, peer_known_version: u64, @@ -918,8 +917,8 @@ pub fn verify_subscription_stream_entry( max_transaction_output_chunk_size: u64, ) { // Get the subscription stream for the specified peer - let mut active_subscriptions = active_subscriptions.lock(); - let subscription_stream_requests = active_subscriptions.get_mut(&peer_network_id).unwrap(); + let mut subscription = active_subscriptions.get_mut(&peer_network_id).unwrap(); + let subscription_stream_requests = subscription.value_mut(); // Get the next index to serve on the stream let next_index_to_serve = subscription_stream_requests.get_next_index_to_serve(); @@ -975,7 +974,7 @@ pub async fn wait_for_active_optimistic_fetches( /// Waits for the specified number of active stream requests for /// the given peer ID. 
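Helpers like these read a single peer's stream through get_mut, which hands back a RefMut guard that holds the corresponding shard lock; the guard must be released (explicitly via drop, as in the invalid-request test earlier) before other calls touch the same shard. A small sketch with a simplified value type:

use dashmap::DashMap;

fn main() {
    let active_subscriptions: DashMap<u32, Vec<u64>> = DashMap::new();
    active_subscriptions.insert(7, Vec::new());

    // get_mut returns a RefMut guard that pins the shard containing key 7
    let mut subscription = active_subscriptions.get_mut(&7).unwrap();
    subscription.value_mut().push(42);

    // Release the guard before issuing further calls that may need the same shard,
    // otherwise those calls can deadlock on the shard lock
    drop(subscription);

    assert_eq!(active_subscriptions.remove(&7), Some((7, vec![42])));
}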
pub async fn wait_for_active_stream_requests( - active_subscriptions: Arc>>, + active_subscriptions: Arc>, peer_network_id: PeerNetworkId, expected_num_active_stream_requests: usize, ) { @@ -983,10 +982,9 @@ pub async fn wait_for_active_stream_requests( let check_active_stream_requests = async move { loop { // Check if the number of active stream requests matches - if let Some(subscription_stream_requests) = - active_subscriptions.lock().get_mut(&peer_network_id) - { - let num_active_stream_requests = subscription_stream_requests + if let Some(mut subscription) = active_subscriptions.get_mut(&peer_network_id) { + let num_active_stream_requests = subscription + .value_mut() .get_pending_subscription_requests() .len(); if num_active_stream_requests == expected_num_active_stream_requests { @@ -1012,15 +1010,14 @@ pub async fn wait_for_active_stream_requests( /// Waits for the specified number of subscriptions to be active pub async fn wait_for_active_subscriptions( - active_subscriptions: Arc>>, + active_subscriptions: Arc>, expected_num_active_subscriptions: usize, ) { // Wait for the specified number of active subscriptions let check_active_subscriptions = async move { loop { // Check if the number of active subscriptions matches - let num_active_subscriptions = active_subscriptions.lock().len(); - if num_active_subscriptions == expected_num_active_subscriptions { + if active_subscriptions.len() == expected_num_active_subscriptions { return; // We found the expected number of active subscriptions } diff --git a/state-sync/storage-service/server/src/utils.rs b/state-sync/storage-service/server/src/utils.rs index 34342ff5508c5..d0e02672fc60c 100644 --- a/state-sync/storage-service/server/src/utils.rs +++ b/state-sync/storage-service/server/src/utils.rs @@ -17,13 +17,13 @@ use aptos_types::ledger_info::LedgerInfoWithSignatures; use arc_swap::ArcSwap; use dashmap::DashMap; use lru::LruCache; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; /// Gets the epoch ending ledger info at the given epoch pub fn get_epoch_ending_ledger_info( cached_storage_server_summary: Arc>, optimistic_fetches: Arc>, - subscriptions: Arc>>, + subscriptions: Arc>, epoch: u64, lru_response_cache: Arc>>, request_moderator: Arc, @@ -85,7 +85,7 @@ pub fn get_epoch_ending_ledger_info( pub fn notify_peer_of_new_data( cached_storage_server_summary: Arc>, optimistic_fetches: Arc>, - subscriptions: Arc>>, + subscriptions: Arc>, lru_response_cache: Arc>>, request_moderator: Arc, storage: T, diff --git a/state-sync/storage-service/types/Cargo.toml b/state-sync/storage-service/types/Cargo.toml index 73e0e3e18fe69..6509f1835b143 100644 --- a/state-sync/storage-service/types/Cargo.toml +++ b/state-sync/storage-service/types/Cargo.toml @@ -16,6 +16,7 @@ rust-version = { workspace = true } aptos-compression = { workspace = true } aptos-config = { workspace = true } aptos-crypto = { workspace = true } +aptos-time-service = { workspace = true } aptos-types = { workspace = true } bcs = { workspace = true } num-traits = { workspace = true } @@ -23,6 +24,7 @@ serde = { workspace = true } thiserror = { workspace = true } [dev-dependencies] +aptos-time-service = { workspace = true, features = ["testing"] } claims = { workspace = true } proptest = { workspace = true } rand = { workspace = true } diff --git a/state-sync/storage-service/types/src/responses.rs b/state-sync/storage-service/types/src/responses.rs index 0aa4f1aaa2a6d..c7aedc9b4f88b 100644 --- a/state-sync/storage-service/types/src/responses.rs +++ 
b/state-sync/storage-service/types/src/responses.rs @@ -17,6 +17,7 @@ use aptos_compression::{metrics::CompressionClient, CompressedData, CompressionE use aptos_config::config::{ AptosDataClientConfig, StorageServiceConfig, MAX_APPLICATION_MESSAGE_SIZE, }; +use aptos_time_service::{TimeService, TimeServiceTrait}; use aptos_types::{ epoch_change::EpochChangeProof, ledger_info::LedgerInfoWithSignatures, @@ -33,6 +34,9 @@ use std::{ }; use thiserror::Error; +// Useful file constants +pub const NUM_MICROSECONDS_IN_SECOND: u64 = 1_000_000; + #[derive(Clone, Debug, Deserialize, Error, PartialEq, Eq, Serialize)] pub enum Error { #[error("Data range cannot be degenerate!")] @@ -356,16 +360,19 @@ pub struct StorageServerSummary { pub data_summary: DataSummary, } +// TODO: it probably makes sense to move this logic to the data client, +// instead of having it attached to the storage server summary. impl StorageServerSummary { pub fn can_service( &self, aptos_data_client_config: &AptosDataClientConfig, + time_service: TimeService, request: &StorageServiceRequest, ) -> bool { self.protocol_metadata.can_service(request) && self .data_summary - .can_service(aptos_data_client_config, request) + .can_service(aptos_data_client_config, time_service, request) } } @@ -428,6 +435,7 @@ impl DataSummary { pub fn can_service( &self, aptos_data_client_config: &AptosDataClientConfig, + time_service: TimeService, request: &StorageServiceRequest, ) -> bool { match &request.data_request { @@ -442,12 +450,21 @@ impl DataSummary { .map(|range| range.superset_of(&desired_range)) .unwrap_or(false) }, - GetNewTransactionOutputsWithProof(request) => { - self.can_service_optimistic_request(aptos_data_client_config, request.known_version) - }, - GetNewTransactionsWithProof(request) => { - self.can_service_optimistic_request(aptos_data_client_config, request.known_version) - }, + GetNewTransactionOutputsWithProof(_) => can_service_optimistic_request( + aptos_data_client_config, + time_service, + self.synced_ledger_info.as_ref(), + ), + GetNewTransactionsWithProof(_) => can_service_optimistic_request( + aptos_data_client_config, + time_service, + self.synced_ledger_info.as_ref(), + ), + GetNewTransactionsOrOutputsWithProof(_) => can_service_optimistic_request( + aptos_data_client_config, + time_service, + self.synced_ledger_info.as_ref(), + ), GetNumberOfStatesAtVersion(version) => self .states .map(|range| range.contains(*version)) @@ -508,9 +525,6 @@ impl DataSummary { can_serve_txns && can_create_proof }, - GetNewTransactionsOrOutputsWithProof(request) => { - self.can_service_optimistic_request(aptos_data_client_config, request.known_version) - }, GetTransactionsOrOutputsWithProof(request) => { let desired_range = match CompleteDataRange::new(request.start_version, request.end_version) { @@ -536,55 +550,24 @@ impl DataSummary { can_serve_txns && can_serve_outputs && can_create_proof }, - SubscribeTransactionOutputsWithProof(request) => { - let known_version = request - .subscription_stream_metadata - .known_version_at_stream_start; - self.can_service_subscription_request(aptos_data_client_config, known_version) - }, - SubscribeTransactionsOrOutputsWithProof(request) => { - let known_version = request - .subscription_stream_metadata - .known_version_at_stream_start; - self.can_service_subscription_request(aptos_data_client_config, known_version) - }, - SubscribeTransactionsWithProof(request) => { - let known_version = request - .subscription_stream_metadata - .known_version_at_stream_start; - 
self.can_service_subscription_request(aptos_data_client_config, known_version) - }, + SubscribeTransactionOutputsWithProof(_) => can_service_subscription_request( + aptos_data_client_config, + time_service, + self.synced_ledger_info.as_ref(), + ), + SubscribeTransactionsOrOutputsWithProof(_) => can_service_subscription_request( + aptos_data_client_config, + time_service, + self.synced_ledger_info.as_ref(), + ), + SubscribeTransactionsWithProof(_) => can_service_subscription_request( + aptos_data_client_config, + time_service, + self.synced_ledger_info.as_ref(), + ), } } - /// Returns true iff the optimistic data request can be serviced - fn can_service_optimistic_request( - &self, - aptos_data_client_config: &AptosDataClientConfig, - known_version: u64, - ) -> bool { - let max_version_lag = aptos_data_client_config.max_optimistic_fetch_version_lag; - self.check_synced_version_lag(known_version, max_version_lag) - } - - /// Returns true iff the subscription data request can be serviced - fn can_service_subscription_request( - &self, - aptos_data_client_config: &AptosDataClientConfig, - known_version: u64, - ) -> bool { - let max_version_lag = aptos_data_client_config.max_subscription_version_lag; - self.check_synced_version_lag(known_version, max_version_lag) - } - - /// Returns true iff the synced version is within the given lag range - fn check_synced_version_lag(&self, known_version: u64, max_version_lag: u64) -> bool { - self.synced_ledger_info - .as_ref() - .map(|li| (li.ledger_info().version() + max_version_lag) > known_version) - .unwrap_or(false) - } - /// Returns the version of the synced ledger info (if one exists) pub fn get_synced_ledger_info_version(&self) -> Option { self.synced_ledger_info @@ -593,6 +576,50 @@ impl DataSummary { } } +/// Returns true iff an optimistic data request can be serviced +/// by the peer with the given synced ledger info. +fn can_service_optimistic_request( + aptos_data_client_config: &AptosDataClientConfig, + time_service: TimeService, + synced_ledger_info: Option<&LedgerInfoWithSignatures>, +) -> bool { + let max_lag_secs = aptos_data_client_config.max_optimistic_fetch_lag_secs; + check_synced_ledger_lag(synced_ledger_info, time_service, max_lag_secs) +} + +/// Returns true iff a subscription data request can be serviced +/// by the peer with the given synced ledger info. +fn can_service_subscription_request( + aptos_data_client_config: &AptosDataClientConfig, + time_service: TimeService, + synced_ledger_info: Option<&LedgerInfoWithSignatures>, +) -> bool { + let max_lag_secs = aptos_data_client_config.max_subscription_lag_secs; + check_synced_ledger_lag(synced_ledger_info, time_service, max_lag_secs) +} + +/// Returns true iff the synced ledger info timestamp +/// is within the given lag (in seconds). 
+fn check_synced_ledger_lag( + synced_ledger_info: Option<&LedgerInfoWithSignatures>, + time_service: TimeService, + max_lag_secs: u64, +) -> bool { + if let Some(synced_ledger_info) = synced_ledger_info { + // Get the ledger info timestamp (in microseconds) + let ledger_info_timestamp_usecs = synced_ledger_info.ledger_info().timestamp_usecs(); + + // Get the current timestamp and max version lag (in microseconds) + let current_timestamp_usecs = time_service.now_unix_time().as_micros() as u64; + let max_version_lag_usecs = max_lag_secs * NUM_MICROSECONDS_IN_SECOND; + + // Return true iff the synced ledger info timestamp is within the max version lag + ledger_info_timestamp_usecs + max_version_lag_usecs > current_timestamp_usecs + } else { + false // No synced ledger info was found! + } +} + /// A struct representing a contiguous, non-empty data range (lowest to highest, /// inclusive) where data is complete (i.e. there are no missing pieces of data). /// diff --git a/state-sync/storage-service/types/src/tests.rs b/state-sync/storage-service/types/src/tests.rs index 3421789713d9e..7c9090faabcc6 100644 --- a/state-sync/storage-service/types/src/tests.rs +++ b/state-sync/storage-service/types/src/tests.rs @@ -15,6 +15,7 @@ use crate::{ }; use aptos_config::config::AptosDataClientConfig; use aptos_crypto::hash::HashValue; +use aptos_time_service::{TimeService, TimeServiceTrait}; use aptos_types::{ aggregate_signature::AggregateSignature, block_info::BlockInfo, @@ -91,44 +92,59 @@ fn test_data_summary_service_epoch_ending_ledger_infos() { #[test] fn test_data_summary_service_optimistic_fetch() { // Create a data client config with the specified max optimistic fetch lag - let max_optimistic_fetch_version_lag = 1000; + let max_optimistic_fetch_lag_secs = 50; let data_client_config = AptosDataClientConfig { - max_optimistic_fetch_version_lag, + max_optimistic_fetch_lag_secs, ..Default::default() }; - // Create a data summary with the specified synced ledger info version - let highest_synced_version = 50_000; + // Create a mock time service and get the current timestamp + let time_service = TimeService::mock(); + let timestamp_usecs = time_service.now_unix_time().as_micros() as u64; + + // Create a data summary with the specified synced ledger info + let highest_synced_version = 10_000; let data_summary = DataSummary { - synced_ledger_info: Some(create_ledger_info_at_version(highest_synced_version)), + synced_ledger_info: Some(create_ledger_info_at_version_and_timestamp( + highest_synced_version, + timestamp_usecs, + )), ..Default::default() }; - // Verify the different requests that can be serviced + // Elapse the time service by half the max optimistic fetch lag + time_service + .clone() + .into_mock() + .advance_secs(max_optimistic_fetch_lag_secs / 2); + + // Verify that optimistic fetch requests can be serviced for compression in [true, false] { - // Test the known versions that are within the optimistic fetch lag - let known_versions = vec![ - highest_synced_version, - highest_synced_version + (max_optimistic_fetch_version_lag / 2), - highest_synced_version + max_optimistic_fetch_version_lag - 1, - ]; + let known_versions = vec![0, 1, highest_synced_version, highest_synced_version * 2]; verify_can_service_optimistic_fetch_requests( &data_client_config, &data_summary, + time_service.clone(), compression, known_versions, true, ); + } - // Test the known versions that are outside the optimistic fetch lag - let known_versions = vec![ - highest_synced_version + max_optimistic_fetch_version_lag, - 
highest_synced_version + max_optimistic_fetch_version_lag + 1, - highest_synced_version + (max_optimistic_fetch_version_lag * 2), - ]; + // Elapse the time service by the max optimistic fetch lag + time_service + .clone() + .into_mock() + .advance_secs(max_optimistic_fetch_lag_secs); + + // Verify that optimistic fetch requests can no longer be serviced + // (as the max lag has been exceeded for the given data summary). + for compression in [true, false] { + let known_versions = vec![0, 1, highest_synced_version, highest_synced_version * 2]; verify_can_service_optimistic_fetch_requests( &data_client_config, &data_summary, + time_service.clone(), compression, known_versions, false, @@ -139,44 +155,59 @@ fn test_data_summary_service_optimistic_fetch() { #[test] fn test_data_summary_service_subscription() { // Create a data client config with the specified max subscription lag - let max_subscription_version_lag = 1000; + let max_subscription_lag_secs = 100; let data_client_config = AptosDataClientConfig { - max_subscription_version_lag, + max_subscription_lag_secs, ..Default::default() }; - // Create a data summary with the specified synced ledger info version + // Create a mock time service and get the current timestamp + let time_service = TimeService::mock(); + let timestamp_usecs = time_service.now_unix_time().as_micros() as u64; + + // Create a data summary with the specified synced ledger info let highest_synced_version = 50_000; let data_summary = DataSummary { - synced_ledger_info: Some(create_ledger_info_at_version(highest_synced_version)), + synced_ledger_info: Some(create_ledger_info_at_version_and_timestamp( + highest_synced_version, + timestamp_usecs, + )), ..Default::default() }; - // Verify the different requests that can be serviced + // Elapse the time service by half the max subscription lag + time_service + .clone() + .into_mock() + .advance_secs(max_subscription_lag_secs / 2); + + // Verify that subscription requests can be serviced for compression in [true, false] { - // Test the known versions that are within the subscription lag - let known_versions = vec![ - highest_synced_version, - highest_synced_version + (max_subscription_version_lag / 2), - highest_synced_version + max_subscription_version_lag - 1, - ]; + let known_versions = vec![0, 1, highest_synced_version, highest_synced_version * 2]; verify_can_service_subscription_requests( &data_client_config, &data_summary, + time_service.clone(), compression, known_versions, true, ); + } - // Test the known versions that are outside the subscription lag - let known_versions = vec![ - highest_synced_version + max_subscription_version_lag, - highest_synced_version + max_subscription_version_lag + 1, - highest_synced_version + (max_subscription_version_lag * 2), - ]; + // Elapse the time service by the max subscription lag + time_service + .clone() + .into_mock() + .advance_secs(max_subscription_lag_secs); + + // Verify that subscription requests can no longer be serviced + // (as the max lag has been exceeded for the given data summary). 
+ for compression in [true, false] { + let known_versions = vec![0, 1, highest_synced_version, highest_synced_version * 2]; verify_can_service_subscription_requests( &data_client_config, &data_summary, + time_service.clone(), compression, known_versions, false, @@ -490,9 +521,25 @@ fn create_epoch_ending_request( /// Creates a new ledger info at the given version fn create_ledger_info_at_version(version: Version) -> LedgerInfoWithSignatures { + create_ledger_info_at_version_and_timestamp(version, 0) +} + +/// Creates a new ledger info at the given version and timestamp +fn create_ledger_info_at_version_and_timestamp( + version: Version, + timestamp_usecs: u64, +) -> LedgerInfoWithSignatures { LedgerInfoWithSignatures::new( LedgerInfo::new( - BlockInfo::new(0, 0, HashValue::zero(), HashValue::zero(), version, 0, None), + BlockInfo::new( + 0, + 0, + HashValue::zero(), + HashValue::zero(), + version, + timestamp_usecs, + None, + ), HashValue::zero(), ), AggregateSignature::empty(), @@ -664,7 +711,13 @@ fn verify_can_service_epoch_ending_requests( let request = create_epoch_ending_request(start_epoch, end_epoch, compression); // Verify the serviceability of the request - verify_serviceability(data_client_config, data_summary, request, expect_service); + verify_serviceability( + data_client_config, + data_summary, + None, + request, + expect_service, + ); } } @@ -674,6 +727,7 @@ fn verify_can_service_epoch_ending_requests( fn verify_can_service_optimistic_fetch_requests( data_client_config: &AptosDataClientConfig, data_summary: &DataSummary, + time_service: TimeService, compression: bool, known_versions: Vec, expect_service: bool, @@ -683,7 +737,13 @@ fn verify_can_service_optimistic_fetch_requests( let request = create_optimistic_fetch_request(known_version, compression); // Verify the serviceability of the request - verify_serviceability(data_client_config, data_summary, request, expect_service); + verify_serviceability( + data_client_config, + data_summary, + Some(time_service.clone()), + request, + expect_service, + ); } } @@ -702,7 +762,13 @@ fn verify_can_service_state_chunk_requests( let request = create_state_values_request_at_version(version, use_compression); // Verify the serviceability of the request - verify_serviceability(data_client_config, data_summary, request, expect_service); + verify_serviceability( + data_client_config, + data_summary, + None, + request, + expect_service, + ); } } @@ -712,6 +778,7 @@ fn verify_can_service_state_chunk_requests( fn verify_can_service_subscription_requests( data_client_config: &AptosDataClientConfig, data_summary: &DataSummary, + time_service: TimeService, compression: bool, known_versions: Vec, expect_service: bool, @@ -721,7 +788,13 @@ fn verify_can_service_subscription_requests( let request = create_subscription_request(known_version, compression); // Verify the serviceability of the request - verify_serviceability(data_client_config, data_summary, request, expect_service); + verify_serviceability( + data_client_config, + data_summary, + Some(time_service.clone()), + request, + expect_service, + ); } } @@ -741,7 +814,13 @@ fn verify_can_service_transaction_requests( create_transactions_request(proof_version, start_version, end_version, use_compression); // Verify the serviceability of the request - verify_serviceability(data_client_config, data_summary, request, expect_service); + verify_serviceability( + data_client_config, + data_summary, + None, + request, + expect_service, + ); } } @@ -765,7 +844,13 @@ fn 
verify_can_service_transaction_or_output_requests( ); // Verify the serviceability of the request - verify_serviceability(data_client_config, data_summary, request, expect_service); + verify_serviceability( + data_client_config, + data_summary, + None, + request, + expect_service, + ); } } @@ -785,7 +870,13 @@ fn verify_can_service_output_requests( create_outputs_request(proof_version, start_version, end_version, use_compression); // Verify the serviceability of the request - verify_serviceability(data_client_config, data_summary, request, expect_service); + verify_serviceability( + data_client_config, + data_summary, + None, + request, + expect_service, + ); } } @@ -793,10 +884,12 @@ fn verify_can_service_output_requests( fn verify_serviceability( data_client_config: &AptosDataClientConfig, data_summary: &DataSummary, + time_service: Option, request: StorageServiceRequest, expect_service: bool, ) { - let can_service = data_summary.can_service(data_client_config, &request); + let time_service = time_service.unwrap_or(TimeService::mock()); + let can_service = data_summary.can_service(data_client_config, time_service, &request); // Assert that the serviceability matches the expectation if expect_service { diff --git a/storage/aptosdb/Cargo.toml b/storage/aptosdb/Cargo.toml index 1c795904092cb..b3d14f6cc63ab 100644 --- a/storage/aptosdb/Cargo.toml +++ b/storage/aptosdb/Cargo.toml @@ -40,6 +40,7 @@ byteorder = { workspace = true } claims = { workspace = true } clap = { workspace = true, optional = true } dashmap = { workspace = true } +either = { workspace = true } itertools = { workspace = true } lru = { workspace = true } move-core-types = { workspace = true } diff --git a/storage/aptosdb/src/aptosdb_test.rs b/storage/aptosdb/src/aptosdb_test.rs index 794af6c863bc0..6342ec969162e 100644 --- a/storage/aptosdb/src/aptosdb_test.rs +++ b/storage/aptosdb/src/aptosdb_test.rs @@ -90,7 +90,7 @@ fn test_pruner_config() { let aptos_db = AptosDB::new_for_test(&tmp_dir); for enable in [false, true] { let state_merkle_pruner = StateMerklePrunerManager::::new( - Arc::clone(&aptos_db.state_merkle_db), + Arc::clone(&aptos_db.state_merkle_db()), StateMerklePrunerConfig { enable, prune_window: 20, diff --git a/storage/aptosdb/src/db_debugger/checkpoint/mod.rs b/storage/aptosdb/src/db_debugger/checkpoint/mod.rs index 7895506d7ad07..f4c0600b5ac71 100644 --- a/storage/aptosdb/src/db_debugger/checkpoint/mod.rs +++ b/storage/aptosdb/src/db_debugger/checkpoint/mod.rs @@ -20,8 +20,12 @@ impl Cmd { pub fn run(self) -> Result<()> { ensure!(!self.output_dir.exists(), "Output dir already exists."); fs::create_dir_all(&self.output_dir)?; - - // TODO(grao): Support sharded state merkle db and split_ledger_db here. 
- AptosDB::create_checkpoint(self.db_dir, self.output_dir, false, false) + let sharding_config = self.db_dir.sharding_config.clone(); + AptosDB::create_checkpoint( + self.db_dir, + self.output_dir, + sharding_config.use_sharded_state_merkle_db, + sharding_config.split_ledger_db, + ) } } diff --git a/storage/aptosdb/src/db_debugger/common/mod.rs b/storage/aptosdb/src/db_debugger/common/mod.rs index a164a2abb77f6..1a600aad7c1c9 100644 --- a/storage/aptosdb/src/db_debugger/common/mod.rs +++ b/storage/aptosdb/src/db_debugger/common/mod.rs @@ -2,35 +2,49 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - db_options::state_merkle_db_column_families, ledger_db::LedgerDb, STATE_MERKLE_DB_NAME, + db_debugger::ShardingConfig, ledger_db::LedgerDb, state_merkle_db::StateMerkleDb, + STATE_MERKLE_DB_NAME, }; use anyhow::Result; use aptos_config::config::RocksdbConfigs; use aptos_types::nibble::{nibble_path::NibblePath, Nibble}; use clap::Parser; +use core::default::Default; use std::path::{Path, PathBuf}; pub const PAGE_SIZE: usize = 10; -#[derive(Parser)] +#[derive(Parser, Clone)] pub struct DbDir { #[clap(long, value_parser)] db_dir: PathBuf, + + #[clap(flatten)] + pub sharding_config: ShardingConfig, } impl DbDir { - // TODO(grao): Use StateMerkleDb struct. - pub fn open_state_merkle_db(&self) -> Result { - aptos_schemadb::DB::open_cf_readonly( - &aptos_schemadb::Options::default(), + pub fn open_state_merkle_db(&self) -> Result { + StateMerkleDb::new( self.db_dir.join(STATE_MERKLE_DB_NAME).as_path(), - STATE_MERKLE_DB_NAME, - state_merkle_db_column_families(), + RocksdbConfigs { + use_sharded_state_merkle_db: self.sharding_config.use_sharded_state_merkle_db, + ..Default::default() + }, + false, + 0, ) } pub fn open_ledger_db(&self) -> Result { - LedgerDb::new(self.db_dir.as_path(), RocksdbConfigs::default(), true) + LedgerDb::new( + self.db_dir.as_path(), + RocksdbConfigs { + split_ledger_db: self.sharding_config.split_ledger_db, + ..Default::default() + }, + true, + ) } } diff --git a/storage/aptosdb/src/db_debugger/examine/print_db_versions.rs b/storage/aptosdb/src/db_debugger/examine/print_db_versions.rs index 0f477421114a4..7be1bcd87c25d 100644 --- a/storage/aptosdb/src/db_debugger/examine/print_db_versions.rs +++ b/storage/aptosdb/src/db_debugger/examine/print_db_versions.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ + db_debugger::ShardingConfig, schema::{ db_metadata::{DbMetadataKey, DbMetadataSchema}, event_accumulator::EventAccumulatorSchema, @@ -32,14 +33,15 @@ pub struct Cmd { #[clap(long, value_parser)] db_dir: PathBuf, - #[clap(long)] - split_ledger_db: bool, + #[clap(flatten)] + sharding_config: ShardingConfig, } impl Cmd { pub fn run(self) -> Result<()> { let rocksdb_config = RocksdbConfigs { - split_ledger_db: self.split_ledger_db, + split_ledger_db: self.sharding_config.split_ledger_db, + use_sharded_state_merkle_db: self.sharding_config.use_sharded_state_merkle_db, ..Default::default() }; let (ledger_db, state_merkle_db, state_kv_db) = AptosDB::open_dbs( diff --git a/storage/aptosdb/src/db_debugger/mod.rs b/storage/aptosdb/src/db_debugger/mod.rs index 10c935dc877f4..7bc761965cc7f 100644 --- a/storage/aptosdb/src/db_debugger/mod.rs +++ b/storage/aptosdb/src/db_debugger/mod.rs @@ -11,6 +11,14 @@ pub mod truncate; use anyhow::Result; use clap::Parser; +#[derive(Parser, Clone)] +pub struct ShardingConfig { + #[clap(long)] + split_ledger_db: bool, + #[clap(long)] + use_sharded_state_merkle_db: bool, +} + #[derive(Parser)] pub enum Cmd { #[clap(subcommand)] 
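// [Editor's note: illustrative sketch, not part of the diff.] The new ShardingConfig is
// #[clap(flatten)]-ed into the db_debugger subcommands, so under clap's default derive rules
// the two bool fields surface as the kebab-case flags --split-ledger-db and
// --use-sharded-state-merkle-db. As the print-db-versions command above does, callers map the
// flags straight onto RocksdbConfigs; the helper name here is hypothetical and assumes the
// crate's ShardingConfig and RocksdbConfigs types are in scope.
fn rocksdb_configs_from(sharding: &ShardingConfig) -> RocksdbConfigs {
    RocksdbConfigs {
        split_ledger_db: sharding.split_ledger_db,
        use_sharded_state_merkle_db: sharding.use_sharded_state_merkle_db,
        ..Default::default()
    }
}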
diff --git a/storage/aptosdb/src/db_debugger/state_tree/get_path.rs b/storage/aptosdb/src/db_debugger/state_tree/get_path.rs index f3545f0f73b08..fe9f19d2e82e1 100644 --- a/storage/aptosdb/src/db_debugger/state_tree/get_path.rs +++ b/storage/aptosdb/src/db_debugger/state_tree/get_path.rs @@ -4,10 +4,14 @@ use crate::{ db_debugger::common::{parse_nibble_path, DbDir}, jellyfish_merkle_node::JellyfishMerkleNodeSchema, + state_merkle_db::StateMerkleDb, }; use anyhow::{ensure, Result}; use aptos_crypto::HashValue; -use aptos_jellyfish_merkle::node_type::{Child, Node, NodeKey, NodeType}; +use aptos_jellyfish_merkle::{ + node_type::{Child, Node, NodeKey, NodeType}, + TreeReader, +}; use aptos_types::{ nibble::{nibble_path::NibblePath, Nibble}, transaction::Version, @@ -41,7 +45,10 @@ impl Cmd { ); let db = self.db_dir.open_state_merkle_db()?; - let mut iter = db.rev_iter::(Default::default())?; + let mut iter = db + .metadata_db() + .rev_iter::(Default::default())?; + iter.seek_for_prev(&NodeKey::new_empty_path(self.before_version - 1))?; let mut version = iter.next().transpose()?.unwrap().0.version(); let root_version = version; @@ -79,7 +86,7 @@ impl Cmd { pub fn render_node( &self, - db: &aptos_schemadb::DB, + db: &StateMerkleDb, version: Version, pos: &NibblePath, root_version: Version, @@ -87,7 +94,7 @@ impl Cmd { expected_hash: Option, ) -> Result> { let node_key = NodeKey::new(version, pos.clone()); - let node = db.get::(&node_key)?; + let node = db.get_node_option(&node_key, "unknown")?; let node_type = match node { None => "No node", Some(Node::Internal(_)) => "Internal node", diff --git a/storage/aptosdb/src/db_debugger/state_tree/get_snapshots.rs b/storage/aptosdb/src/db_debugger/state_tree/get_snapshots.rs index 51ce612e1cfa7..5e94e1de0d561 100644 --- a/storage/aptosdb/src/db_debugger/state_tree/get_snapshots.rs +++ b/storage/aptosdb/src/db_debugger/state_tree/get_snapshots.rs @@ -1,12 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{ - db_debugger::common::{DbDir, PAGE_SIZE}, - jellyfish_merkle_node::JellyfishMerkleNodeSchema, -}; +use crate::db_debugger::common::{DbDir, PAGE_SIZE}; use anyhow::Result; -use aptos_jellyfish_merkle::node_type::NodeKey; use aptos_types::transaction::Version; use clap::Parser; @@ -29,17 +25,17 @@ impl Cmd { if self.next_version > 0 { let db = self.db_dir.open_state_merkle_db()?; - let mut iter = db.rev_iter::(Default::default())?; let mut version = self.next_version - 1; for n in 0..PAGE_SIZE { - iter.seek_for_prev(&NodeKey::new_empty_path(version))?; - if let Some((key, _node)) = iter.next().transpose()? 
{ - println!("{} {}", n, key.version()); - if key.version() == 0 { + let res = db.get_state_snapshot_version_before(version)?; + + if let Some(ver) = res { + println!("{} {}", n, ver); + if ver == 0 { break; } - version = key.version() - 1; + version = ver - 1; } else { break; } diff --git a/storage/aptosdb/src/db_debugger/truncate/mod.rs b/storage/aptosdb/src/db_debugger/truncate/mod.rs index 3b5cbf61e0919..69dca8d99df78 100644 --- a/storage/aptosdb/src/db_debugger/truncate/mod.rs +++ b/storage/aptosdb/src/db_debugger/truncate/mod.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ + db_debugger::ShardingConfig, jellyfish_merkle_node::JellyfishMerkleNodeSchema, schema::{ db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, @@ -46,8 +47,8 @@ pub struct Cmd { #[clap(long, group = "backup")] opt_out_backup_checkpoint: bool, - #[clap(long)] - split_ledger_db: bool, + #[clap(flatten)] + sharding_config: ShardingConfig, } impl Cmd { @@ -60,12 +61,11 @@ impl Cmd { ); println!("Creating backup at: {:?}", &backup_checkpoint_dir); fs::create_dir_all(&backup_checkpoint_dir)?; - // TODO(grao): Support sharded state merkle db here. AptosDB::create_checkpoint( &self.db_dir, backup_checkpoint_dir, - self.split_ledger_db, - false, + self.sharding_config.split_ledger_db, + self.sharding_config.use_sharded_state_merkle_db, )?; println!("Done!"); } else { @@ -73,7 +73,8 @@ impl Cmd { } let rocksdb_config = RocksdbConfigs { - split_ledger_db: self.split_ledger_db, + split_ledger_db: self.sharding_config.split_ledger_db, + use_sharded_state_merkle_db: self.sharding_config.use_sharded_state_merkle_db, ..Default::default() }; let (ledger_db, state_merkle_db, state_kv_db) = AptosDB::open_dbs( @@ -210,24 +211,30 @@ mod test { }, test_helper::{arb_blocks_to_commit_with_block_nums, update_in_memory_state}, utils::truncation_helper::num_frozen_nodes_in_accumulator, - AptosDB, + AptosDB, NUM_STATE_SHARDS, }; use aptos_storage_interface::{DbReader, DbWriter}; use aptos_temppath::TempPath; use proptest::prelude::*; proptest! 
{ - #![proptest_config(ProptestConfig::with_cases(10))] + #![proptest_config(ProptestConfig::with_cases(1))] #[test] fn test_truncation(input in arb_blocks_to_commit_with_block_nums(80, 120)) { + use crate::DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD; aptos_logger::Logger::new().init(); + let sharding_config = ShardingConfig { + split_ledger_db: input.1, + use_sharded_state_merkle_db: input.1, + }; let tmp_dir = TempPath::new(); - let db = AptosDB::new_for_test(&tmp_dir); + + let db = if input.1 { AptosDB::new_for_test_with_sharding(&tmp_dir, DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD) } else { AptosDB::new_for_test(&tmp_dir) }; let mut in_memory_state = db.state_store.buffered_state().lock().current_state().clone(); let _ancestor = in_memory_state.base.clone(); let mut version = 0; - for (txns_to_commit, ledger_info_with_sigs) in input.iter() { + for (txns_to_commit, ledger_info_with_sigs) in input.0.iter() { update_in_memory_state(&mut in_memory_state, txns_to_commit.as_slice()); db.save_transactions(txns_to_commit, version, version.checked_sub(1), Some(ledger_info_with_sigs), true, in_memory_state.clone()) .unwrap(); @@ -247,12 +254,12 @@ mod test { ledger_db_batch_size: 15, opt_out_backup_checkpoint: true, backup_checkpoint_dir: None, - split_ledger_db: false, + sharding_config: sharding_config.clone(), }; cmd.run().unwrap(); - let db = AptosDB::new_for_test(&tmp_dir); + let db = if input.1 { AptosDB::new_for_test_with_sharding(&tmp_dir, DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD) } else { AptosDB::new_for_test(&tmp_dir) }; let db_version = db.get_latest_version().unwrap(); prop_assert_eq!(db_version, target_version); @@ -273,7 +280,11 @@ mod test { let (ledger_db, state_merkle_db, state_kv_db) = AptosDB::open_dbs( tmp_dir.path().to_path_buf(), - RocksdbConfigs::default(), + RocksdbConfigs { + use_sharded_state_merkle_db: input.1, + split_ledger_db: input.1, + ..Default::default() + }, /*readonly=*/ false, /*max_num_nodes_per_lru_cache_shard=*/ 0, ).unwrap(); @@ -309,7 +320,7 @@ mod test { iter.seek_to_last(); prop_assert_eq!(iter.next().transpose().unwrap().unwrap().0, epoch); - // TODO(grao): Support sharding here. + let mut iter = state_kv_db.metadata_db().iter::(ReadOptions::default()).unwrap(); iter.seek_to_first(); for item in iter { @@ -317,7 +328,6 @@ mod test { prop_assert!(version <= target_version); } - // TODO(grao): Support sharding here. let mut iter = state_kv_db.metadata_db().iter::(ReadOptions::default()).unwrap(); iter.seek_to_first(); for item in iter { @@ -325,7 +335,6 @@ mod test { prop_assert!(version <= target_version); } - // TODO(grao): Support sharding here. let mut iter = state_merkle_db.metadata_db().iter::(ReadOptions::default()).unwrap(); iter.seek_to_first(); for item in iter { @@ -333,7 +342,6 @@ mod test { prop_assert!(version <= target_version); } - // TODO(grao): Support sharding here. let mut iter = state_merkle_db.metadata_db().iter::(ReadOptions::default()).unwrap(); iter.seek_to_first(); for item in iter { @@ -341,13 +349,51 @@ mod test { prop_assert!(version <= target_version); } - // TODO(grao): Support sharding here. 
let mut iter = state_merkle_db.metadata_db().iter::(ReadOptions::default()).unwrap(); iter.seek_to_first(); for item in iter { let version = item.unwrap().0.version(); prop_assert!(version <= target_version); } + + if sharding_config.split_ledger_db && sharding_config.use_sharded_state_merkle_db { + let state_merkle_db = Arc::new(state_merkle_db); + for i in 0..NUM_STATE_SHARDS as u8 { + let mut kv_shard_iter = state_kv_db.db_shard(i).iter::(ReadOptions::default()).unwrap(); + kv_shard_iter.seek_to_first(); + for item in kv_shard_iter { + let ((_, version), _) = item.unwrap(); + prop_assert!(version <= target_version); + } + + let value_index_shard_iter = state_kv_db.db_shard(i).iter::(ReadOptions::default()).unwrap(); + for item in value_index_shard_iter { + let version = item.unwrap().0.stale_since_version; + prop_assert!(version <= target_version); + } + + let mut stale_node_ind_iter = state_merkle_db.db_shard(i).iter::(ReadOptions::default()).unwrap(); + stale_node_ind_iter.seek_to_first(); + for item in stale_node_ind_iter { + let version = item.unwrap().0.stale_since_version; + prop_assert!(version <= target_version); + } + + let mut jelly_iter = state_merkle_db.db_shard(i).iter::(ReadOptions::default()).unwrap(); + jelly_iter.seek_to_first(); + for item in jelly_iter { + let version = item.unwrap().0.version(); + prop_assert!(version <= target_version); + } + + let mut cross_iter = state_merkle_db.db_shard(i).iter::(ReadOptions::default()).unwrap(); + cross_iter.seek_to_first(); + for item in cross_iter { + let version = item.unwrap().0.stale_since_version; + prop_assert!(version <= target_version); + } + } + } } } } diff --git a/storage/aptosdb/src/event_store/mod.rs b/storage/aptosdb/src/event_store/mod.rs index 0786bb3f88351..9167b1827377b 100644 --- a/storage/aptosdb/src/event_store/mod.rs +++ b/storage/aptosdb/src/event_store/mod.rs @@ -462,6 +462,16 @@ impl EventStore { Ok(()) } + pub fn latest_version(&self) -> Result> { + let mut iter = self.event_db.iter::(ReadOptions::default())?; + iter.seek_to_last(); + if let Some(((version, _), _)) = iter.next().transpose()? 
{ + Ok(Some(version)) + } else { + Ok(None) + } + } + /// Prune a set of candidate events in the range of version in [begin, end) and all related indices pub fn prune_events( &self, diff --git a/storage/aptosdb/src/fake_aptosdb.rs b/storage/aptosdb/src/fake_aptosdb.rs index 6b66ff4250f5d..3764e13538ec6 100644 --- a/storage/aptosdb/src/fake_aptosdb.rs +++ b/storage/aptosdb/src/fake_aptosdb.rs @@ -745,7 +745,7 @@ impl DbReader for FakeAptosDB { EventHandle::new(EventKey::new(1, account_address), 0), ); let bytes = bcs::to_bytes(&account)?; - Ok(Some(StateValue::new_legacy(bytes))) + Ok(Some(StateValue::new_legacy(bytes.into()))) } else { self.inner.get_state_value_by_version(state_key, version) } diff --git a/storage/aptosdb/src/fast_sync_storage_wrapper.rs b/storage/aptosdb/src/fast_sync_storage_wrapper.rs new file mode 100644 index 0000000000000..d13e3cb924cdf --- /dev/null +++ b/storage/aptosdb/src/fast_sync_storage_wrapper.rs @@ -0,0 +1,215 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::AptosDB; +use anyhow::{anyhow, Result}; +use aptos_config::config::NodeConfig; +use aptos_crypto::HashValue; +use aptos_infallible::RwLock; +use aptos_storage_interface::{ + cached_state_view::ShardedStateCache, state_delta::StateDelta, DbReader, DbWriter, + StateSnapshotReceiver, +}; +use aptos_types::{ + epoch_change::EpochChangeProof, + ledger_info::LedgerInfoWithSignatures, + state_store::{state_key::StateKey, state_value::StateValue, ShardedStateUpdates}, + transaction::{TransactionOutputListWithProof, TransactionToCommit, Version}, +}; +use either::Either; +use std::sync::Arc; + +pub const SECONDARY_DB_DIR: &str = "fast_sync_secondary"; + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum FastSyncStatus { + UNKNOWN, + STARTED, + FINISHED, +} + +/// This is a wrapper around [AptosDB] that is used to bootstrap the node for fast sync mode +pub struct FastSyncStorageWrapper { + // Used for storing genesis data during fast sync + temporary_db_with_genesis: Arc, + // Used for restoring fast sync snapshot and all the read/writes afterwards + db_for_fast_sync: Arc, + // This is for reading the fast_sync status to determine which db to use + fast_sync_status: Arc>, +} + +impl FastSyncStorageWrapper { + /// If the db is empty and configured to do fast sync, we return a FastSyncStorageWrapper + /// Otherwise, we returns AptosDB directly and the FastSyncStorageWrapper is None + pub fn initialize_dbs(config: &NodeConfig) -> Result> { + let mut db_dir = config.storage.dir(); + let db_main = AptosDB::open( + db_dir.as_path(), + false, + config.storage.storage_pruner_config, + config.storage.rocksdb_configs, + config.storage.enable_indexer, + config.storage.buffered_state_target_items, + config.storage.max_num_nodes_per_lru_cache_shard, + ) + .map_err(|err| anyhow!("fast sync DB failed to open {}", err))?; + + // when the db is empty and configured to do fast sync, we will create a second DB + if config + .state_sync + .state_sync_driver + .bootstrapping_mode + .is_fast_sync() + && (db_main.ledger_store.get_latest_version().map_or(0, |v| v) == 0) + { + db_dir.push(SECONDARY_DB_DIR); + let secondary_db = AptosDB::open( + db_dir.as_path(), + false, + config.storage.storage_pruner_config, + config.storage.rocksdb_configs, + config.storage.enable_indexer, + config.storage.buffered_state_target_items, + config.storage.max_num_nodes_per_lru_cache_shard, + ) + .map_err(|err| anyhow!("Secondary DB failed to open {}", err))?; + + Ok(Either::Right(FastSyncStorageWrapper { + 
temporary_db_with_genesis: Arc::new(secondary_db), + db_for_fast_sync: Arc::new(db_main), + fast_sync_status: Arc::new(RwLock::new(FastSyncStatus::UNKNOWN)), + })) + } else { + Ok(Either::Left(db_main)) + } + } + + pub fn get_fast_sync_db(&self) -> Arc { + self.db_for_fast_sync.clone() + } + + pub fn get_temporary_db_with_genesis(&self) -> Arc { + self.temporary_db_with_genesis.clone() + } + + pub fn get_fast_sync_status(&self) -> FastSyncStatus { + *self.fast_sync_status.read() + } + + /// Check if the fast sync finished already + fn is_fast_sync_bootstrap_finished(&self) -> bool { + let status = self.get_fast_sync_status(); + status == FastSyncStatus::FINISHED + } + + /// Check if the fast sync started already + fn is_fast_sync_bootstrap_started(&self) -> bool { + let status = self.get_fast_sync_status(); + status == FastSyncStatus::STARTED + } + + pub(crate) fn get_aptos_db_read_ref(&self) -> &AptosDB { + if self.is_fast_sync_bootstrap_finished() { + self.db_for_fast_sync.as_ref() + } else { + self.temporary_db_with_genesis.as_ref() + } + } + + pub(crate) fn get_aptos_db_write_ref(&self) -> &AptosDB { + if self.is_fast_sync_bootstrap_started() || self.is_fast_sync_bootstrap_finished() { + self.db_for_fast_sync.as_ref() + } else { + self.temporary_db_with_genesis.as_ref() + } + } +} + +impl DbWriter for FastSyncStorageWrapper { + fn get_state_snapshot_receiver( + &self, + version: Version, + expected_root_hash: HashValue, + ) -> Result>> { + *self.fast_sync_status.write() = FastSyncStatus::STARTED; + self.get_aptos_db_write_ref() + .get_state_snapshot_receiver(version, expected_root_hash) + } + + fn finalize_state_snapshot( + &self, + version: Version, + output_with_proof: TransactionOutputListWithProof, + ledger_infos: &[LedgerInfoWithSignatures], + ) -> Result<()> { + let status = self.get_fast_sync_status(); + assert_eq!(status, FastSyncStatus::STARTED); + self.get_aptos_db_write_ref().finalize_state_snapshot( + version, + output_with_proof, + ledger_infos, + )?; + let mut status = self.fast_sync_status.write(); + *status = FastSyncStatus::FINISHED; + Ok(()) + } + + fn save_transactions( + &self, + txns_to_commit: &[TransactionToCommit], + first_version: Version, + base_state_version: Option, + ledger_info_with_sigs: Option<&LedgerInfoWithSignatures>, + sync_commit: bool, + latest_in_memory_state: StateDelta, + ) -> Result<()> { + self.get_aptos_db_write_ref().save_transactions( + txns_to_commit, + first_version, + base_state_version, + ledger_info_with_sigs, + sync_commit, + latest_in_memory_state, + ) + } + + fn save_transaction_block( + &self, + txns_to_commit: &[Arc], + first_version: Version, + base_state_version: Option, + ledger_info_with_sigs: Option<&LedgerInfoWithSignatures>, + sync_commit: bool, + latest_in_memory_state: StateDelta, + block_state_updates: ShardedStateUpdates, + sharded_state_cache: &ShardedStateCache, + ) -> Result<()> { + self.get_aptos_db_write_ref().save_transaction_block( + txns_to_commit, + first_version, + base_state_version, + ledger_info_with_sigs, + sync_commit, + latest_in_memory_state, + block_state_updates, + sharded_state_cache, + ) + } +} + +impl DbReader for FastSyncStorageWrapper { + fn get_read_delegatee(&self) -> &dyn DbReader { + self.get_aptos_db_read_ref() + } + + fn get_epoch_ending_ledger_infos( + &self, + start_epoch: u64, + end_epoch: u64, + ) -> Result { + let (ledger_info, flag) = self + .get_aptos_db_read_ref() + .get_epoch_ending_ledger_infos(start_epoch, end_epoch)?; + Ok(EpochChangeProof::new(ledger_info, flag)) + } +} diff 
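// [Editor's note: illustrative sketch, not part of the diff.] The FastSyncStorageWrapper above
// routes database access by fast sync status: reads stay on the temporary DB that holds only
// genesis until the snapshot restore has FINISHED, while writes switch to the main DB as soon
// as the restore has STARTED. A status-only sketch of that routing rule (Db is a stand-in type):
enum Status { Unknown, Started, Finished }

fn pick_db<'a, Db>(status: Status, main: &'a Db, temp: &'a Db, is_write: bool) -> &'a Db {
    match (is_write, status) {
        // Writes move to the main DB once fast sync has started; everything uses it once finished.
        (true, Status::Started) | (_, Status::Finished) => main,
        // Reads before completion, and writes before the restore starts, use the genesis-only DB.
        _ => temp,
    }
}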
--git a/storage/aptosdb/src/ledger_db.rs b/storage/aptosdb/src/ledger_db.rs index e6481dd55cab3..5ea668eb00d8a 100644 --- a/storage/aptosdb/src/ledger_db.rs +++ b/storage/aptosdb/src/ledger_db.rs @@ -356,10 +356,10 @@ impl LedgerDb { .write_schemas(schemas.transaction_info_db_batches)?; self.transaction_db .write_schemas(schemas.transaction_db_batches)?; - self.ledger_metadata_db - .write_schemas(schemas.ledger_metadata_db_batches)?; self.event_db.write_schemas(schemas.event_db_batches)?; self.transaction_accumulator_db - .write_schemas(schemas.transaction_accumulator_db_batches) + .write_schemas(schemas.transaction_accumulator_db_batches)?; + self.ledger_metadata_db + .write_schemas(schemas.ledger_metadata_db_batches) } } diff --git a/storage/aptosdb/src/lib.rs b/storage/aptosdb/src/lib.rs index 157f80d426f5a..3a5985e81574c 100644 --- a/storage/aptosdb/src/lib.rs +++ b/storage/aptosdb/src/lib.rs @@ -40,11 +40,17 @@ mod aptosdb_test; #[cfg(feature = "db-debugger")] pub mod db_debugger; +pub mod fast_sync_storage_wrapper; use crate::{ backup::{backup_handler::BackupHandler, restore_handler::RestoreHandler, restore_utils}, db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, - db_options::{ledger_db_column_families, state_merkle_db_column_families}, + db_options::{ + event_db_column_families, ledger_db_column_families, ledger_metadata_db_column_families, + state_kv_db_column_families, state_merkle_db_column_families, + transaction_accumulator_db_column_families, transaction_db_column_families, + transaction_info_db_column_families, write_set_db_column_families, + }, errors::AptosDbError, event_store::EventStore, ledger_db::{LedgerDb, LedgerDbSchemaBatches}, @@ -192,27 +198,80 @@ fn error_if_too_many_requested(num_requested: u64, max_allowed: u64) -> Result<( } } -fn update_rocksdb_properties(ledger_rocksdb: &DB, state_merkle_db: &StateMerkleDb) -> Result<()> { +fn update_rocksdb_properties( + ledger_db: &LedgerDb, + state_merkle_db: &StateMerkleDb, + state_kv_db: &StateKvDb, +) -> Result<()> { let _timer = OTHER_TIMERS_SECONDS .with_label_values(&["update_rocksdb_properties"]) .start_timer(); - for cf_name in ledger_db_column_families() { + + let set_property_fn = |cf_name: &str, db: &DB| -> Result<()> { for (rockdb_property_name, aptos_rocksdb_property_name) in &*ROCKSDB_PROPERTY_MAP { ROCKSDB_PROPERTIES .with_label_values(&[cf_name, aptos_rocksdb_property_name]) - .set(ledger_rocksdb.get_property(cf_name, rockdb_property_name)? as i64); + .set(db.get_property(cf_name, rockdb_property_name)? 
as i64); + } + Ok(()) + }; + + let gen_shard_cf_name = + |cf_name: &str, shard_id: u8| -> String { format!("shard_{}_{}", shard_id, cf_name) }; + + let split_ledger = state_kv_db.enabled_sharding(); + + if split_ledger { + for cf in ledger_metadata_db_column_families() { + set_property_fn(cf, ledger_db.metadata_db())?; + } + + for cf in write_set_db_column_families() { + set_property_fn(cf, ledger_db.write_set_db())?; + } + + for cf in transaction_info_db_column_families() { + set_property_fn(cf, ledger_db.transaction_info_db())?; + } + + for cf in transaction_db_column_families() { + set_property_fn(cf, ledger_db.transaction_db())?; + } + + for cf in event_db_column_families() { + set_property_fn(cf, ledger_db.event_db())?; + } + + for cf in transaction_accumulator_db_column_families() { + set_property_fn(cf, ledger_db.transaction_accumulator_db())?; + } + + for cf in state_kv_db_column_families() { + set_property_fn(cf, state_kv_db.metadata_db())?; + if state_kv_db.enabled_sharding() { + for shard in 0..NUM_STATE_SHARDS { + set_property_fn( + gen_shard_cf_name(cf, shard as u8).as_str(), + state_kv_db.db_shard(shard as u8), + )?; + } + } + } + } else { + for cf in ledger_db_column_families() { + set_property_fn(cf, ledger_db.metadata_db())?; } } + for cf_name in state_merkle_db_column_families() { - for (rockdb_property_name, aptos_rocksdb_property_name) in &*ROCKSDB_PROPERTY_MAP { - // TODO(grao): Support sharding here. - ROCKSDB_PROPERTIES - .with_label_values(&[cf_name, aptos_rocksdb_property_name]) - .set( - state_merkle_db - .metadata_db() - .get_property(cf_name, rockdb_property_name)? as i64, - ); + set_property_fn(cf_name, state_merkle_db.metadata_db())?; + if state_merkle_db.sharding_enabled() { + for shard in 0..NUM_STATE_SHARDS { + set_property_fn( + gen_shard_cf_name(cf_name, shard as u8).as_str(), + state_merkle_db.db_shard(shard as u8), + )?; + } } } Ok(()) @@ -225,10 +284,14 @@ struct RocksdbPropertyReporter { } impl RocksdbPropertyReporter { - fn new(ledger_rocksdb: Arc, state_merkle_rocksdb: Arc) -> Self { + fn new( + ledger_db: Arc, + state_merkle_db: Arc, + state_kv_db: Arc, + ) -> Self { let (send, recv) = mpsc::channel(); let join_handle = Some(thread::spawn(move || loop { - if let Err(e) = update_rocksdb_properties(&ledger_rocksdb, &state_merkle_rocksdb) { + if let Err(e) = update_rocksdb_properties(&ledger_db, &state_merkle_db, &state_kv_db) { warn!( error = ?e, "Updating rocksdb property failed." @@ -266,12 +329,11 @@ impl Drop for RocksdbPropertyReporter { /// access to the core Aptos data structures. pub struct AptosDB { ledger_db: Arc, - state_merkle_db: Arc, state_kv_db: Arc, - event_store: Arc, - ledger_store: Arc, - state_store: Arc, - transaction_store: Arc, + pub(crate) event_store: Arc, + pub(crate) ledger_store: Arc, + pub(crate) state_store: Arc, + pub(crate) transaction_store: Arc, ledger_pruner: LedgerPrunerManager, _rocksdb_property_reporter: RocksdbPropertyReporter, ledger_commit_lock: std::sync::Mutex<()>, @@ -321,17 +383,16 @@ impl AptosDB { AptosDB { ledger_db: Arc::clone(&ledger_db), - state_merkle_db: Arc::clone(&state_merkle_db), state_kv_db: Arc::clone(&state_kv_db), event_store: Arc::new(EventStore::new(ledger_db.event_db_arc())), ledger_store: Arc::new(LedgerStore::new(Arc::clone(&ledger_db))), state_store, transaction_store: Arc::new(TransactionStore::new(Arc::clone(&ledger_db))), ledger_pruner, - // TODO(grao): Include other DBs. 
_rocksdb_property_reporter: RocksdbPropertyReporter::new( - ledger_db.metadata_db_arc(), - Arc::clone(&state_merkle_db), + ledger_db, + state_merkle_db, + state_kv_db, ), ledger_commit_lock: std::sync::Mutex::new(()), indexer: None, @@ -519,7 +580,10 @@ impl AptosDB { /// This opens db with sharding enabled. #[cfg(any(test, feature = "fuzzing"))] - pub fn new_for_test_with_sharding + Clone>(db_root_path: P) -> Self { + pub fn new_for_test_with_sharding + Clone>( + db_root_path: P, + max_node_cache: usize, + ) -> Self { let db_config = RocksdbConfigs { use_sharded_state_merkle_db: true, split_ledger_db: true, @@ -532,7 +596,7 @@ impl AptosDB { db_config, false, BUFFERED_STATE_TARGET_ITEMS, - DEFAULT_MAX_NUM_NODES_PER_LRU_CACHE_SHARD, + max_node_cache, ) .expect("Unable to open AptosDB") } @@ -588,9 +652,9 @@ impl AptosDB { self.state_store.buffered_state() } - /// This force the db to update rocksdb properties immediately. - pub fn update_rocksdb_properties(&self) -> Result<()> { - update_rocksdb_properties(&self.ledger_db.metadata_db_arc(), &self.state_merkle_db) + #[cfg(any(test, feature = "fuzzing"))] + fn state_merkle_db(&self) -> Arc { + self.state_store.state_db.state_merkle_db.clone() } /// Returns ledger infos reflecting epoch bumps starting with the given epoch. If there are no @@ -623,6 +687,7 @@ impl AptosDB { ); // Note that the latest epoch can be the same with the current epoch (in most cases), or // current_epoch + 1 (when the latest ledger_info carries next validator set) + let latest_epoch = self .ledger_store .get_latest_ledger_info()? @@ -645,6 +710,7 @@ impl AptosDB { .ledger_store .get_epoch_ending_ledger_info_iter(start_epoch, paging_epoch)? .collect::>>()?; + ensure!( lis.len() == (paging_epoch - start_epoch) as usize, "DB corruption: missing epoch ending ledger info for epoch {}", @@ -2158,6 +2224,7 @@ impl DbWriter for AptosDB { }) } + // TODO(bowu): populate the flag indicating the fast_sync is done. fn finalize_state_snapshot( &self, version: Version, diff --git a/storage/aptosdb/src/pruner/state_merkle_pruner/mod.rs b/storage/aptosdb/src/pruner/state_merkle_pruner/mod.rs index b794b5ac223ac..79e038e2728dc 100644 --- a/storage/aptosdb/src/pruner/state_merkle_pruner/mod.rs +++ b/storage/aptosdb/src/pruner/state_merkle_pruner/mod.rs @@ -178,10 +178,11 @@ where s.spawn(move |_| { shard_pruner .prune(current_progress, target_version) - .unwrap_or_else(|_| { + .unwrap_or_else(|err| { panic!( - "Failed to prune state merkle shard {}.", - shard_pruner.shard_id() + "Failed to prune state merkle shard {}: {}", + shard_pruner.shard_id(), + err, ) }); }); diff --git a/storage/aptosdb/src/pruner/state_merkle_pruner/test.rs b/storage/aptosdb/src/pruner/state_merkle_pruner/test.rs index 1eadedebf9295..5179faa172ccc 100644 --- a/storage/aptosdb/src/pruner/state_merkle_pruner/test.rs +++ b/storage/aptosdb/src/pruner/state_merkle_pruner/test.rs @@ -8,7 +8,7 @@ use crate::{ state_merkle_db::StateMerkleDb, state_store::StateStore, test_helper::{arb_state_kv_sets, update_store}, - AptosDB, PrunerManager, StateKvPrunerManager, StateMerklePrunerManager, + AptosDB, PrunerManager, StateKvPrunerManager, StateMerklePrunerManager, NUM_STATE_SHARDS, }; use aptos_config::config::{LedgerPrunerConfig, StateMerklePrunerConfig}; use aptos_crypto::HashValue; @@ -128,7 +128,7 @@ fn test_state_store_pruner() { // test the min_readable_version initialization logic. 
{ let pruner = - create_state_merkle_pruner_manager(&aptos_db.state_merkle_db, prune_batch_size); + create_state_merkle_pruner_manager(&aptos_db.state_merkle_db(), prune_batch_size); pruner.wake_and_wait_pruner(0 /* latest_version */).unwrap(); for i in 0..num_versions { verify_state_in_store( @@ -145,7 +145,7 @@ fn test_state_store_pruner() { // min_readable_version initialization logic. { let pruner = - create_state_merkle_pruner_manager(&aptos_db.state_merkle_db, prune_batch_size); + create_state_merkle_pruner_manager(&aptos_db.state_merkle_db(), prune_batch_size); pruner .wake_and_wait_pruner(prune_batch_size as u64 /* latest_version */) .unwrap(); @@ -194,7 +194,7 @@ fn test_state_store_pruner_partial_version() { let prune_batch_size = 1; let tmp_dir = TempPath::new(); - let aptos_db = AptosDB::new_for_test_no_cache(&tmp_dir); + let aptos_db = AptosDB::new_for_test_with_sharding(&tmp_dir, 0); let state_store = &aptos_db.state_store; let _root0 = put_value_set( @@ -220,7 +220,7 @@ fn test_state_store_pruner_partial_version() { // to test the min_readable_version initialization logic. { let pruner = - create_state_merkle_pruner_manager(&aptos_db.state_merkle_db, prune_batch_size); + create_state_merkle_pruner_manager(&aptos_db.state_merkle_db(), prune_batch_size); pruner.wake_and_wait_pruner(0 /* latest_version */).unwrap(); verify_state_in_store(state_store, key1.clone(), Some(&value1), 1); verify_state_in_store(state_store, key2.clone(), Some(&value2_update), 1); @@ -232,7 +232,7 @@ fn test_state_store_pruner_partial_version() { // min_readable_version initialization logic. { let pruner = - create_state_merkle_pruner_manager(&aptos_db.state_merkle_db, prune_batch_size); + create_state_merkle_pruner_manager(&aptos_db.state_merkle_db(), prune_batch_size); assert!(pruner.wake_and_wait_pruner(1 /* latest_version */,).is_ok()); assert!(state_store .get_state_value_with_proof_by_version(&key1, 0_u64) @@ -246,7 +246,7 @@ fn test_state_store_pruner_partial_version() { // everytime to test the min_readable_version initialization logic. { let pruner = - create_state_merkle_pruner_manager(&aptos_db.state_merkle_db, prune_batch_size); + create_state_merkle_pruner_manager(&aptos_db.state_merkle_db(), prune_batch_size); assert!(pruner.wake_and_wait_pruner(2 /* latest_version */,).is_ok()); assert!(pruner.wake_and_wait_pruner(2 /* latest_version */,).is_ok()); @@ -265,16 +265,29 @@ fn test_state_store_pruner_partial_version() { // Make sure all stale indices are gone. // - // TODO(grao): Support sharding here. assert_eq!( aptos_db - .state_merkle_db + .state_merkle_db() .metadata_db() .iter::(ReadOptions::default()) .unwrap() .count(), 0 ); + + if aptos_db.state_merkle_db().sharding_enabled() { + for i in 0..NUM_STATE_SHARDS as u8 { + assert_eq!( + aptos_db + .state_merkle_db() + .db_shard(i) + .iter::(ReadOptions::default()) + .unwrap() + .count(), + 0 + ); + } + } } #[test] diff --git a/storage/aptosdb/src/state_kv_db.rs b/storage/aptosdb/src/state_kv_db.rs index e0764d56626e9..6dda6b68f1dec 100644 --- a/storage/aptosdb/src/state_kv_db.rs +++ b/storage/aptosdb/src/state_kv_db.rs @@ -108,7 +108,7 @@ impl StateKvDb { s.spawn(move |_| { // TODO(grao): Consider propagating the error instead of panic, if necessary. 
self.commit_single_shard(version, shard_id as u8, state_kv_batch) - .unwrap_or_else(|_| panic!("Failed to commit shard {shard_id}.")); + .unwrap_or_else(|err| panic!("Failed to commit shard {shard_id}: {err}.")); }); } }); diff --git a/storage/aptosdb/src/state_merkle_db.rs b/storage/aptosdb/src/state_merkle_db.rs index 79210535ae370..1252089ef6d1d 100644 --- a/storage/aptosdb/src/state_merkle_db.rs +++ b/storage/aptosdb/src/state_merkle_db.rs @@ -121,8 +121,8 @@ impl StateMerkleDb { let state_merkle_batch = batches.next().unwrap(); s.spawn(move |_| { self.commit_single_shard(version, shard_id as u8, state_merkle_batch) - .unwrap_or_else(|_| { - panic!("Failed to commit state merkle shard {shard_id}.") + .unwrap_or_else(|err| { + panic!("Failed to commit state merkle shard {shard_id}: {err}") }); }); } @@ -648,24 +648,48 @@ impl StateMerkleDb { ) -> Result> { let mut ret = None; - // TODO(grao): Support sharding here. - let mut iter = self - .metadata_db() - .iter::(Default::default())?; - iter.seek(&(version, 0)).unwrap(); + if self.enable_sharding { + let mut iter = self + .metadata_db() + .iter::(Default::default())?; + iter.seek(&(version, 0)).unwrap(); + // early exit if no node is found for the target version + match iter.next().transpose()? { + Some((node_key, node)) => { + if node.node_type() == NodeType::Null || node_key.version() != version { + return Ok(None); + } + }, + None => return Ok(None), + }; + } - while let Some((node_key, node)) = iter.next().transpose()? { - if let Node::Leaf(leaf_node) = node { - if node_key.version() != version { - break; - } - match ret { - None => ret = Some((node_key, leaf_node)), - Some(ref other) => { - if leaf_node.account_key() > other.1.account_key() { - ret = Some((node_key, leaf_node)); - } - }, + // traverse all shards in a naive way + // if sharding is not enable, we only need to search once. + let shards = self + .enable_sharding + .then(|| (0..NUM_STATE_SHARDS)) + .unwrap_or(0..1); + let start_num_of_nibbles = if self.enable_sharding { 1 } else { 0 }; + for shard_id in shards.rev() { + let shard_db = self.state_merkle_db_shards[shard_id].clone(); + let mut shard_iter = shard_db.iter::(Default::default())?; + // DB sharded only contain nodes with num_of_nibbles >= 1 + shard_iter.seek(&(version, start_num_of_nibbles)).unwrap(); + + while let Some((node_key, node)) = shard_iter.next().transpose()? { + if let Node::Leaf(leaf_node) = node { + if node_key.version() != version { + break; + } + match ret { + None => ret = Some((node_key, leaf_node)), + Some(ref other) => { + if leaf_node.account_key() > other.1.account_key() { + ret = Some((node_key, leaf_node)); + } + }, + } } } } @@ -777,6 +801,7 @@ impl TreeReader for StateMerkleDb { fn get_rightmost_leaf(&self, version: Version) -> Result> { // Since everything has the same version during restore, we seek to the first node and get // its version. 
+ let mut iter = self .metadata_db() .iter::(Default::default())?; diff --git a/storage/aptosdb/src/state_store/mod.rs b/storage/aptosdb/src/state_store/mod.rs index e8a53ae1b2b9c..8eb86e6d5674d 100644 --- a/storage/aptosdb/src/state_store/mod.rs +++ b/storage/aptosdb/src/state_store/mod.rs @@ -78,7 +78,7 @@ const MAX_WRITE_SETS_AFTER_SNAPSHOT: LeafCount = buffered_state::TARGET_SNAPSHOT * (buffered_state::ASYNC_COMMIT_CHANNEL_BUFFER_SIZE + 2 + 1/* Rendezvous channel */) * 2; -const MAX_COMMIT_PROGRESS_DIFFERENCE: u64 = 100000; +pub const MAX_COMMIT_PROGRESS_DIFFERENCE: u64 = 100000; pub(crate) struct StateDb { pub ledger_db: Arc, @@ -305,11 +305,13 @@ impl StateStore { empty_buffered_state_for_restore: bool, skip_usage: bool, ) -> Self { - Self::sync_commit_progress( - Arc::clone(&ledger_db), - Arc::clone(&state_kv_db), - /*crash_if_difference_is_too_large=*/ true, - ); + if !hack_for_tests { + Self::sync_commit_progress( + Arc::clone(&ledger_db), + Arc::clone(&state_kv_db), + /*crash_if_difference_is_too_large=*/ true, + ); + } let state_db = Arc::new(StateDb { ledger_db, state_merkle_db, @@ -379,29 +381,24 @@ impl StateStore { .expect_version(); assert_ge!(state_kv_commit_progress, overall_commit_progress); - if ledger_commit_progress != overall_commit_progress { - info!( - ledger_commit_progress = ledger_commit_progress, - "Start truncation...", - ); - let difference = ledger_commit_progress - overall_commit_progress; - if crash_if_difference_is_too_large { - assert_le!(difference, MAX_COMMIT_PROGRESS_DIFFERENCE); - } - // TODO(grao): Support truncation for splitted ledger DBs. - truncate_ledger_db( - ledger_db, - ledger_commit_progress, - overall_commit_progress, - difference as usize, - ) - .expect("Failed to truncate ledger db."); + // LedgerCommitProgress was not guaranteed to commit after all ledger changes finish, + // have to attempt truncating every column family. + info!( + ledger_commit_progress = ledger_commit_progress, + "Attempt ledger truncation...", + ); + let difference = ledger_commit_progress - overall_commit_progress; + if crash_if_difference_is_too_large { + assert_le!(difference, MAX_COMMIT_PROGRESS_DIFFERENCE); } + // TODO(grao): Support truncation for split ledger DBs. + truncate_ledger_db(ledger_db, overall_commit_progress) + .expect("Failed to truncate ledger db."); if state_kv_commit_progress != overall_commit_progress { info!( state_kv_commit_progress = state_kv_commit_progress, - "Start truncation..." + "Start state KV truncation..." ); let difference = state_kv_commit_progress - overall_commit_progress; if crash_if_difference_is_too_large { @@ -1070,7 +1067,6 @@ impl StateStore { #[cfg(test)] pub fn get_all_jmt_nodes(&self) -> Result> { - // TODO(grao): Support sharding here. 
let mut iter = self .state_db .state_merkle_db @@ -1079,8 +1075,26 @@ impl StateStore { Default::default(), )?; iter.seek_to_first(); + let all_rows = iter.collect::>>()?; - Ok(all_rows.into_iter().map(|(k, _v)| k).collect()) + + let mut keys: Vec = + all_rows.into_iter().map(|(k, _v)| k).collect(); + if self.state_merkle_db.sharding_enabled() { + for i in 0..NUM_STATE_SHARDS as u8 { + let mut iter = self + .state_merkle_db + .db_shard(i) + .iter::( + Default::default(), + )?; + iter.seek_to_first(); + + let all_rows = iter.collect::>>()?; + keys.extend(all_rows.into_iter().map(|(k, _v)| k).collect::>()); + } + } + Ok(keys) } fn prepare_version_in_cache( diff --git a/storage/aptosdb/src/state_store/state_store_test.rs b/storage/aptosdb/src/state_store/state_store_test.rs index 2196ddc1cc534..b67643f3b7150 100644 --- a/storage/aptosdb/src/state_store/state_store_test.rs +++ b/storage/aptosdb/src/state_store/state_store_test.rs @@ -309,13 +309,13 @@ pub fn test_get_state_snapshot_before() { &Node::Null, ) .unwrap(); - db.state_merkle_db + db.state_merkle_db() .metadata_db() .write_schemas(batch) .unwrap(); assert_eq!( - db.state_merkle_db + db.state_merkle_db() .get_state_snapshot_version_before(4) .unwrap(), Some(2) @@ -325,13 +325,13 @@ pub fn test_get_state_snapshot_before() { batch .delete::(&NodeKey::new_empty_path(2)) .unwrap(); - db.state_merkle_db + db.state_merkle_db() .metadata_db() .write_schemas(batch) .unwrap(); assert_eq!( - db.state_merkle_db + db.state_merkle_db() .get_state_snapshot_version_before(4) .unwrap(), Some(0) diff --git a/storage/aptosdb/src/test_helper.rs b/storage/aptosdb/src/test_helper.rs index 9c4e534ff4edb..930ace9866b6e 100644 --- a/storage/aptosdb/src/test_helper.rs +++ b/storage/aptosdb/src/test_helper.rs @@ -233,11 +233,19 @@ pub fn arb_blocks_to_commit( pub fn arb_blocks_to_commit_with_block_nums( min_blocks: usize, max_blocks: usize, -) -> impl Strategy, LedgerInfoWithSignatures)>> { - arb_blocks_to_commit_impl( - 5, /* num_accounts */ - 2, /* max_user_txn_per_block */ - min_blocks, max_blocks, +) -> impl Strategy< + Value = ( + Vec<(Vec, LedgerInfoWithSignatures)>, + bool, + ), +> { + ( + arb_blocks_to_commit_impl( + 5, /* num_accounts */ + 2, /* max_user_txn_per_block */ + min_blocks, max_blocks, + ), + proptest::bool::ANY, ) } @@ -897,7 +905,7 @@ pub fn put_transaction_info(db: &AptosDB, version: Version, txn_info: &Transacti pub fn put_as_state_root(db: &AptosDB, version: Version, key: StateKey, value: StateValue) { let leaf_node = Node::new_leaf(key.hash(), value.hash(), (key.clone(), version)); - db.state_merkle_db + db.state_merkle_db() .metadata_db() .put::(&NodeKey::new_empty_path(version), &leaf_node) .unwrap(); diff --git a/storage/aptosdb/src/utils/truncation_helper.rs b/storage/aptosdb/src/utils/truncation_helper.rs index 2b40e9218ca24..f4565837f73c7 100644 --- a/storage/aptosdb/src/utils/truncation_helper.rs +++ b/storage/aptosdb/src/utils/truncation_helper.rs @@ -5,7 +5,7 @@ use crate::{ db_metadata::{DbMetadataKey, DbMetadataSchema, DbMetadataValue}, - ledger_db::LedgerDb, + ledger_db::{LedgerDb, LedgerDbSchemaBatches}, schema::{ epoch_by_version::EpochByVersionSchema, jellyfish_merkle_node::JellyfishMerkleNodeSchema, ledger_info::LedgerInfoSchema, stale_node_index::StaleNodeIndexSchema, @@ -17,11 +17,13 @@ use crate::{ }, state_kv_db::StateKvDb, state_merkle_db::StateMerkleDb, + state_store::MAX_COMMIT_PROGRESS_DIFFERENCE, utils::get_progress, EventStore, TransactionStore, NUM_STATE_SHARDS, }; use anyhow::Result; use 
aptos_jellyfish_merkle::{node_type::NodeKey, StaleNodeIndex}; +use aptos_logger::info; use aptos_schemadb::{ schema::{Schema, SeekKeyCodec}, ReadOptions, SchemaBatch, DB, @@ -61,34 +63,17 @@ pub(crate) fn get_state_merkle_commit_progress( ) } -pub(crate) fn truncate_ledger_db( - ledger_db: Arc, - current_version: Version, - target_version: Version, - batch_size: usize, -) -> Result<()> { - let status = StatusLine::new(Progress::new(target_version)); - +pub(crate) fn truncate_ledger_db(ledger_db: Arc, target_version: Version) -> Result<()> { let event_store = EventStore::new(ledger_db.event_db_arc()); let transaction_store = TransactionStore::new(Arc::clone(&ledger_db)); - let mut current_version = current_version; - while current_version > target_version { - let start_version = - std::cmp::max(current_version - batch_size as u64 + 1, target_version + 1); - let end_version = current_version + 1; - // TODO(grao): Support splitted ledger DBs here. - truncate_ledger_db_single_batch( - ledger_db.metadata_db(), - &event_store, - &transaction_store, - start_version, - end_version, - )?; - current_version = start_version - 1; - status.set_current_version(current_version); - } - assert_eq!(current_version, target_version); + let start_version = target_version + 1; + truncate_ledger_db_single_batch( + ledger_db.clone(), + &event_store, + &transaction_store, + start_version, + )?; Ok(()) } @@ -232,22 +217,19 @@ pub(crate) fn num_frozen_nodes_in_accumulator(num_leaves: u64) -> u64 { } fn truncate_transaction_accumulator( - ledger_db: &DB, + transaction_accumulator_db: &DB, start_version: Version, - end_version: Version, batch: &SchemaBatch, ) -> Result<()> { - let num_frozen_nodes = num_frozen_nodes_in_accumulator(end_version); - let mut iter = ledger_db.iter::(ReadOptions::default())?; + let mut iter = + transaction_accumulator_db.iter::(ReadOptions::default())?; iter.seek_to_last(); let (position, _) = iter.next().transpose()?.unwrap(); - assert_eq!(position.to_postorder_index() + 1, num_frozen_nodes); - - let num_frozen_nodes_after_this_batch = num_frozen_nodes_in_accumulator(start_version); + let num_frozen_nodes = position.to_postorder_index() + 1; + let num_frozen_nodes_after = num_frozen_nodes_in_accumulator(start_version); + let mut num_nodes_to_delete = num_frozen_nodes - num_frozen_nodes_after; - let mut num_nodes_to_delete = num_frozen_nodes - num_frozen_nodes_after_this_batch; - - let start_position = Position::from_postorder_index(num_frozen_nodes_after_this_batch)?; + let start_position = Position::from_postorder_index(num_frozen_nodes_after)?; iter.seek(&start_position)?; for item in iter { @@ -262,40 +244,61 @@ fn truncate_transaction_accumulator( } fn truncate_ledger_db_single_batch( - ledger_db: &DB, + ledger_db: Arc, event_store: &EventStore, transaction_store: &TransactionStore, start_version: Version, - end_version: Version, ) -> Result<()> { - let batch = SchemaBatch::new(); + let batch = LedgerDbSchemaBatches::new(); - delete_transaction_index_data(transaction_store, start_version, end_version, &batch)?; - delete_per_epoch_data(ledger_db, start_version, end_version, &batch)?; - delete_per_version_data(start_version, end_version, &batch)?; + delete_transaction_index_data( + transaction_store, + start_version, + &batch.transaction_db_batches, + )?; + delete_per_epoch_data( + ledger_db.metadata_db(), + start_version, + &batch.ledger_metadata_db_batches, + )?; + delete_per_version_data(&ledger_db, start_version, &batch)?; - event_store.prune_events(start_version, end_version, 
&batch)?; + delete_event_data(event_store, start_version, &batch.event_db_batches)?; - truncate_transaction_accumulator(ledger_db, start_version, end_version, &batch)?; + truncate_transaction_accumulator( + ledger_db.transaction_accumulator_db(), + start_version, + &batch.transaction_accumulator_db_batches, + )?; - batch.put::( + let progress_batch = SchemaBatch::new(); + progress_batch.put::( &DbMetadataKey::LedgerCommitProgress, &DbMetadataValue::Version(start_version - 1), )?; + ledger_db.metadata_db().write_schemas(progress_batch)?; + ledger_db.write_schemas(batch) } fn delete_transaction_index_data( transaction_store: &TransactionStore, start_version: Version, - end_version: Version, batch: &SchemaBatch, ) -> Result<()> { let transactions = transaction_store - .get_transaction_iter(start_version, (end_version - start_version) as usize)? + .get_transaction_iter(start_version, MAX_COMMIT_PROGRESS_DIFFERENCE as usize * 2)? .collect::>>()?; - transaction_store.prune_transaction_by_account(&transactions, batch)?; - transaction_store.prune_transaction_by_hash(&transactions, batch)?; + let num_txns = transactions.len(); + if num_txns > 0 { + info!( + start_version = start_version, + latest_version = start_version + num_txns as u64 - 1, + "Truncate transaction index data." + ); + transaction_store.prune_transaction_by_account(&transactions, batch)?; + transaction_store.prune_transaction_by_hash(&transactions, batch)?; + } Ok(()) } @@ -303,15 +306,18 @@ fn delete_transaction_index_data( fn delete_per_epoch_data( ledger_db: &DB, start_version: Version, - end_version: Version, batch: &SchemaBatch, ) -> Result<()> { let mut iter = ledger_db.iter::(ReadOptions::default())?; iter.seek_to_last(); if let Some((epoch, ledger_info)) = iter.next().transpose()? { let version = ledger_info.commit_info().version(); - assert_lt!(version, end_version); if version >= start_version { + info!( + version = version, + epoch = epoch, + "Truncate latest epoch data." + ); batch.delete::(&epoch)?; } } @@ -321,7 +327,11 @@ fn delete_per_epoch_data( for item in iter { let (version, epoch) = item?; - assert_lt!(version, end_version); + info!( + version = version, + epoch = epoch, + "Truncate epoch ending data." + ); batch.delete::(&version)?; batch.delete::(&epoch)?; } @@ -330,17 +340,75 @@ fn delete_per_epoch_data( } fn delete_per_version_data( + ledger_db: &LedgerDb, start_version: Version, - end_version: Version, - batch: &SchemaBatch, + batch: &LedgerDbSchemaBatches, ) -> Result<()> { - for version in start_version..end_version { - batch.delete::(&version)?; - batch.delete::(&version)?; - batch.delete::(&version)?; - batch.delete::(&version)?; + delete_per_version_data_impl::( + ledger_db.transaction_info_db(), + start_version, + &batch.transaction_info_db_batches, + )?; + delete_per_version_data_impl::( + ledger_db.transaction_db(), + start_version, + &batch.transaction_db_batches, + )?; + delete_per_version_data_impl::( + ledger_db.metadata_db(), + start_version, + &batch.ledger_metadata_db_batches, + )?; + delete_per_version_data_impl::( + ledger_db.write_set_db(), + start_version, + &batch.write_set_db_batches, + )?; + + Ok(()) +} + +fn delete_per_version_data_impl( + ledger_db: &DB, + start_version: Version, + batch: &SchemaBatch, +) -> Result<()> +where + S: Schema, +{ + let mut iter = ledger_db.iter::(ReadOptions::default())?; + iter.seek_to_last(); + if let Some((lastest_version, _)) = iter.next().transpose()? 
{ + if lastest_version >= start_version { + info!( + start_version = start_version, + latest_version = lastest_version, + cf_name = S::COLUMN_FAMILY_NAME, + "Truncate per version data." + ); + for version in start_version..=lastest_version { + batch.delete::(&version)?; + } + } } + Ok(()) +} +fn delete_event_data( + event_store: &EventStore, + start_version: Version, + batch: &SchemaBatch, +) -> Result<()> { + if let Some(latest_version) = event_store.latest_version()? { + if latest_version >= start_version { + info!( + start_version = start_version, + latest_version = latest_version, + "Truncate event data." + ); + event_store.prune_events(start_version, latest_version + 1, batch)?; + } + } Ok(()) } diff --git a/storage/backup/backup-cli/src/metadata/cache.rs b/storage/backup/backup-cli/src/metadata/cache.rs index aac3f136a9b3a..64fc9259e81cb 100644 --- a/storage/backup/backup-cli/src/metadata/cache.rs +++ b/storage/backup/backup-cli/src/metadata/cache.rs @@ -14,7 +14,6 @@ use aptos_temppath::TempPath; use async_trait::async_trait; use clap::Parser; use futures::stream::poll_fn; -use once_cell::sync::Lazy; use std::{ collections::{HashMap, HashSet}, path::{Path, PathBuf}, @@ -27,13 +26,6 @@ use tokio::{ }; use tokio_stream::StreamExt; -static TEMP_METADATA_CACHE_DIR: Lazy = Lazy::new(|| { - let dir = TempPath::new(); - dir.create_as_dir() - .expect("Temp metadata dir should create."); - dir -}); - #[derive(Clone, Parser)] pub struct MetadataCacheOpt { #[clap( @@ -60,7 +52,7 @@ impl MetadataCacheOpt { pub(crate) fn cache_dir(&self) -> PathBuf { self.dir .clone() - .unwrap_or_else(|| TEMP_METADATA_CACHE_DIR.path().to_path_buf()) + .unwrap_or_else(|| TempPath::new().path().to_path_buf()) .join(Self::SUB_DIR) } } diff --git a/storage/indexer/Cargo.toml b/storage/indexer/Cargo.toml index 5440217d5ec75..366d188e5711d 100644 --- a/storage/indexer/Cargo.toml +++ b/storage/indexer/Cargo.toml @@ -30,6 +30,7 @@ aptos-types = { workspace = true } aptos-vm = { workspace = true } bcs = { workspace = true } byteorder = { workspace = true } +bytes = { workspace = true } move-core-types = { workspace = true } move-resource-viewer = { workspace = true } num-derive = { workspace = true } diff --git a/storage/indexer/src/lib.rs b/storage/indexer/src/lib.rs index 5260177da34cd..426d04786f4f3 100644 --- a/storage/indexer/src/lib.rs +++ b/storage/indexer/src/lib.rs @@ -28,10 +28,12 @@ use aptos_types::{ transaction::{AtomicVersion, Version}, write_set::{WriteOp, WriteSet}, }; -use aptos_vm::data_cache::{AsMoveResolver, StorageAdapter}; +use aptos_vm::data_cache::AsMoveResolver; +use bytes::Bytes; use move_core_types::{ ident_str, language_storage::{StructTag, TypeTag}, + resolver::MoveResolver, }; use move_resource_viewer::{AnnotatedMoveValue, MoveValueAnnotator}; use std::{ @@ -39,7 +41,6 @@ use std::{ convert::TryInto, sync::{atomic::Ordering, Arc}, }; - #[derive(Debug)] pub struct Indexer { db: DB, @@ -86,9 +87,9 @@ impl Indexer { self.index_with_annotator(&annotator, first_version, write_sets) } - pub fn index_with_annotator( + pub fn index_with_annotator( &self, - annotator: &MoveValueAnnotator>, + annotator: &MoveValueAnnotator, first_version: Version, write_sets: &[&WriteSet], ) -> Result<()> { @@ -150,18 +151,15 @@ impl Indexer { } } -struct TableInfoParser<'a> { +struct TableInfoParser<'a, R> { indexer: &'a Indexer, - annotator: &'a MoveValueAnnotator<'a, StorageAdapter<'a, DbStateView>>, + annotator: &'a MoveValueAnnotator<'a, R>, result: HashMap, - pending_on: HashMap>, + pending_on: HashMap>, } 
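The `pending_on` map of `TableInfoParser` above now holds owned `bytes::Bytes` values rather than borrowed `&'a [u8]` slices, and `parse_table_item` (in the next hunk) pushes `bytes.clone()`. That stays cheap because cloning a `Bytes` only bumps a reference count; a minimal standalone sketch, not part of the patch:

```rust
use bytes::Bytes;

fn main() {
    let original = Bytes::from(vec![1u8, 2, 3]);
    // Clone shares the same backing buffer instead of copying the payload,
    // so keeping owned clones around for pending table items is inexpensive.
    let shared = original.clone();
    assert_eq!(original.as_ptr(), shared.as_ptr());
}
```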
-impl<'a> TableInfoParser<'a> { - pub fn new( - indexer: &'a Indexer, - annotator: &'a MoveValueAnnotator>, - ) -> Self { +impl<'a, R: MoveResolver> TableInfoParser<'a, R> { + pub fn new(indexer: &'a Indexer, annotator: &'a MoveValueAnnotator) -> Self { Self { indexer, annotator, @@ -188,7 +186,7 @@ impl<'a> TableInfoParser<'a> { Ok(()) } - fn parse_struct(&mut self, struct_tag: StructTag, bytes: &[u8]) -> Result<()> { + fn parse_struct(&mut self, struct_tag: StructTag, bytes: &Bytes) -> Result<()> { self.parse_move_value( &self .annotator @@ -196,8 +194,8 @@ impl<'a> TableInfoParser<'a> { ) } - fn parse_resource_group(&mut self, bytes: &[u8]) -> Result<()> { - type ResourceGroup = BTreeMap>; + fn parse_resource_group(&mut self, bytes: &Bytes) -> Result<()> { + type ResourceGroup = BTreeMap; for (struct_tag, bytes) in bcs::from_bytes::(bytes)? { self.parse_struct(struct_tag, &bytes)?; @@ -205,7 +203,7 @@ impl<'a> TableInfoParser<'a> { Ok(()) } - fn parse_table_item(&mut self, handle: TableHandle, bytes: &'a [u8]) -> Result<()> { + fn parse_table_item(&mut self, handle: TableHandle, bytes: &Bytes) -> Result<()> { match self.get_table_info(handle)? { Some(table_info) => { self.parse_move_value(&self.annotator.view_value(&table_info.value_type, bytes)?)?; @@ -214,7 +212,7 @@ impl<'a> TableInfoParser<'a> { self.pending_on .entry(handle) .or_insert_with(Vec::new) - .push(bytes); + .push(bytes.clone()); }, } Ok(()) @@ -269,7 +267,7 @@ impl<'a> TableInfoParser<'a> { self.result.insert(handle, info); if let Some(pending_items) = self.pending_on.remove(&handle) { for bytes in pending_items { - self.parse_table_item(handle, bytes)?; + self.parse_table_item(handle, &bytes)?; } } } diff --git a/storage/scratchpad/benches/sparse_merkle.rs b/storage/scratchpad/benches/sparse_merkle.rs index f28f50c8af10a..3789a693d3d04 100644 --- a/storage/scratchpad/benches/sparse_merkle.rs +++ b/storage/scratchpad/benches/sparse_merkle.rs @@ -179,7 +179,7 @@ impl Benches { None } else { let bytes: Vec = rng.sample_iter::(Standard).take(100).collect(); - Some(StateValue::new_legacy(bytes)) + Some(StateValue::new_legacy(bytes.into())) } } diff --git a/storage/state-view/Cargo.toml b/storage/state-view/Cargo.toml index 7bf40c3a6a777..9a6528c04ba3c 100644 --- a/storage/state-view/Cargo.toml +++ b/storage/state-view/Cargo.toml @@ -17,6 +17,7 @@ anyhow = { workspace = true } aptos-crypto = { workspace = true } aptos-types = { workspace = true } bcs = { workspace = true } +bytes = { workspace = true } serde = { workspace = true } serde_bytes = { workspace = true } serde_json = { workspace = true } diff --git a/storage/state-view/src/account_with_state_cache.rs b/storage/state-view/src/account_with_state_cache.rs index 85043a6f9950b..93306e4fb45a8 100644 --- a/storage/state-view/src/account_with_state_cache.rs +++ b/storage/state-view/src/account_with_state_cache.rs @@ -5,8 +5,8 @@ use aptos_types::{ account_view::AccountView, state_store::{state_key::StateKey, state_value::StateValue}, }; +use bytes::Bytes; use std::collections::HashMap; - pub struct AccountWithStateCache<'a> { account_address: &'a AccountAddress, state_cache: &'a HashMap, @@ -25,8 +25,11 @@ impl<'a> AccountWithStateCache<'a> { } impl<'a> AccountView for AccountWithStateCache<'a> { - fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result>> { - Ok(self.state_cache.get(state_key).map(|x| x.bytes().to_vec())) + fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result> { + Ok(self + .state_cache + .get(state_key) + .map(|val| 
val.bytes().clone())) } fn get_account_address(&self) -> anyhow::Result> { diff --git a/storage/state-view/src/account_with_state_view.rs b/storage/state-view/src/account_with_state_view.rs index 535c76b4c4f64..fb8e07e1f5b69 100644 --- a/storage/state-view/src/account_with_state_view.rs +++ b/storage/state-view/src/account_with_state_view.rs @@ -4,7 +4,7 @@ use crate::StateView; use aptos_types::{ account_address::AccountAddress, account_view::AccountView, state_store::state_key::StateKey, }; - +use bytes::Bytes; pub struct AccountWithStateView<'a> { account_address: &'a AccountAddress, state_view: &'a dyn StateView, @@ -20,7 +20,7 @@ impl<'a> AccountWithStateView<'a> { } impl<'a> AccountView for AccountWithStateView<'a> { - fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result>> { + fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result> { self.state_view.get_state_value_bytes(state_key) } diff --git a/storage/state-view/src/lib.rs b/storage/state-view/src/lib.rs index 407fce8ca11ae..4967a8a16c86e 100644 --- a/storage/state-view/src/lib.rs +++ b/storage/state-view/src/lib.rs @@ -19,8 +19,8 @@ use aptos_types::{ }, transaction::Version, }; +use bytes::Bytes; use std::ops::Deref; - pub mod account_with_state_cache; pub mod account_with_state_view; pub mod in_memory_state_view; @@ -36,18 +36,10 @@ pub trait TStateView { StateViewId::Miscellaneous } - /// Tries to interpret the state value as u128. - fn get_state_value_u128(&self, state_key: &Self::Key) -> Result> { - match self.get_state_value_bytes(state_key)? { - Some(bytes) => Ok(Some(bcs::from_bytes(&bytes)?)), - None => Ok(None), - } - } - /// Gets the state value bytes for a given state key. - fn get_state_value_bytes(&self, state_key: &Self::Key) -> Result>> { + fn get_state_value_bytes(&self, state_key: &Self::Key) -> Result> { let val_opt = self.get_state_value(state_key)?; - Ok(val_opt.map(|val| val.into_bytes())) + Ok(val_opt.map(|val| val.bytes().clone())) } /// Gets the state value for a given state key. diff --git a/storage/storage-interface/Cargo.toml b/storage/storage-interface/Cargo.toml index 71a54b9b7ffd2..216b5e1c23dee 100644 --- a/storage/storage-interface/Cargo.toml +++ b/storage/storage-interface/Cargo.toml @@ -24,6 +24,7 @@ aptos-types = { workspace = true } aptos-vm = { workspace = true } arr_macro = { workspace = true } bcs = { workspace = true } +bytes = { workspace = true } crossbeam-channel = { workspace = true } dashmap = { workspace = true } itertools = { workspace = true } diff --git a/storage/storage-interface/src/lib.rs b/storage/storage-interface/src/lib.rs index 552fcf3105cad..7a328c2054631 100644 --- a/storage/storage-interface/src/lib.rs +++ b/storage/storage-interface/src/lib.rs @@ -100,204 +100,348 @@ pub enum Order { Descending, } +macro_rules! delegate_read { + ($( + $(#[$($attr:meta)*])* + fn $name:ident(&self $(, $arg: ident : $ty: ty $(,)?)*) -> $return_type:ty; + )+) => { + $( + $(#[$($attr)*])* + fn $name(&self, $($arg: $ty),*) -> $return_type { + self.get_read_delegatee().$name($($arg),*) + } + )+ + }; +} + /// Trait that is implemented by a DB that supports certain public (to client) read APIs /// expected of an Aptos DB #[allow(unused_variables)] pub trait DbReader: Send + Sync { - /// See [AptosDB::get_epoch_ending_ledger_infos]. 
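With `get_state_value_bytes` now returning `Option<Bytes>` and the `get_state_value_u128` convenience removed, callers that want a decoded value deserialize from the returned buffer themselves. A hedged caller-side sketch reusing the logic of the deleted helper; `read_u128` is a hypothetical name and the import paths are assumed, not taken from the patch:

```rust
use anyhow::Result;
use aptos_state_view::TStateView;
use aptos_types::state_store::state_key::StateKey;

// Hypothetical replacement for the removed `get_state_value_u128` default method.
fn read_u128<V: TStateView<Key = StateKey>>(view: &V, key: &StateKey) -> Result<Option<u128>> {
    match view.get_state_value_bytes(key)? {
        // `Bytes` derefs to `[u8]`, so it can be fed to bcs directly.
        Some(bytes) => Ok(Some(bcs::from_bytes(&bytes)?)),
        None => Ok(None),
    }
}
```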
- /// - /// [AptosDB::get_epoch_ending_ledger_infos]: - /// ../aptosdb/struct.AptosDB.html#method.get_epoch_ending_ledger_infos - fn get_epoch_ending_ledger_infos( - &self, - start_epoch: u64, - end_epoch: u64, - ) -> Result { - unimplemented!() - } - - /// See [AptosDB::get_transactions]. - /// - /// [AptosDB::get_transactions]: ../aptosdb/struct.AptosDB.html#method.get_transactions - fn get_transactions( - &self, - start_version: Version, - batch_size: u64, - ledger_version: Version, - fetch_events: bool, - ) -> Result { - unimplemented!() - } - - /// See [AptosDB::get_transaction_by_hash]. - /// - /// [AptosDB::get_transaction_by_hash]: ../aptosdb/struct.AptosDB.html#method.get_transaction_by_hash - fn get_transaction_by_hash( - &self, - hash: HashValue, - ledger_version: Version, - fetch_events: bool, - ) -> Result> { - unimplemented!() - } - - /// See [AptosDB::get_transaction_by_version]. - /// - /// [AptosDB::get_transaction_by_version]: ../aptosdb/struct.AptosDB.html#method.get_transaction_by_version - fn get_transaction_by_version( - &self, - version: Version, - ledger_version: Version, - fetch_events: bool, - ) -> Result { - unimplemented!() - } - - /// See [AptosDB::get_first_txn_version]. - /// - /// [AptosDB::get_first_txn_version]: ../aptosdb/struct.AptosDB.html#method.get_first_txn_version - fn get_first_txn_version(&self) -> Result> { - unimplemented!() - } - - /// See [AptosDB::get_first_viable_txn_version]. - /// - /// [AptosDB::get_first_viable_txn_version]: ../aptosdb/struct.AptosDB.html#method.get_first_viable_txn_version - fn get_first_viable_txn_version(&self) -> Result { - unimplemented!() - } - - /// See [AptosDB::get_first_write_set_version]. - /// - /// [AptosDB::get_first_write_set_version]: ../aptosdb/struct.AptosDB.html#method.get_first_write_set_version - fn get_first_write_set_version(&self) -> Result> { - unimplemented!() - } - - /// See [AptosDB::get_transaction_outputs]. - /// - /// [AptosDB::get_transaction_outputs]: ../aptosdb/struct.AptosDB.html#method.get_transaction_outputs - fn get_transaction_outputs( - &self, - start_version: Version, - limit: u64, - ledger_version: Version, - ) -> Result { - unimplemented!() - } - - /// Returns events by given event key - fn get_events( - &self, - event_key: &EventKey, - start: u64, - order: Order, - limit: u64, - ledger_version: Version, - ) -> Result> { - unimplemented!() - } - - fn get_transaction_iterator( - &self, - start_version: Version, - limit: u64, - ) -> Result> + '_>> { - unimplemented!() - } - - fn get_transaction_info_iterator( - &self, - start_version: Version, - limit: u64, - ) -> Result> + '_>> { - unimplemented!() - } - - fn get_events_iterator( - &self, - start_version: Version, - limit: u64, - ) -> Result>> + '_>> { - unimplemented!() - } - - fn get_write_set_iterator( - &self, - start_version: Version, - limit: u64, - ) -> Result> + '_>> { - unimplemented!() - } - - fn get_transaction_accumulator_range_proof( - &self, - start_version: Version, - limit: u64, - ledger_version: Version, - ) -> Result { - unimplemented!() - } - - /// See [AptosDB::get_block_timestamp]. - /// - /// [AptosDB::get_block_timestamp]: - /// ../aptosdb/struct.AptosDB.html#method.get_block_timestamp - fn get_block_timestamp(&self, version: Version) -> Result { - unimplemented!() - } - - fn get_next_block_event(&self, version: Version) -> Result<(Version, NewBlockEvent)> { - unimplemented!() - } - - /// Returns the start_version, end_version and NewBlockEvent of the block containing the input - /// transaction version. 
- fn get_block_info_by_version( - &self, - version: Version, - ) -> Result<(Version, Version, NewBlockEvent)> { - unimplemented!() - } - - /// Returns the start_version, end_version and NewBlockEvent of the block containing the input - /// transaction version. - fn get_block_info_by_height(&self, height: u64) -> Result<(Version, Version, NewBlockEvent)> { - unimplemented!() - } - - /// Gets the version of the last transaction committed before timestamp, - /// a committed block at or after the required timestamp must exist (otherwise it's possible - /// the next block committed as a timestamp smaller than the one in the request). - fn get_last_version_before_timestamp( - &self, - _timestamp: u64, - _ledger_version: Version, - ) -> Result { - unimplemented!() - } - - /// Gets the latest epoch state currently held in storage. - fn get_latest_epoch_state(&self) -> Result { - unimplemented!() - } - - /// Returns the (key, value) iterator for a particular state key prefix at at desired version. This - /// API can be used to get all resources of an account by passing the account address as the - /// key prefix. - fn get_prefixed_state_value_iterator( - &self, - key_prefix: &StateKeyPrefix, - cursor: Option<&StateKey>, - version: Version, - ) -> Result> + '_>> { - unimplemented!() - } - - /// Returns the latest ledger info, if any. - fn get_latest_ledger_info_option(&self) -> Result> { - unimplemented!() - } + fn get_read_delegatee(&self) -> &dyn DbReader { + unimplemented!("Implement desired method or get_delegatee()."); + } + + delegate_read!( + /// See [AptosDB::get_epoch_ending_ledger_infos]. + /// + /// [AptosDB::get_epoch_ending_ledger_infos]: + /// ../aptosdb/struct.AptosDB.html#method.get_epoch_ending_ledger_infos + fn get_epoch_ending_ledger_infos( + &self, + start_epoch: u64, + end_epoch: u64, + ) -> Result; + + /// See [AptosDB::get_transactions]. + /// + /// [AptosDB::get_transactions]: ../aptosdb/struct.AptosDB.html#method.get_transactions + fn get_transactions( + &self, + start_version: Version, + batch_size: u64, + ledger_version: Version, + fetch_events: bool, + ) -> Result; + + /// See [AptosDB::get_transaction_by_hash]. + /// + /// [AptosDB::get_transaction_by_hash]: ../aptosdb/struct.AptosDB.html#method.get_transaction_by_hash + fn get_transaction_by_hash( + &self, + hash: HashValue, + ledger_version: Version, + fetch_events: bool, + ) -> Result>; + + /// See [AptosDB::get_transaction_by_version]. + /// + /// [AptosDB::get_transaction_by_version]: ../aptosdb/struct.AptosDB.html#method.get_transaction_by_version + fn get_transaction_by_version( + &self, + version: Version, + ledger_version: Version, + fetch_events: bool, + ) -> Result; + + /// See [AptosDB::get_first_txn_version]. + /// + /// [AptosDB::get_first_txn_version]: ../aptosdb/struct.AptosDB.html#method.get_first_txn_version + fn get_first_txn_version(&self) -> Result>; + + /// See [AptosDB::get_first_viable_txn_version]. + /// + /// [AptosDB::get_first_viable_txn_version]: ../aptosdb/struct.AptosDB.html#method.get_first_viable_txn_version + fn get_first_viable_txn_version(&self) -> Result; + + /// See [AptosDB::get_first_write_set_version]. + /// + /// [AptosDB::get_first_write_set_version]: ../aptosdb/struct.AptosDB.html#method.get_first_write_set_version + fn get_first_write_set_version(&self) -> Result>; + + /// See [AptosDB::get_transaction_outputs]. 
+ /// + /// [AptosDB::get_transaction_outputs]: ../aptosdb/struct.AptosDB.html#method.get_transaction_outputs + fn get_transaction_outputs( + &self, + start_version: Version, + limit: u64, + ledger_version: Version, + ) -> Result; + + /// Returns events by given event key + fn get_events( + &self, + event_key: &EventKey, + start: u64, + order: Order, + limit: u64, + ledger_version: Version, + ) -> Result>; + + fn get_transaction_iterator( + &self, + start_version: Version, + limit: u64, + ) -> Result> + '_>>; + + fn get_transaction_info_iterator( + &self, + start_version: Version, + limit: u64, + ) -> Result> + '_>>; + + fn get_events_iterator( + &self, + start_version: Version, + limit: u64, + ) -> Result>> + '_>>; + + fn get_write_set_iterator( + &self, + start_version: Version, + limit: u64, + ) -> Result> + '_>>; + + fn get_transaction_accumulator_range_proof( + &self, + start_version: Version, + limit: u64, + ledger_version: Version, + ) -> Result; + + /// See [AptosDB::get_block_timestamp]. + /// + /// [AptosDB::get_block_timestamp]: + /// ../aptosdb/struct.AptosDB.html#method.get_block_timestamp + fn get_block_timestamp(&self, version: Version) -> Result; + + fn get_next_block_event(&self, version: Version) -> Result<(Version, NewBlockEvent)>; + + /// Returns the start_version, end_version and NewBlockEvent of the block containing the input + /// transaction version. + fn get_block_info_by_version( + &self, + version: Version, + ) -> Result<(Version, Version, NewBlockEvent)>; + + /// Returns the start_version, end_version and NewBlockEvent of the block containing the input + /// transaction version. + fn get_block_info_by_height( + &self, + height: u64, + ) -> Result<(Version, Version, NewBlockEvent)>; + + /// Gets the version of the last transaction committed before timestamp, + /// a committed block at or after the required timestamp must exist (otherwise it's possible + /// the next block committed as a timestamp smaller than the one in the request). + fn get_last_version_before_timestamp( + &self, + _timestamp: u64, + _ledger_version: Version, + ) -> Result; + + /// Gets the latest epoch state currently held in storage. + fn get_latest_epoch_state(&self) -> Result; + + /// Returns the (key, value) iterator for a particular state key prefix at at desired version. This + /// API can be used to get all resources of an account by passing the account address as the + /// key prefix. + fn get_prefixed_state_value_iterator( + &self, + key_prefix: &StateKeyPrefix, + cursor: Option<&StateKey>, + version: Version, + ) -> Result> + '_>>; + + /// Returns the latest ledger info, if any. + fn get_latest_ledger_info_option(&self) -> Result>; + + /// Returns the latest committed version, error on on non-bootstrapped/empty DB. + fn get_latest_version(&self) -> Result; + + /// Returns the latest state checkpoint version if any. + fn get_latest_state_checkpoint_version(&self) -> Result>; + + /// Returns the latest state snapshot strictly before `next_version` if any. + fn get_state_snapshot_before( + &self, + next_version: Version, + ) -> Result>; + + /// Returns a transaction that is the `seq_num`-th one associated with the given account. If + /// the transaction with given `seq_num` doesn't exist, returns `None`. + fn get_account_transaction( + &self, + address: AccountAddress, + seq_num: u64, + include_events: bool, + ledger_version: Version, + ) -> Result>; + + /// Returns the list of transactions sent by an account with `address` starting + /// at sequence number `seq_num`. 
Will return no more than `limit` transactions. + /// Will ignore transactions with `txn.version > ledger_version`. Optionally + /// fetch events for each transaction when `fetch_events` is `true`. + fn get_account_transactions( + &self, + address: AccountAddress, + seq_num: u64, + limit: u64, + include_events: bool, + ledger_version: Version, + ) -> Result; + + /// Returns proof of new state for a given ledger info with signatures relative to version known + /// to client + fn get_state_proof_with_ledger_info( + &self, + known_version: u64, + ledger_info: LedgerInfoWithSignatures, + ) -> Result; + + /// Returns proof of new state relative to version known to client + fn get_state_proof(&self, known_version: u64) -> Result; + + /// Gets the state value by state key at version. + /// See [AptosDB::get_state_value_by_version]. + /// + /// [AptosDB::get_state_value_by_version]: + /// ../aptosdb/struct.AptosDB.html#method.get_state_value_by_version + fn get_state_value_by_version( + &self, + state_key: &StateKey, + version: Version, + ) -> Result>; + + /// Get the latest state value and its corresponding version when it's of the given key up + /// to the given version. + /// See [AptosDB::get_state_value_with_version_by_version]. + /// + /// [AptosDB::get_state_value_with_version_by_version]: + /// ../aptosdb/struct.AptosDB.html#method.get_state_value_with_version_by_version + fn get_state_value_with_version_by_version( + &self, + state_key: &StateKey, + version: Version, + ) -> Result>; + + /// Returns the proof of the given state key and version. + fn get_state_proof_by_version_ext( + &self, + state_key: &StateKey, + version: Version, + ) -> Result; + + /// Gets a state value by state key along with the proof, out of the ledger state indicated by the state + /// Merkle tree root with a sparse merkle proof proving state tree root. + /// See [AptosDB::get_account_state_with_proof_by_version]. + /// + /// [AptosDB::get_account_state_with_proof_by_version]: + /// ../aptosdb/struct.AptosDB.html#method.get_account_state_with_proof_by_version + /// + /// This is used by aptos core (executor) internally. + fn get_state_value_with_proof_by_version_ext( + &self, + state_key: &StateKey, + version: Version, + ) -> Result<(Option, SparseMerkleProofExt)>; + + /// Gets the latest ExecutedTrees no matter if db has been bootstrapped. + /// Used by the Db-bootstrapper. + fn get_latest_executed_trees(&self) -> Result; + + /// Get the ledger info of the epoch that `known_version` belongs to. + fn get_epoch_ending_ledger_info( + &self, + known_version: u64, + ) -> Result; + + /// Gets the transaction accumulator root hash at specified version. + /// Caller must guarantee the version is not greater than the latest version. + fn get_accumulator_root_hash(&self, _version: Version) -> Result; + + /// Gets an [`AccumulatorConsistencyProof`] starting from `client_known_version` + /// (or pre-genesis if `None`) until `ledger_version`. + /// + /// In other words, if the client has an accumulator summary for + /// `client_known_version`, they can use the result from this API to efficiently + /// extend their accumulator to `ledger_version` and prove that the new accumulator + /// is consistent with their old accumulator. By consistent, we mean that by + /// appending the actual `ledger_version - client_known_version` transactions + /// to the old accumulator summary you get the new accumulator summary. 
+ /// + /// If the client is starting up for the first time and has no accumulator + /// summary yet, they can call this with `client_known_version=None`, i.e., + /// pre-genesis, to get the complete accumulator summary up to `ledger_version`. + fn get_accumulator_consistency_proof( + &self, + _client_known_version: Option, + _ledger_version: Version, + ) -> Result; + + /// A convenience function for building a [`TransactionAccumulatorSummary`] + /// at the given `ledger_version`. + /// + /// Note: this is roughly equivalent to calling + /// `DbReader::get_accumulator_consistency_proof(None, ledger_version)`. + fn get_accumulator_summary( + &self, + ledger_version: Version, + ) -> Result; + + /// Returns total number of leaves in state store at given version. + fn get_state_leaf_count(&self, version: Version) -> Result; + + /// Get a chunk of state store value, addressed by the index. + fn get_state_value_chunk_with_proof( + &self, + version: Version, + start_idx: usize, + chunk_size: usize, + ) -> Result; + + /// Returns if the state store pruner is enabled. + fn is_state_merkle_pruner_enabled(&self) -> Result; + + /// Get the state prune window config value. + fn get_epoch_snapshot_prune_window(&self) -> Result; + + /// Returns if the ledger pruner is enabled. + fn is_ledger_pruner_enabled(&self) -> Result; + + /// Get the ledger prune window config value. + fn get_ledger_prune_window(&self) -> Result; + + /// Get table info from the internal indexer. + fn get_table_info(&self, handle: TableHandle) -> Result; + + /// Returns whether the internal indexer DB has been enabled or not + fn indexer_enabled(&self) -> bool; + + /// Returns state storage usage at the end of an epoch. + fn get_state_storage_usage(&self, version: Option) -> Result; + ); // end delegated /// Returns the latest ledger info. fn get_latest_ledger_info(&self) -> Result { @@ -305,24 +449,6 @@ pub trait DbReader: Send + Sync { .and_then(|opt| opt.ok_or_else(|| format_err!("Latest LedgerInfo not found."))) } - /// Returns the latest committed version, error on on non-bootstrapped/empty DB. - fn get_latest_version(&self) -> Result { - unimplemented!() - } - - /// Returns the latest state checkpoint version if any. - fn get_latest_state_checkpoint_version(&self) -> Result> { - unimplemented!() - } - - /// Returns the latest state snapshot strictly before `next_version` if any. - fn get_state_snapshot_before( - &self, - next_version: Version, - ) -> Result> { - unimplemented!() - } - /// Returns the latest version and committed block timestamp fn get_latest_commit_metadata(&self) -> Result<(Version, u64)> { let ledger_info_with_sig = self.get_latest_ledger_info()?; @@ -330,100 +456,6 @@ pub trait DbReader: Send + Sync { Ok((ledger_info.version(), ledger_info.timestamp_usecs())) } - /// Returns a transaction that is the `seq_num`-th one associated with the given account. If - /// the transaction with given `seq_num` doesn't exist, returns `None`. - fn get_account_transaction( - &self, - address: AccountAddress, - seq_num: u64, - include_events: bool, - ledger_version: Version, - ) -> Result> { - unimplemented!() - } - - /// Returns the list of transactions sent by an account with `address` starting - /// at sequence number `seq_num`. Will return no more than `limit` transactions. - /// Will ignore transactions with `txn.version > ledger_version`. Optionally - /// fetch events for each transaction when `fetch_events` is `true`. 
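The `delegate_read!` block above turns every listed read API into a default method that forwards to `get_read_delegatee()`. A minimal sketch, not from the patch, of how a wrapper type can now satisfy `DbReader` by pointing at an inner reader; `ReadOnlyWrapper` is a hypothetical name:

```rust
use std::sync::Arc;
// Assumes the `DbReader` trait from this crate is in scope.

struct ReadOnlyWrapper {
    inner: Arc<dyn DbReader>,
}

impl DbReader for ReadOnlyWrapper {
    // Every method emitted by `delegate_read!` now falls through to `inner`;
    // individual methods can still be overridden where different behavior is needed.
    fn get_read_delegatee(&self) -> &dyn DbReader {
        &*self.inner
    }
}
```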
- fn get_account_transactions( - &self, - address: AccountAddress, - seq_num: u64, - limit: u64, - include_events: bool, - ledger_version: Version, - ) -> Result { - unimplemented!() - } - - /// Returns proof of new state for a given ledger info with signatures relative to version known - /// to client - fn get_state_proof_with_ledger_info( - &self, - known_version: u64, - ledger_info: LedgerInfoWithSignatures, - ) -> Result { - unimplemented!() - } - - /// Returns proof of new state relative to version known to client - fn get_state_proof(&self, known_version: u64) -> Result { - unimplemented!() - } - - /// Gets the state value by state key at version. - /// See [AptosDB::get_state_value_by_version]. - /// - /// [AptosDB::get_state_value_by_version]: - /// ../aptosdb/struct.AptosDB.html#method.get_state_value_by_version - fn get_state_value_by_version( - &self, - state_key: &StateKey, - version: Version, - ) -> Result> { - unimplemented!() - } - - /// Get the latest state value and its corresponding version when it's of the given key up - /// to the given version. - /// See [AptosDB::get_state_value_with_version_by_version]. - /// - /// [AptosDB::get_state_value_with_version_by_version]: - /// ../aptosdb/struct.AptosDB.html#method.get_state_value_with_version_by_version - fn get_state_value_with_version_by_version( - &self, - state_key: &StateKey, - version: Version, - ) -> Result> { - unimplemented!() - } - - /// Returns the proof of the given state key and version. - fn get_state_proof_by_version_ext( - &self, - state_key: &StateKey, - version: Version, - ) -> Result { - unimplemented!() - } - - /// Gets a state value by state key along with the proof, out of the ledger state indicated by the state - /// Merkle tree root with a sparse merkle proof proving state tree root. - /// See [AptosDB::get_account_state_with_proof_by_version]. - /// - /// [AptosDB::get_account_state_with_proof_by_version]: - /// ../aptosdb/struct.AptosDB.html#method.get_account_state_with_proof_by_version - /// - /// This is used by aptos core (executor) internally. - fn get_state_value_with_proof_by_version_ext( - &self, - state_key: &StateKey, - version: Version, - ) -> Result<(Option, SparseMerkleProofExt)> { - unimplemented!() - } - fn get_state_value_with_proof_by_version( &self, state_key: &StateKey, @@ -432,106 +464,6 @@ pub trait DbReader: Send + Sync { self.get_state_value_with_proof_by_version_ext(state_key, version) .map(|(value, proof_ext)| (value, proof_ext.into())) } - - /// Gets the latest ExecutedTrees no matter if db has been bootstrapped. - /// Used by the Db-bootstrapper. - fn get_latest_executed_trees(&self) -> Result { - unimplemented!() - } - - /// Get the ledger info of the epoch that `known_version` belongs to. - fn get_epoch_ending_ledger_info(&self, known_version: u64) -> Result { - unimplemented!() - } - - /// Gets the transaction accumulator root hash at specified version. - /// Caller must guarantee the version is not greater than the latest version. - fn get_accumulator_root_hash(&self, _version: Version) -> Result { - unimplemented!() - } - - /// Gets an [`AccumulatorConsistencyProof`] starting from `client_known_version` - /// (or pre-genesis if `None`) until `ledger_version`. - /// - /// In other words, if the client has an accumulator summary for - /// `client_known_version`, they can use the result from this API to efficiently - /// extend their accumulator to `ledger_version` and prove that the new accumulator - /// is consistent with their old accumulator. 
By consistent, we mean that by - /// appending the actual `ledger_version - client_known_version` transactions - /// to the old accumulator summary you get the new accumulator summary. - /// - /// If the client is starting up for the first time and has no accumulator - /// summary yet, they can call this with `client_known_version=None`, i.e., - /// pre-genesis, to get the complete accumulator summary up to `ledger_version`. - fn get_accumulator_consistency_proof( - &self, - _client_known_version: Option, - _ledger_version: Version, - ) -> Result { - unimplemented!() - } - - /// A convenience function for building a [`TransactionAccumulatorSummary`] - /// at the given `ledger_version`. - /// - /// Note: this is roughly equivalent to calling - /// `DbReader::get_accumulator_consistency_proof(None, ledger_version)`. - fn get_accumulator_summary( - &self, - ledger_version: Version, - ) -> Result { - unimplemented!() - } - - /// Returns total number of leaves in state store at given version. - fn get_state_leaf_count(&self, version: Version) -> Result { - unimplemented!() - } - - /// Get a chunk of state store value, addressed by the index. - fn get_state_value_chunk_with_proof( - &self, - version: Version, - start_idx: usize, - chunk_size: usize, - ) -> Result { - unimplemented!() - } - - /// Returns if the state store pruner is enabled. - fn is_state_merkle_pruner_enabled(&self) -> Result { - unimplemented!() - } - - /// Get the state prune window config value. - fn get_epoch_snapshot_prune_window(&self) -> Result { - unimplemented!() - } - - /// Returns if the ledger pruner is enabled. - fn is_ledger_pruner_enabled(&self) -> Result { - unimplemented!() - } - - /// Get the ledger prune window config value. - fn get_ledger_prune_window(&self) -> Result { - unimplemented!() - } - - /// Get table info from the internal indexer. - fn get_table_info(&self, handle: TableHandle) -> Result { - unimplemented!() - } - - /// Returns whether the internal indexer DB has been enabled or not - fn indexer_enabled(&self) -> bool { - unimplemented!() - } - - /// Returns state storage usage at the end of an epoch. - fn get_state_storage_usage(&self, version: Option) -> Result { - unimplemented!() - } } impl MoveStorage for &dyn DbReader { @@ -545,7 +477,7 @@ impl MoveStorage for &dyn DbReader { state_value .ok_or_else(|| format_err!("no value found in DB")) - .map(|value| value.into_bytes()) + .map(|value| value.bytes().to_vec()) } fn fetch_config_by_version(&self, config_id: ConfigID, version: Version) -> Result> { @@ -557,7 +489,7 @@ impl MoveStorage for &dyn DbReader { version, )?; config_value_option - .map(|x| x.into_bytes()) + .map(|x| x.bytes().to_vec()) .ok_or_else(|| anyhow!("no config {} found in aptos root account state", config_id)) } diff --git a/testsuite/forge-cli/src/main.rs b/testsuite/forge-cli/src/main.rs index 451fb3cf21c05..9f8621d50b8f4 100644 --- a/testsuite/forge-cli/src/main.rs +++ b/testsuite/forge-cli/src/main.rs @@ -2,8 +2,13 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. 
// SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::field_reassign_with_default)] + use anyhow::{format_err, Context, Result}; -use aptos_config::config::{ChainHealthBackoffValues, ConsensusConfig, PipelineBackpressureValues}; +use aptos_config::config::{ + BootstrappingMode, ConsensusConfig, ContinuousSyncingMode, MempoolConfig, NetbenchConfig, + NodeConfig, StateSyncConfig, +}; use aptos_forge::{ args::TransactionTypeArg, prometheus_metrics::LatencyBreakdownSlice, @@ -25,7 +30,9 @@ use aptos_testcases::{ generate_traffic, load_vs_perf_benchmark::{LoadVsPerfBenchmark, TransactionWorkload, Workloads}, modifiers::{CpuChaosTest, ExecutionDelayConfig, ExecutionDelayTest}, - multi_region_network_test::MultiRegionNetworkEmulationTest, + multi_region_network_test::{ + MultiRegionNetworkEmulationConfig, MultiRegionNetworkEmulationTest, + }, network_bandwidth_test::NetworkBandwidthTest, network_loss_test::NetworkLossTest, network_partition_test::NetworkPartitionTest, @@ -580,6 +587,8 @@ fn single_test_suite( "pfn_performance_with_network_chaos" => pfn_performance(duration, false, true), "pfn_performance_with_realistic_env" => pfn_performance(duration, true, true), "gather_metrics" => gather_metrics(), + "net_bench" => net_bench(), + "net_bench_two_region_env" => net_bench_two_region_env(), _ => return Err(format_err!("Invalid --suite given: {:?}", test_name)), }; Ok(single_test_suite) @@ -593,6 +602,44 @@ fn wrap_with_realistic_env(test: T) -> CompositeNetwor ) } +fn mempool_config_practically_non_expiring(mempool_config: &mut MempoolConfig) { + mempool_config.capacity = 3_000_000; + mempool_config.capacity_bytes = (3_u64 * 1024 * 1024 * 1024) as usize; + mempool_config.capacity_per_user = 100_000; + mempool_config.system_transaction_timeout_secs = 5 * 60 * 60; + mempool_config.system_transaction_gc_interval_ms = 5 * 60 * 60_000; +} + +fn state_sync_config_execute_transactions(state_sync_config: &mut StateSyncConfig) { + state_sync_config.state_sync_driver.bootstrapping_mode = + BootstrappingMode::ExecuteTransactionsFromGenesis; + state_sync_config.state_sync_driver.continuous_syncing_mode = + ContinuousSyncingMode::ExecuteTransactions; +} + +fn state_sync_config_apply_transaction_outputs(state_sync_config: &mut StateSyncConfig) { + state_sync_config.state_sync_driver.bootstrapping_mode = + BootstrappingMode::ApplyTransactionOutputsFromGenesis; + state_sync_config.state_sync_driver.continuous_syncing_mode = + ContinuousSyncingMode::ApplyTransactionOutputs; +} + +fn state_sync_config_fast_sync(state_sync_config: &mut StateSyncConfig) { + state_sync_config.state_sync_driver.bootstrapping_mode = + BootstrappingMode::DownloadLatestStates; + state_sync_config.state_sync_driver.continuous_syncing_mode = + ContinuousSyncingMode::ApplyTransactionOutputs; +} + +fn wrap_with_two_region_env(test: T) -> CompositeNetworkTest { + CompositeNetworkTest::new( + MultiRegionNetworkEmulationTest::new_with_config( + MultiRegionNetworkEmulationConfig::two_region(), + ), + test, + ) +} + fn run_consensus_only_realistic_env_max_tps() -> ForgeConfig { ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(20).unwrap()) @@ -611,70 +658,56 @@ fn run_consensus_only_realistic_env_max_tps() -> ForgeConfig { // no epoch change. 
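Each of the `state_sync_config_*` helpers above sets the `(bootstrapping_mode, continuous_syncing_mode)` pair on a `StateSyncConfig`, so suites can pick a mode per role. A hedged sketch of a hypothetical suite (`fast_sync_smoke_test` is not in the patch) that combines them through the new typed override hooks; it relies on the same imports as `testsuite/forge-cli/src/main.rs` (`ForgeConfig`, `Arc`, and the helpers above):

```rust
fn fast_sync_smoke_test() -> ForgeConfig {
    ForgeConfig::default()
        // Validators apply transaction outputs...
        .with_validator_override_node_config_fn(Arc::new(|config, _| {
            state_sync_config_apply_transaction_outputs(&mut config.state_sync);
        }))
        // ...while fullnodes bootstrap by downloading the latest states.
        .with_fullnode_override_node_config_fn(Arc::new(|config, _| {
            state_sync_config_fast_sync(&mut config.state_sync);
        }))
}
```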
helm_values["chain"]["epoch_duration_secs"] = (24 * 3600).into(); })) - .with_node_helm_config_fn(Arc::new(|helm_values| { - // Mempool config - helm_values["validator"]["config"]["mempool"]["capacity"] = 3_000_000.into(); - helm_values["validator"]["config"]["mempool"]["capacity_bytes"] = - (3_u64 * 1024 * 1024 * 1024).into(); - helm_values["validator"]["config"]["mempool"]["capacity_per_user"] = 100_000.into(); - helm_values["validator"]["config"]["mempool"]["system_transaction_timeout_secs"] = - (5 * 60 * 60).into(); - helm_values["validator"]["config"]["mempool"]["system_transaction_gc_interval_ms"] = - (5 * 60 * 60_000).into(); - - // State sync config - helm_values["validator"]["config"]["state_sync"]["state_sync_driver"] - ["bootstrapping_mode"] = "ExecuteTransactionsFromGenesis".into(); - helm_values["validator"]["config"]["state_sync"]["state_sync_driver"] - ["continuous_syncing_mode"] = "ExecuteTransactions".into(); - - // consensus configs - helm_values["validator"]["config"]["consensus"] - ["max_sending_block_txns_quorum_store_override"] = 30000.into(); - helm_values["validator"]["config"]["consensus"] - ["max_receiving_block_txns_quorum_store_override"] = 40000.into(); - - helm_values["validator"]["config"]["consensus"] - ["max_sending_block_bytes_quorum_store_override"] = (10 * 1024 * 1024).into(); - helm_values["validator"]["config"]["consensus"] - ["max_receiving_block_bytes_quorum_store_override"] = (12 * 1024 * 1024).into(); - - helm_values["validator"]["config"]["consensus"]["pipeline_backpressure"] = - serde_yaml::to_value(Vec::::new()).unwrap(); - helm_values["validator"]["config"]["consensus"]["chain_health_backoff"] = - serde_yaml::to_value(Vec::::new()).unwrap(); - - // quorum store configs - helm_values["validator"]["config"]["consensus"]["quorum_store"]["back_pressure"] - ["backlog_txn_limit_count"] = 200000.into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"]["back_pressure"] - ["backlog_per_validator_batch_limit_count"] = 50.into(); - - helm_values["validator"]["config"]["consensus"]["quorum_store"]["back_pressure"] - ["dynamic_min_txn_per_s"] = 2000.into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"]["back_pressure"] - ["dynamic_max_txn_per_s"] = 8000.into(); - - helm_values["validator"]["config"]["consensus"]["quorum_store"] - ["sender_max_batch_txns"] = 1000.into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"] - ["sender_max_batch_bytes"] = (4 * 1024 * 1024).into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"] - ["sender_max_num_batches"] = 100.into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"] - ["sender_max_total_txns"] = 4000.into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"] - ["sender_max_total_bytes"] = (8 * 1024 * 1024).into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"] - ["receiver_max_batch_txns"] = 1000.into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"] - ["receiver_max_batch_bytes"] = (4 * 1024 * 1024).into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"] - ["receiver_max_num_batches"] = 100.into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"] - ["receiver_max_total_txns"] = 4000.into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"] - ["receiver_max_total_bytes"] = (8 * 1024 * 1024).into(); + .with_validator_override_node_config_fn(Arc::new(|config, _| { + mempool_config_practically_non_expiring(&mut 
config.mempool); + state_sync_config_execute_transactions(&mut config.state_sync); + + config + .consensus + .max_sending_block_txns_quorum_store_override = 30000; + config + .consensus + .max_receiving_block_txns_quorum_store_override = 40000; + config + .consensus + .max_sending_block_bytes_quorum_store_override = 10 * 1024 * 1024; + config + .consensus + .max_receiving_block_bytes_quorum_store_override = 12 * 1024 * 1024; + config.consensus.pipeline_backpressure = vec![]; + config.consensus.chain_health_backoff = vec![]; + + config + .consensus + .quorum_store + .back_pressure + .backlog_txn_limit_count = 200000; + config + .consensus + .quorum_store + .back_pressure + .backlog_per_validator_batch_limit_count = 50; + config + .consensus + .quorum_store + .back_pressure + .dynamic_min_txn_per_s = 2000; + config + .consensus + .quorum_store + .back_pressure + .dynamic_max_txn_per_s = 8000; + + config.consensus.quorum_store.sender_max_batch_txns = 1000; + config.consensus.quorum_store.sender_max_batch_bytes = 4 * 1024 * 1024; + config.consensus.quorum_store.sender_max_num_batches = 100; + config.consensus.quorum_store.sender_max_total_txns = 4000; + config.consensus.quorum_store.sender_max_total_bytes = 8 * 1024 * 1024; + config.consensus.quorum_store.receiver_max_batch_txns = 1000; + config.consensus.quorum_store.receiver_max_batch_bytes = 4 * 1024 * 1024; + config.consensus.quorum_store.receiver_max_num_batches = 100; + config.consensus.quorum_store.receiver_max_total_txns = 4000; + config.consensus.quorum_store.receiver_max_total_bytes = 8 * 1024 * 1024; })) // TODO(ibalajiarun): tune these success critiera after we have a better idea of the test behavior .with_success_criteria( @@ -809,9 +842,8 @@ fn realistic_env_sweep_wrap( ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(num_validators).unwrap()) .with_initial_fullnode_count(num_fullnodes) - .with_node_helm_config_fn(Arc::new(move |helm_values| { - helm_values["validator"]["config"]["execution"] - ["processed_transactions_detailed_counters"] = true.into(); + .with_validator_override_node_config_fn(Arc::new(|config, _| { + config.execution.processed_transactions_detailed_counters = true; })) .add_network_test(wrap_with_realistic_env(test)) // Test inherits the main EmitJobRequest, so update here for more precise latency measurements @@ -955,9 +987,8 @@ fn workload_vs_perf_benchmark() -> ForgeConfig { ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(7).unwrap()) .with_initial_fullnode_count(7) - .with_node_helm_config_fn(Arc::new(move |helm_values| { - helm_values["validator"]["config"]["execution"] - ["processed_transactions_detailed_counters"] = true.into(); + .with_validator_override_node_config_fn(Arc::new(|config, _| { + config.execution.processed_transactions_detailed_counters = true; })) .add_network_test(LoadVsPerfBenchmark { test: Box::new(PerformanceBenchmark), @@ -1100,9 +1131,8 @@ fn realistic_env_graceful_overload() -> ForgeConfig { .mode(EmitJobMode::ConstTps { tps: 1000 }) .gas_price(5 * aptos_global_constants::GAS_UNIT_PRICE), ) - .with_node_helm_config_fn(Arc::new(move |helm_values| { - helm_values["validator"]["config"]["execution"] - ["processed_transactions_detailed_counters"] = true.into(); + .with_validator_override_node_config_fn(Arc::new(|config, _| { + config.execution.processed_transactions_detailed_counters = true; })) .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 300.into(); @@ -1132,9 +1162,8 @@ fn 
workload_mix_test() -> ForgeConfig { .with_initial_validator_count(NonZeroUsize::new(5).unwrap()) .with_initial_fullnode_count(3) .add_network_test(PerformanceBenchmark) - .with_node_helm_config_fn(Arc::new(move |helm_values| { - helm_values["validator"]["config"]["execution"] - ["processed_transactions_detailed_counters"] = true.into(); + .with_validator_override_node_config_fn(Arc::new(|config, _| { + config.execution.processed_transactions_detailed_counters = true; })) .with_emit_job( EmitJobRequest::default() @@ -1210,9 +1239,8 @@ fn individual_workload_tests(test_name: String) -> ForgeConfig { .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 600.into(); })) - .with_node_helm_config_fn(Arc::new(move |helm_values| { - helm_values["validator"]["config"]["execution"] - ["processed_transactions_detailed_counters"] = true.into(); + .with_validator_override_node_config_fn(Arc::new(|config, _| { + config.execution.processed_transactions_detailed_counters = true; })) .with_emit_job( if test_name == "write_new_resource" { @@ -1288,9 +1316,12 @@ fn validator_reboot_stress_test() -> ForgeConfig { })) } -fn apply_quorum_store_configs_for_single_node(helm_values: &mut serde_yaml::Value) { - helm_values["validator"]["config"]["consensus"]["quorum_store"]["back_pressure"] - ["dynamic_max_txn_per_s"] = 5500.into(); +fn apply_config_for_quorum_store_single_node(config: &mut NodeConfig) { + config + .consensus + .quorum_store + .back_pressure + .dynamic_max_txn_per_s = 5500; } fn single_vfn_perf() -> ForgeConfig { @@ -1303,8 +1334,8 @@ fn single_vfn_perf() -> ForgeConfig { .add_no_restarts() .add_wait_for_catchup_s(240), ) - .with_node_helm_config_fn(Arc::new(|helm_values| { - apply_quorum_store_configs_for_single_node(helm_values); + .with_validator_override_node_config_fn(Arc::new(|config, _| { + apply_config_for_quorum_store_single_node(config); })) } @@ -1324,10 +1355,49 @@ fn network_bandwidth() -> ForgeConfig { fn gather_metrics() -> ForgeConfig { ForgeConfig::default() .add_network_test(GatherMetrics) - .add_network_test(Delay::new(60)) + .add_network_test(Delay::new(180)) .add_network_test(GatherMetrics) } +fn netbench_config_100_megabytes_per_sec(netbench_config: &mut NetbenchConfig) { + netbench_config.enabled = true; + netbench_config.max_network_channel_size = 1000; + netbench_config.enable_direct_send_testing = true; + netbench_config.direct_send_data_size = 100000; + netbench_config.direct_send_per_second = 1000; +} + +fn netbench_config_4_megabytes_per_sec(netbench_config: &mut NetbenchConfig) { + netbench_config.enabled = true; + netbench_config.max_network_channel_size = 1000; + netbench_config.enable_direct_send_testing = true; + netbench_config.direct_send_data_size = 100000; + netbench_config.direct_send_per_second = 40; +} + +fn net_bench() -> ForgeConfig { + ForgeConfig::default() + .add_network_test(Delay::new(180)) + .with_initial_validator_count(NonZeroUsize::new(2).unwrap()) + .with_validator_override_node_config_fn(Arc::new(|config, _| { + let mut netbench_config = NetbenchConfig::default(); + netbench_config_100_megabytes_per_sec(&mut netbench_config); + config.netbench = Some(netbench_config); + })) +} + +fn net_bench_two_region_env() -> ForgeConfig { + ForgeConfig::default() + .add_network_test(wrap_with_two_region_env(Delay::new(180))) + .with_initial_validator_count(NonZeroUsize::new(2).unwrap()) + .with_validator_override_node_config_fn(Arc::new(|config, _| { + // Not using 100 MBps here, as it will lead to throughput 
collapse + let mut netbench_config = NetbenchConfig::default(); + netbench_config_4_megabytes_per_sec(&mut netbench_config); + config.netbench = Some(netbench_config); + })) +} + fn three_region_simulation_with_different_node_speed() -> ForgeConfig { ForgeConfig::default() .with_initial_validator_count(NonZeroUsize::new(30).unwrap()) @@ -1343,16 +1413,11 @@ fn three_region_simulation_with_different_node_speed() -> ForgeConfig { }, ThreeRegionSameCloudSimulationTest, )) - .with_node_helm_config_fn(Arc::new(move |helm_values| { - helm_values["validator"]["config"]["api"]["failpoints_enabled"] = true.into(); - // helm_values["validator"]["config"]["consensus"]["max_sending_block_txns"] = - // 4000.into(); - // helm_values["validator"]["config"]["consensus"]["max_sending_block_bytes"] = - // 1000000.into(); - helm_values["fullnode"]["config"]["state_sync"]["state_sync_driver"] - ["bootstrapping_mode"] = "ExecuteTransactionsFromGenesis".into(); - helm_values["fullnode"]["config"]["state_sync"]["state_sync_driver"] - ["continuous_syncing_mode"] = "ExecuteTransactions".into(); + .with_validator_override_node_config_fn(Arc::new(|config, _| { + config.api.failpoints_enabled = true; + })) + .with_fullnode_override_node_config_fn(Arc::new(|config, _| { + state_sync_config_execute_transactions(&mut config.state_sync); })) .with_success_criteria( SuccessCriteria::new(1000) @@ -1392,8 +1457,8 @@ fn network_partition() -> ForgeConfig { .add_no_restarts() .add_wait_for_catchup_s(240), ) - .with_node_helm_config_fn(Arc::new(|helm_values| { - apply_quorum_store_configs_for_single_node(helm_values); + .with_validator_override_node_config_fn(Arc::new(|config, _| { + apply_config_for_quorum_store_single_node(config); })) } @@ -1442,11 +1507,8 @@ fn state_sync_perf_fullnodes_apply_outputs() -> ForgeConfig { .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 600.into(); })) - .with_node_helm_config_fn(Arc::new(|helm_values| { - helm_values["fullnode"]["config"]["state_sync"]["state_sync_driver"] - ["bootstrapping_mode"] = "ApplyTransactionOutputsFromGenesis".into(); - helm_values["fullnode"]["config"]["state_sync"]["state_sync_driver"] - ["continuous_syncing_mode"] = "ApplyTransactionOutputs".into(); + .with_fullnode_override_node_config_fn(Arc::new(|config, _| { + state_sync_config_apply_transaction_outputs(&mut config.state_sync); })) .with_success_criteria(SuccessCriteria::new(9000)) } @@ -1459,11 +1521,8 @@ fn state_sync_perf_fullnodes_execute_transactions() -> ForgeConfig { .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 600.into(); })) - .with_node_helm_config_fn(Arc::new(|helm_values| { - helm_values["fullnode"]["config"]["state_sync"]["state_sync_driver"] - ["bootstrapping_mode"] = "ExecuteTransactionsFromGenesis".into(); - helm_values["fullnode"]["config"]["state_sync"]["state_sync_driver"] - ["continuous_syncing_mode"] = "ExecuteTransactions".into(); + .with_fullnode_override_node_config_fn(Arc::new(|config, _| { + state_sync_config_execute_transactions(&mut config.state_sync); })) .with_success_criteria(SuccessCriteria::new(5000)) } @@ -1483,11 +1542,8 @@ fn state_sync_perf_fullnodes_fast_sync() -> ForgeConfig { }) .transaction_type(TransactionTypeArg::AccountGeneration.materialize_default()), // Create many state values ) - .with_node_helm_config_fn(Arc::new(|helm_values| { - helm_values["fullnode"]["config"]["state_sync"]["state_sync_driver"] - ["bootstrapping_mode"] = "DownloadLatestStates".into(); 
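Stepping back to the two netbench helpers defined earlier: their names follow directly from the knobs they set, since bytes per second is `direct_send_data_size` times `direct_send_per_second`. A quick standalone check of that arithmetic:

```rust
fn main() {
    // netbench_config_100_megabytes_per_sec: 100 kB payloads sent 1000 times per second.
    let hundred_mb_per_s = 100_000u64 * 1_000; // 100,000,000 B/s, roughly 100 MB/s
    // netbench_config_4_megabytes_per_sec: 100 kB payloads sent 40 times per second.
    let four_mb_per_s = 100_000u64 * 40; // 4,000,000 B/s, roughly 4 MB/s
    println!("{hundred_mb_per_s} B/s vs {four_mb_per_s} B/s");
}
```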
- helm_values["fullnode"]["config"]["state_sync"]["state_sync_driver"] - ["continuous_syncing_mode"] = "ApplyTransactionOutputs".into(); + .with_fullnode_override_node_config_fn(Arc::new(|config, _| { + state_sync_config_fast_sync(&mut config.state_sync); })) } @@ -1499,11 +1555,8 @@ fn state_sync_perf_validators() -> ForgeConfig { .with_genesis_helm_config_fn(Arc::new(|helm_values| { helm_values["chain"]["epoch_duration_secs"] = 600.into(); })) - .with_node_helm_config_fn(Arc::new(|helm_values| { - helm_values["validator"]["config"]["state_sync"]["state_sync_driver"] - ["bootstrapping_mode"] = "ApplyTransactionOutputsFromGenesis".into(); - helm_values["validator"]["config"]["state_sync"]["state_sync_driver"] - ["continuous_syncing_mode"] = "ApplyTransactionOutputs".into(); + .with_validator_override_node_config_fn(Arc::new(|config, _| { + state_sync_config_apply_transaction_outputs(&mut config.state_sync); })) .add_network_test(StateSyncValidatorPerformance) .with_success_criteria(SuccessCriteria::new(5000)) @@ -1652,32 +1705,36 @@ fn realistic_network_tuned_for_throughput_test() -> ForgeConfig { .with_emit_job(EmitJobRequest::default().mode(EmitJobMode::MaxLoad { mempool_backlog: 150000, })) - .with_node_helm_config_fn(Arc::new(move |helm_values| { - helm_values["validator"]["config"]["consensus"] - ["max_sending_block_txns_quorum_store_override"] = 10000.into(); - helm_values["validator"]["config"]["consensus"]["pipeline_backpressure"] = - serde_yaml::to_value(Vec::::new()).unwrap(); - helm_values["validator"]["config"]["consensus"]["chain_health_backoff"] = - serde_yaml::to_value(Vec::::new()).unwrap(); - - helm_values["validator"]["config"]["consensus"] - ["wait_for_full_blocks_above_recent_fill_threshold"] = (0.8).into(); - helm_values["validator"]["config"]["consensus"] - ["wait_for_full_blocks_above_pending_blocks"] = 8.into(); - - helm_values["validator"]["config"]["consensus"]["quorum_store"]["back_pressure"] - ["backlog_txn_limit_count"] = 100000.into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"]["back_pressure"] - ["backlog_per_validator_batch_limit_count"] = 10.into(); - - helm_values["validator"]["config"]["consensus"]["quorum_store"]["back_pressure"] - ["dynamic_max_txn_per_s"] = 6000.into(); + .with_validator_override_node_config_fn(Arc::new(|config, _| { + config + .consensus + .max_sending_block_txns_quorum_store_override = 10000; + config.consensus.pipeline_backpressure = vec![]; + config.consensus.chain_health_backoff = vec![]; + config + .consensus + .wait_for_full_blocks_above_recent_fill_threshold = 0.8; + config.consensus.wait_for_full_blocks_above_pending_blocks = 8; + + config + .consensus + .quorum_store + .back_pressure + .backlog_txn_limit_count = 100000; + config + .consensus + .quorum_store + .back_pressure + .backlog_per_validator_batch_limit_count = 10; + config + .consensus + .quorum_store + .back_pressure + .dynamic_max_txn_per_s = 6000; // Experimental storage optimizations - helm_values["validator"]["config"]["storage"]["rocksdb_configs"]["split_ledger_db"] = - true.into(); - helm_values["validator"]["config"]["storage"]["rocksdb_configs"] - ["use_sharded_state_merkle_db"] = true.into(); + config.storage.rocksdb_configs.split_ledger_db = true; + config.storage.rocksdb_configs.use_sharded_state_merkle_db = true; })) .with_success_criteria( SuccessCriteria::new(8000) @@ -1750,21 +1807,20 @@ fn changing_working_quorum_test_helper( helm_values["genesis"]["validator"]["num_validators_with_larger_stake"] = num_large_validators.into(); 
})) - .with_node_helm_config_fn(Arc::new(move |helm_values| { - helm_values["validator"]["config"]["api"]["failpoints_enabled"] = true.into(); + .with_validator_override_node_config_fn(Arc::new(move |config, _| { + config.api.failpoints_enabled = true; let block_size = (target_tps / 4) as u64; - helm_values["validator"]["config"]["consensus"]["max_sending_block_txns"] = - block_size.into(); - helm_values["validator"]["config"]["consensus"] - ["max_sending_block_txns_quorum_store_override"] = block_size.into(); - helm_values["validator"]["config"]["consensus"] - ["max_receiving_block_txns_quorum_store_override"] = block_size.into(); - helm_values["validator"]["config"]["consensus"]["round_initial_timeout_ms"] = - 500.into(); - helm_values["validator"]["config"]["consensus"] - ["round_timeout_backoff_exponent_base"] = 1.0.into(); - helm_values["validator"]["config"]["consensus"]["quorum_store_poll_time_ms"] = - 100.into(); + + config.consensus.max_sending_block_txns = block_size; + config + .consensus + .max_sending_block_txns_quorum_store_override = block_size; + config + .consensus + .max_receiving_block_txns_quorum_store_override = block_size; + config.consensus.round_initial_timeout_ms = 500; + config.consensus.round_timeout_backoff_exponent_base = 1.0; + config.consensus.quorum_store_poll_time_ms = 100; let mut min_block_txns = block_size; let mut chain_health_backoff = ConsensusConfig::default().chain_health_backoff; @@ -1781,26 +1837,22 @@ fn changing_working_quorum_test_helper( item.backoff_if_below_participating_voting_power_percentage = 90 - i * 5; } } - helm_values["validator"]["config"]["consensus"]["quorum_store"] - ["sender_max_batch_txns"] = min_block_txns.into(); - helm_values["validator"]["config"]["consensus"]["quorum_store"] - ["receiver_max_batch_txns"] = min_block_txns.into(); + config.consensus.quorum_store.sender_max_batch_txns = min_block_txns as usize; + config.consensus.quorum_store.receiver_max_batch_txns = min_block_txns as usize; - helm_values["validator"]["config"]["consensus"]["chain_health_backoff"] = - serde_yaml::to_value(chain_health_backoff).unwrap(); + config.consensus.chain_health_backoff = chain_health_backoff; // Override the syncing mode of all nodes to use transaction output syncing. // TODO(joshlind): remove me once we move back to output syncing by default. if apply_txn_outputs { - helm_values["validator"]["config"]["state_sync"]["state_sync_driver"] - ["bootstrapping_mode"] = "ApplyTransactionOutputsFromGenesis".into(); - helm_values["validator"]["config"]["state_sync"]["state_sync_driver"] - ["continuous_syncing_mode"] = "ApplyTransactionOutputs".into(); - - helm_values["fullnode"]["config"]["state_sync"]["state_sync_driver"] - ["bootstrapping_mode"] = "ApplyTransactionOutputsFromGenesis".into(); - helm_values["fullnode"]["config"]["state_sync"]["state_sync_driver"] - ["continuous_syncing_mode"] = "ApplyTransactionOutputs".into(); + state_sync_config_apply_transaction_outputs(&mut config.state_sync); + } + })) + .with_fullnode_override_node_config_fn(Arc::new(move |config, _| { + // Override the syncing mode of all nodes to use transaction output syncing. + // TODO(joshlind): remove me once we move back to output syncing by default. 
+ if apply_txn_outputs { + state_sync_config_apply_transaction_outputs(&mut config.state_sync); } })) .with_emit_job( @@ -1849,13 +1901,11 @@ fn large_db_test( .with_initial_fullnode_count(std::cmp::max(2, target_tps / 1000)) .add_network_test(PerformanceBenchmark) .with_existing_db(existing_db_tag.clone()) - .with_node_helm_config_fn(Arc::new(move |helm_values| { - helm_values["validator"]["storage"]["labels"]["tag"] = existing_db_tag.clone().into(); - helm_values["fullnode"]["storage"]["labels"]["tag"] = existing_db_tag.clone().into(); - helm_values["validator"]["config"]["base"]["working_dir"] = - "/opt/aptos/data/checkpoint".into(); - helm_values["fullnode"]["config"]["base"]["working_dir"] = - "/opt/aptos/data/checkpoint".into(); + .with_validator_override_node_config_fn(Arc::new(move |config, _| { + config.base.working_dir = Some(PathBuf::from("/opt/aptos/data/checkpoint")); + })) + .with_fullnode_override_node_config_fn(Arc::new(move |config, _| { + config.base.working_dir = Some(PathBuf::from("/opt/aptos/data/checkpoint")); })) .with_emit_job( EmitJobRequest::default() @@ -1944,15 +1994,7 @@ fn multiregion_benchmark_test() -> ForgeConfig { helm_values["genesis"]["multicluster"]["enabled"] = true.into(); })) - .with_node_helm_config_fn(Arc::new(|helm_values| { - helm_values["multicluster"]["enabled"] = true.into(); - // Create headless services for validators and fullnodes. - // Note: chaos-mesh will not work with clusterIP services. - helm_values["service"]["validator"]["internal"]["type"] = "ClusterIP".into(); - helm_values["service"]["validator"]["internal"]["headless"] = true.into(); - helm_values["service"]["fullnode"]["internal"]["type"] = "ClusterIP".into(); - helm_values["service"]["fullnode"]["internal"]["headless"] = true.into(); - })) + .with_multi_region_config() .with_success_criteria( SuccessCriteria::new(4500) .add_no_restarts() @@ -2020,13 +2062,7 @@ fn pfn_performance( add_network_emulation: bool, ) -> ForgeConfig { // Determine the minimum expected TPS - let min_expected_tps = if add_cpu_chaos { - 3000 - } else if add_network_emulation { - 4000 - } else { - 4500 - }; + let min_expected_tps = 4500; // Create the forge config ForgeConfig::default() diff --git a/testsuite/forge/src/backend/k8s/fullnode.rs b/testsuite/forge/src/backend/k8s/fullnode.rs index bbea29c4ada4d..c0ca476465d89 100644 --- a/testsuite/forge/src/backend/k8s/fullnode.rs +++ b/testsuite/forge/src/backend/k8s/fullnode.rs @@ -10,8 +10,8 @@ use crate::{ use anyhow::Context; use aptos_config::{ config::{ - merge_node_config, ApiConfig, BaseConfig, DiscoveryMethod, ExecutionConfig, NetworkConfig, - NodeConfig, RoleType, WaypointConfig, + ApiConfig, BaseConfig, DiscoveryMethod, ExecutionConfig, NetworkConfig, NodeConfig, + OverrideNodeConfig, RoleType, WaypointConfig, }, network_id::NetworkId, }; @@ -73,12 +73,12 @@ fn get_fullnode_image_from_validator_image( /// Create a ConfigMap with the given NodeConfig, with a constant key async fn create_node_config_configmap( node_config_config_map_name: String, - node_config: &NodeConfig, + node_config: &OverrideNodeConfig, ) -> Result { let mut data: BTreeMap = BTreeMap::new(); data.insert( FULLNODE_CONFIG_MAP_KEY.to_string(), - serde_yaml::to_string(&node_config)?, + serde_yaml::to_string(&node_config.get_yaml()?)?, ); let node_config_config_map = ConfigMap { binary_data: None, @@ -337,27 +337,23 @@ pub async fn install_public_fullnode<'a>( persistent_volume_claim_api: Arc>, service_api: Arc>, version: &'a Version, - node_config: &'a NodeConfig, + node_config: 
&'a OverrideNodeConfig, era: String, namespace: String, use_port_forward: bool, index: usize, ) -> Result<(PeerId, K8sNode)> { - let default_node_config = get_default_pfn_node_config(); - - let merged_node_config = - merge_node_config(default_node_config, serde_yaml::to_value(node_config)?)?; - - let node_peer_id = node_config.get_peer_id().unwrap_or_else(PeerId::random); + let node_peer_id = node_config + .override_config() + .get_peer_id() + .unwrap_or_else(PeerId::random); let fullnode_name = format!("public-fullnode-{}-{}", index, node_peer_id.short_str()); // create the NodeConfig configmap let fullnode_node_config_config_map_name = format!("{}-config", fullnode_name.clone()); - let fullnode_node_config_config_map = create_node_config_configmap( - fullnode_node_config_config_map_name.clone(), - &merged_node_config, - ) - .await?; + let fullnode_node_config_config_map = + create_node_config_configmap(fullnode_node_config_config_map_name.clone(), node_config) + .await?; configmap_api .create(&PostParams::default(), &fullnode_node_config_config_map) .await?; @@ -621,10 +617,11 @@ mod tests { async fn test_create_node_config_map() { let config_map_name = "aptos-node-0-validator-0-config".to_string(); let node_config = NodeConfig::default(); + let override_config = OverrideNodeConfig::new_with_default_base(node_config.clone()); // expect that the one we get is the same as the one we created let created_config_map = - create_node_config_configmap(config_map_name.clone(), &node_config) + create_node_config_configmap(config_map_name.clone(), &override_config) .await .unwrap(); @@ -739,6 +736,7 @@ mod tests { let mut node_config = get_default_pfn_node_config(); node_config.full_node_networks[0].identity = Identity::from_config(PrivateKey::generate_for_testing(), peer_id); + let override_config = OverrideNodeConfig::new_with_default_base(node_config); let era = "42069".to_string(); let namespace = "forge42069".to_string(); @@ -749,7 +747,7 @@ mod tests { persistent_volume_claim_api, service_api, &version, - &node_config, + &override_config, era, namespace, false, diff --git a/testsuite/forge/src/backend/k8s/swarm.rs b/testsuite/forge/src/backend/k8s/swarm.rs index fb6b48e33ba4f..8809a154cbc4a 100644 --- a/testsuite/forge/src/backend/k8s/swarm.rs +++ b/testsuite/forge/src/backend/k8s/swarm.rs @@ -13,7 +13,7 @@ use crate::{ }; use ::aptos_logger::*; use anyhow::{anyhow, bail, format_err}; -use aptos_config::config::NodeConfig; +use aptos_config::config::{NodeConfig, OverrideNodeConfig}; use aptos_retrier::fixed_retry_strategy; use aptos_sdk::{ crypto::ed25519::Ed25519PrivateKey, @@ -150,7 +150,7 @@ impl K8sSwarm { async fn install_public_fullnode_resources<'a>( &mut self, version: &'a Version, - node_config: &'a NodeConfig, + node_config: &'a OverrideNodeConfig, ) -> Result<(PeerId, K8sNode)> { // create APIs let stateful_set_api: Arc> = Arc::new(K8sApi::::from_client( @@ -302,14 +302,18 @@ impl Swarm for K8sSwarm { fn add_validator_full_node( &mut self, _version: &Version, - _template: NodeConfig, + _config: OverrideNodeConfig, _id: PeerId, ) -> Result { todo!() } - async fn add_full_node(&mut self, version: &Version, template: NodeConfig) -> Result { - self.install_public_fullnode_resources(version, &template) + async fn add_full_node( + &mut self, + version: &Version, + config: OverrideNodeConfig, + ) -> Result { + self.install_public_fullnode_resources(version, &config) .await .map(|(peer_id, node)| { self.fullnodes.insert(peer_id, node); diff --git a/testsuite/forge/src/backend/local/mod.rs 
b/testsuite/forge/src/backend/local/mod.rs index 814a93b30e6dc..ae533eb1d8162 100644 --- a/testsuite/forge/src/backend/local/mod.rs +++ b/testsuite/forge/src/backend/local/mod.rs @@ -4,9 +4,9 @@ use crate::{Factory, GenesisConfig, GenesisConfigFn, NodeConfigFn, Result, Swarm, Version}; use anyhow::{bail, Context}; -use aptos_config::config::NodeConfig; +use aptos_config::config::{NodeConfig, OverrideNodeConfig}; use aptos_framework::ReleaseBundle; -use aptos_genesis::builder::{InitConfigFn, InitGenesisConfigFn}; +use aptos_genesis::builder::{InitConfigFn, InitGenesisConfigFn, InitGenesisStakeFn}; use aptos_infallible::Mutex; use rand::rngs::StdRng; use std::{ @@ -122,6 +122,7 @@ impl LocalFactory { genesis_framework: Option, init_config: Option, vfn_config: Option, + init_genesis_stake: Option, init_genesis_config: Option, guard: ActiveNodesGuard, ) -> Result @@ -136,6 +137,7 @@ impl LocalFactory { self.versions.clone(), Some(version.clone()), init_config, + init_genesis_stake, init_genesis_config, swarmdir, genesis_framework, @@ -148,17 +150,14 @@ impl LocalFactory { .await .with_context(|| format!("Swarm logs can be found here: {}", swarm.logs_location()))?; + let vfn_config = vfn_config.unwrap_or_else(NodeConfig::get_default_vfn_config); + let vfn_override_config = OverrideNodeConfig::new_with_default_base(vfn_config); + // Add and launch the fullnodes let validator_peer_ids = swarm.validators().map(|v| v.peer_id()).collect::>(); for validator_peer_id in validator_peer_ids.iter().take(number_of_fullnodes) { let _ = swarm - .add_validator_fullnode( - version, - vfn_config - .clone() - .unwrap_or_else(NodeConfig::get_default_vfn_config), - *validator_peer_id, - ) + .add_validator_fullnode(version, vfn_override_config.clone(), *validator_peer_id) .unwrap(); } swarm.wait_all_alive(Duration::from_secs(60)).await?; @@ -209,6 +208,7 @@ impl Factory for LocalFactory { None, None, None, + None, guard, ) .await?; diff --git a/testsuite/forge/src/backend/local/node.rs b/testsuite/forge/src/backend/local/node.rs index 2904d9b120060..1bdcb562d864c 100644 --- a/testsuite/forge/src/backend/local/node.rs +++ b/testsuite/forge/src/backend/local/node.rs @@ -5,7 +5,7 @@ use crate::{FullNode, HealthCheckError, LocalVersion, Node, NodeExt, Validator, Version}; use anyhow::{anyhow, ensure, Context, Result}; use aptos_config::{config::NodeConfig, keys::ConfigKey}; -use aptos_db::{LEDGER_DB_NAME, STATE_MERKLE_DB_NAME}; +use aptos_db::{fast_sync_storage_wrapper::SECONDARY_DB_DIR, LEDGER_DB_NAME, STATE_MERKLE_DB_NAME}; use aptos_logger::{debug, info}; use aptos_sdk::{ crypto::ed25519::Ed25519PrivateKey, @@ -85,6 +85,10 @@ impl LocalNode { }) } + pub fn base_dir(&self) -> PathBuf { + self.directory.clone() + } + pub fn config_path(&self) -> PathBuf { self.directory.join("node.yaml") } @@ -291,13 +295,15 @@ impl Node for LocalNode { let state_db_path = node_config.storage.dir().join(STATE_MERKLE_DB_NAME); let secure_storage_path = node_config.get_working_dir().join("secure_storage.json"); let state_sync_db_path = node_config.storage.dir().join(STATE_SYNC_DB_NAME); + let secondary_db_path = node_config.storage.dir().join(SECONDARY_DB_DIR); debug!( - "Deleting ledger, state, secure and state sync db paths ({:?}, {:?}, {:?}, {:?}) for node {:?}", + "Deleting ledger, state, secure and state sync db paths ({:?}, {:?}, {:?}, {:?}, {:?}) for node {:?}", ledger_db_path.as_path(), state_db_path.as_path(), secure_storage_path.as_path(), state_sync_db_path.as_path(), + secondary_db_path.as_path(), self.name ); @@ -318,6 
+324,13 @@ impl Node for LocalNode { fs::remove_dir_all(state_sync_db_path) .map_err(anyhow::Error::from) .context("Failed to delete state_sync_db_path")?; + + // remove secondary db if the path exists + if secondary_db_path.as_path().exists() { + fs::remove_dir_all(secondary_db_path) + .map_err(anyhow::Error::from) + .context("Failed to delete secondary_db_path")?; + } if self.config.base.role.is_validator() { fs::remove_file(secure_storage_path) .map_err(anyhow::Error::from) diff --git a/testsuite/forge/src/backend/local/swarm.rs b/testsuite/forge/src/backend/local/swarm.rs index c0134e8faf890..23c6f7627cce1 100644 --- a/testsuite/forge/src/backend/local/swarm.rs +++ b/testsuite/forge/src/backend/local/swarm.rs @@ -9,12 +9,14 @@ use crate::{ use anyhow::{anyhow, bail, Result}; use aptos::common::types::EncodingType; use aptos_config::{ - config::{NetworkConfig, NodeConfig}, + config::{NetworkConfig, NodeConfig, OverrideNodeConfig, PersistableConfig}, keys::ConfigKey, network_id::NetworkId, }; use aptos_framework::ReleaseBundle; -use aptos_genesis::builder::{FullnodeNodeConfig, InitConfigFn, InitGenesisConfigFn}; +use aptos_genesis::builder::{ + FullnodeNodeConfig, InitConfigFn, InitGenesisConfigFn, InitGenesisStakeFn, +}; use aptos_infallible::Mutex; use aptos_logger::{info, warn}; use aptos_sdk::{ @@ -111,6 +113,7 @@ impl LocalSwarm { versions: Arc>, initial_version: Option, init_config: Option, + init_genesis_stake: Option, init_genesis_config: Option, dir: Option, genesis_framework: Option, @@ -137,31 +140,30 @@ impl LocalSwarm { .unwrap_or_else(|| aptos_cached_packages::head_release_bundle().clone()), )? .with_num_validators(number_of_validators) - .with_init_config(Some(Arc::new( - move |index, config, genesis_stake_amount| { - // for local tests, turn off parallel execution: - config.execution.concurrency_level = 1; - - // Single node orders blocks too fast which would trigger backpressure and stall for 1 sec - // which cause flakiness in tests. - if number_of_validators.get() == 1 { - // this delays empty block by (30-1) * 30ms - config.consensus.quorum_store_poll_time_ms = 900; - config - .state_sync - .state_sync_driver - .enable_auto_bootstrapping = true; - config - .state_sync - .state_sync_driver - .max_connection_deadline_secs = 1; - } - - if let Some(init_config) = &init_config { - (init_config)(index, config, genesis_stake_amount); - } - }, - ))) + .with_init_config(Some(Arc::new(move |index, config, base| { + // for local tests, turn off parallel execution: + config.execution.concurrency_level = 1; + + // Single node orders blocks too fast which would trigger backpressure and stall for 1 sec + // which cause flakiness in tests. 
+ if number_of_validators.get() == 1 { + // this delays empty block by (30-1) * 30ms + config.consensus.quorum_store_poll_time_ms = 900; + config + .state_sync + .state_sync_driver + .enable_auto_bootstrapping = true; + config + .state_sync + .state_sync_driver + .max_connection_deadline_secs = 1; + } + + if let Some(init_config) = &init_config { + (init_config)(index, config, base); + } + }))) + .with_init_genesis_stake(init_genesis_stake) .with_init_genesis_config(init_genesis_config) .build(rng)?; @@ -195,7 +197,9 @@ impl LocalSwarm { let public_networks = validators .values_mut() .map(|validator| { - let mut validator_config = validator.config().clone(); + let mut validator_override_config = + OverrideNodeConfig::load_config(validator.config_path())?; + let validator_config = validator_override_config.override_config_mut(); // Grab the public network config from the validator and insert it into the VFN's config // The validator's public network identity is the same as the VFN's public network identity @@ -209,10 +213,10 @@ impl LocalSwarm { .expect("Validator should have a public network"); validator_config.full_node_networks.remove(i) }; - + validator_config.set_data_dir(validator.base_dir()); + *validator.config_mut() = validator_config.clone(); // Since the validator's config has changed we need to save it - validator_config.save_to_path(validator.config_path())?; - *validator.config_mut() = validator_config; + validator_override_config.save_config(validator.config_path())?; Ok((validator.peer_id(), public_network)) }) @@ -333,7 +337,7 @@ impl LocalSwarm { pub fn add_validator_fullnode( &mut self, version: &Version, - template: NodeConfig, + config: OverrideNodeConfig, validator_peer_id: PeerId, ) -> Result { let validator = self @@ -356,7 +360,7 @@ impl LocalSwarm { let fullnode_config = FullnodeNodeConfig::validator_fullnode( name, self.dir.as_ref(), - template, + config, validator.config(), &self.genesis_waypoint, &self.genesis, @@ -381,14 +385,14 @@ impl LocalSwarm { Ok(peer_id) } - fn add_fullnode(&mut self, version: &Version, template: NodeConfig) -> Result { + fn add_fullnode(&mut self, version: &Version, config: OverrideNodeConfig) -> Result { let name = self.node_name_counter.to_string(); let index = self.node_name_counter; self.node_name_counter += 1; let fullnode_config = FullnodeNodeConfig::public_fullnode( name, self.dir.as_ref(), - template, + config, &self.genesis_waypoint, &self.genesis, )?; @@ -550,14 +554,18 @@ impl Swarm for LocalSwarm { fn add_validator_full_node( &mut self, version: &Version, - template: NodeConfig, + config: OverrideNodeConfig, id: PeerId, ) -> Result { - self.add_validator_fullnode(version, template, id) + self.add_validator_fullnode(version, config, id) } - async fn add_full_node(&mut self, version: &Version, template: NodeConfig) -> Result { - self.add_fullnode(version, template) + async fn add_full_node( + &mut self, + version: &Version, + config: OverrideNodeConfig, + ) -> Result { + self.add_fullnode(version, config) } fn remove_full_node(&mut self, id: PeerId) -> Result<()> { diff --git a/testsuite/forge/src/interface/swarm.rs b/testsuite/forge/src/interface/swarm.rs index 922d095957a07..5e60cabd0ccdf 100644 --- a/testsuite/forge/src/interface/swarm.rs +++ b/testsuite/forge/src/interface/swarm.rs @@ -6,7 +6,7 @@ use crate::{ AptosPublicInfo, ChainInfo, FullNode, NodeExt, Result, SwarmChaos, Validator, Version, }; use anyhow::{anyhow, bail}; -use aptos_config::config::NodeConfig; +use aptos_config::config::{NodeConfig, 
OverrideNodeConfig}; use aptos_logger::info; use aptos_rest_client::Client as RestClient; use aptos_sdk::types::PeerId; @@ -58,12 +58,16 @@ pub trait Swarm: Sync { fn add_validator_full_node( &mut self, version: &Version, - template: NodeConfig, + config: OverrideNodeConfig, id: PeerId, ) -> Result; /// Adds a FullNode to the swarm and returns the PeerId - async fn add_full_node(&mut self, version: &Version, template: NodeConfig) -> Result; + async fn add_full_node( + &mut self, + version: &Version, + config: OverrideNodeConfig, + ) -> Result; /// Removes the FullNode with the provided PeerId fn remove_full_node(&mut self, id: PeerId) -> Result<()>; diff --git a/testsuite/forge/src/runner.rs b/testsuite/forge/src/runner.rs index f1693a63f2214..116a206d9545b 100644 --- a/testsuite/forge/src/runner.rs +++ b/testsuite/forge/src/runner.rs @@ -8,6 +8,7 @@ use crate::{ *, }; use anyhow::{bail, format_err, Error, Result}; +use aptos_config::config::{NodeConfig, OverrideNodeConfig}; use aptos_framework::ReleaseBundle; use clap::{Parser, ValueEnum}; use rand::{rngs::OsRng, Rng, SeedableRng}; @@ -116,6 +117,8 @@ pub enum InitialVersion { pub type NodeConfigFn = Arc; pub type GenesisConfigFn = Arc; +/// override_config, base_config (see OverrideNodeConfig) +pub type OverrideNodeConfigFn = Arc; pub struct ForgeConfig { aptos_tests: Vec>, @@ -137,8 +140,13 @@ pub struct ForgeConfig { /// Optional genesis helm values init function genesis_helm_config_fn: Option, - /// Optional node helm values init function - node_helm_config_fn: Option, + /// Optional validator node config override function + validator_override_node_config_fn: Option, + + /// Optional fullnode node config override function + fullnode_override_node_config_fn: Option, + + multi_region_config: bool, /// Transaction workload to run on the swarm emit_job_request: EmitJobRequest, @@ -200,11 +208,65 @@ impl ForgeConfig { self } - pub fn with_node_helm_config_fn(mut self, node_helm_config_fn: NodeConfigFn) -> Self { - self.node_helm_config_fn = Some(node_helm_config_fn); + pub fn with_validator_override_node_config_fn(mut self, f: OverrideNodeConfigFn) -> Self { + self.validator_override_node_config_fn = Some(f); + self + } + + pub fn with_fullnode_override_node_config_fn(mut self, f: OverrideNodeConfigFn) -> Self { + self.fullnode_override_node_config_fn = Some(f); + self + } + + pub fn with_multi_region_config(mut self) -> Self { + self.multi_region_config = true; self } + fn override_node_config_from_fn(config_fn: OverrideNodeConfigFn) -> OverrideNodeConfig { + let mut override_config = NodeConfig::default(); + let mut base_config = NodeConfig::default(); + config_fn(&mut override_config, &mut base_config); + OverrideNodeConfig::new(override_config, base_config) + } + + pub fn build_node_helm_config_fn(&self) -> Option { + let validator_override_node_config = self + .validator_override_node_config_fn + .clone() + .map(|config_fn| Self::override_node_config_from_fn(config_fn)); + let fullnode_override_node_config = self + .fullnode_override_node_config_fn + .clone() + .map(|config_fn| Self::override_node_config_from_fn(config_fn)); + let multi_region_config = self.multi_region_config; + let existing_db_tag = self.existing_db_tag.clone(); + + Some(Arc::new(move |helm_values: &mut serde_yaml::Value| { + if let Some(override_config) = &validator_override_node_config { + helm_values["validator"]["config"] = override_config.get_yaml().unwrap(); + } + if let Some(override_config) = &fullnode_override_node_config { + 
helm_values["validator"]["config"] = override_config.get_yaml().unwrap(); + } + if multi_region_config { + helm_values["multicluster"]["enabled"] = true.into(); + // Create headless services for validators and fullnodes. + // Note: chaos-mesh will not work with clusterIP services. + helm_values["service"]["validator"]["internal"]["type"] = "ClusterIP".into(); + helm_values["service"]["validator"]["internal"]["headless"] = true.into(); + helm_values["service"]["fullnode"]["internal"]["type"] = "ClusterIP".into(); + helm_values["service"]["fullnode"]["internal"]["headless"] = true.into(); + } + if let Some(existing_db_tag) = &existing_db_tag { + helm_values["validator"]["storage"]["labels"]["tag"] = + existing_db_tag.clone().into(); + helm_values["fullnode"]["storage"]["labels"]["tag"] = + existing_db_tag.clone().into(); + } + })) + } + pub fn with_initial_version(mut self, initial_version: InitialVersion) -> Self { self.initial_version = initial_version; self @@ -353,7 +415,9 @@ impl Default for ForgeConfig { initial_version: InitialVersion::Oldest, genesis_config: None, genesis_helm_config_fn: None, - node_helm_config_fn: None, + validator_override_node_config_fn: None, + fullnode_override_node_config_fn: None, + multi_region_config: false, emit_job_request: EmitJobRequest::default().mode(EmitJobMode::MaxLoad { mempool_backlog: 40000, }), @@ -441,7 +505,7 @@ impl<'cfg, F: Factory> Forge<'cfg, F> { self.tests.genesis_config.as_ref(), self.global_duration + Duration::from_secs(NAMESPACE_CLEANUP_DURATION_BUFFER_SECS), self.tests.genesis_helm_config_fn.clone(), - self.tests.node_helm_config_fn.clone(), + self.tests.build_node_helm_config_fn(), self.tests.existing_db_tag.clone(), ))?; diff --git a/testsuite/generate-format/tests/staged/network.yaml b/testsuite/generate-format/tests/staged/network.yaml index c3c9ef60f4b13..bde46eecb8fd0 100644 --- a/testsuite/generate-format/tests/staged/network.yaml +++ b/testsuite/generate-format/tests/staged/network.yaml @@ -152,6 +152,10 @@ ProtocolId: ConsensusRpcCompressed: UNIT 12: ConsensusDirectSendCompressed: UNIT + 13: + NetbenchDirectSend: UNIT + 14: + NetbenchRpc: UNIT ProtocolIdSet: NEWTYPESTRUCT: TYPENAME: BitVec diff --git a/testsuite/indexer_grpc_local.py b/testsuite/indexer_grpc_local.py index d6a1c786143f5..636f06c17c4cb 100755 --- a/testsuite/indexer_grpc_local.py +++ b/testsuite/indexer_grpc_local.py @@ -19,6 +19,12 @@ GRPCURL_PATH = os.environ.get("GRPCURL_PATH", "grpcurl") INDEXER_GRPC_DOCKER_COMPOSE_FILE = "docker/compose/indexer-grpc/docker-compose.yaml" +INDEXER_GRPC_DATA_SERVICE_CERT_FILE = ( + "docker/compose/indexer-grpc/data-service-grpc-server.crt" +) +INDEXER_GRPC_DATA_SERVICE_KEY_FILE = ( + "docker/compose/indexer-grpc/data-service-grpc-server.key" +) VALIDATOR_TESTNET_DOCKER_COMPOSE_FILE = ( "docker/compose/validator-testnet/docker-compose.yaml" ) @@ -26,7 +32,25 @@ INDEXER_FULLNODE_REST_API_URL = "http://localhost:8080" INDEXER_DATA_SERVICE_READINESS_URL = "http://localhost:18084/readiness" GRPC_INDEXER_FULLNODE_URL = "localhost:50051" -GRPC_DATA_SERVICE_URL = "localhost:50052" +GRPC_DATA_SERVICE_NON_TLS_URL = "localhost:50052" +GRPC_DATA_SERVICE_TLS_URL = "localhost:50053" + +GRPC_IS_READY_MESSAGE = f""" + ====================================== + Transaction Stream Service(indexer grpc) is ready to serve! 
+ + You can use grpcurl to test it out: + + - For non-TLS: + grpcurl -plaintext -d '{{ "starting_version": 0 }}' \\ + -H "x-aptos-data-authorization:dummy_token" \\ + {GRPC_DATA_SERVICE_NON_TLS_URL} aptos.indexer.v1.RawData/GetTransactions + - For TLS: + grpcurl -insecure -d '{{ "starting_version": 0 }}' \\ + -H "x-aptos-data-authorization:dummy_token" \\ + {GRPC_DATA_SERVICE_TLS_URL} aptos.indexer.v1.RawData/GetTransactions + ====================================== +""" SHARED_DOCKER_VOLUME_NAMES = ["aptos-shared", "indexer-grpc-file-store"] @@ -51,6 +75,32 @@ def run_docker_command( command = (list(pre_args) if pre_args else []) + base + ["docker"] + list(args) return self.shell.run(command, stream_output=stream_output) + def create_grpc_testing_certificates_if_absent(self) -> None: + # Check if the certificates are already present + if os.path.isfile(INDEXER_GRPC_DATA_SERVICE_CERT_FILE) and os.path.isfile( + INDEXER_GRPC_DATA_SERVICE_KEY_FILE + ): + return + # If not, create them + log.info("Creating grpc testing certificates") + command = [ + "openssl", + "req", + "-x509", + "-newkey", + "rsa:4096", + "-subj", + "/C=US/ST=CA/L=SF/O=Testing/CN=www.testing.com", + "-keyout", + INDEXER_GRPC_DATA_SERVICE_KEY_FILE, + "-out", + INDEXER_GRPC_DATA_SERVICE_CERT_FILE, + "-days", + "365", + "-nodes", + ] + self.shell.run(command) + class DockerComposeAction(Enum): UP = "up" @@ -102,6 +152,7 @@ def start_single_validator_testnet(context: SystemContext) -> None: def start_indexer_grpc(context: SystemContext, redis_only: bool = False) -> None: + context.create_grpc_testing_certificates_if_absent() extra_indexer_grpc_docker_args = [] if redis_only: extra_indexer_grpc_docker_args = [ @@ -197,7 +248,7 @@ def wait_for_indexer_grpc_progress(context: SystemContext) -> None: "-proto", "aptos/indexer/v1/raw_data.proto", "-plaintext", - GRPC_DATA_SERVICE_URL, + GRPC_DATA_SERVICE_NON_TLS_URL, "aptos.indexer.v1.RawData/GetTransactions", ], timeout_secs=GRPC_PROGRESS_THRESHOLD_SECS, @@ -230,6 +281,7 @@ def start(context: SystemContext, no_indexer_grpc: bool = False) -> None: if not no_indexer_grpc: wait_for_indexer_grpc_progress(context) + log.info(GRPC_IS_READY_MESSAGE) def stop(context: SystemContext) -> None: @@ -308,6 +360,10 @@ def check_system(context: SystemContext) -> None: if not shutil.which(GRPCURL_PATH): raise RuntimeError(f"{GRPCURL_PATH} is not installed or not in PATH") + # Check that openssl is installed. + if not shutil.which("openssl"): + raise RuntimeError("openssl is not installed or not in PATH") + def main() -> None: # Change to the root of aptos-core. diff --git a/testsuite/poetry.lock b/testsuite/poetry.lock index 5eaa26fe4248f..fc3a700eb7f61 100644 --- a/testsuite/poetry.lock +++ b/testsuite/poetry.lock @@ -64,14 +64,14 @@ files = [ [[package]] name = "certifi" -version = "2023.5.7" +version = "2023.7.22" description = "Python package for providing Mozilla's CA Bundle." 
category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"}, - {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"}, + {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, + {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, ] [[package]] diff --git a/testsuite/single_node_performance.py b/testsuite/single_node_performance.py index aecf3387b88b5..c9ec854a075c6 100755 --- a/testsuite/single_node_performance.py +++ b/testsuite/single_node_performance.py @@ -48,26 +48,26 @@ class RunGroupConfig: # Local machine numbers will be higher. # fmt: off TESTS = [ - RunGroupConfig(expected_tps=20500, key=RunGroupKey("no-op"), included_in=Flow.LAND_BLOCKING), - RunGroupConfig(expected_tps=2800, key=RunGroupKey("no-op", module_working_set_size=1000), included_in=Flow.LAND_BLOCKING), - RunGroupConfig(expected_tps=13000, key=RunGroupKey("coin-transfer"), included_in=Flow.LAND_BLOCKING | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=24000, key=RunGroupKey("no-op"), included_in=Flow.LAND_BLOCKING), + RunGroupConfig(expected_tps=3000, key=RunGroupKey("no-op", module_working_set_size=1000), included_in=Flow.LAND_BLOCKING), + RunGroupConfig(expected_tps=16000, key=RunGroupKey("coin-transfer"), included_in=Flow.LAND_BLOCKING | Flow.REPRESENTATIVE), RunGroupConfig(expected_tps=30300, key=RunGroupKey("coin-transfer", executor_type="native"), included_in=Flow.LAND_BLOCKING), - RunGroupConfig(expected_tps=12500, key=RunGroupKey("account-generation"), included_in=Flow.LAND_BLOCKING | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=14000, key=RunGroupKey("account-generation"), included_in=Flow.LAND_BLOCKING | Flow.REPRESENTATIVE), RunGroupConfig(expected_tps=26500, key=RunGroupKey("account-generation", executor_type="native"), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=18500, key=RunGroupKey("account-resource32-b"), included_in=Flow.LAND_BLOCKING), - RunGroupConfig(expected_tps=3550, key=RunGroupKey("modify-global-resource"), included_in=Flow.LAND_BLOCKING | Flow.REPRESENTATIVE), - RunGroupConfig(expected_tps=10600, key=RunGroupKey("modify-global-resource", module_working_set_size=10), included_in=Flow.LAND_BLOCKING), - RunGroupConfig(expected_tps=125, key=RunGroupKey("publish-package"), included_in=Flow.LAND_BLOCKING | Flow.REPRESENTATIVE), - RunGroupConfig(expected_tps=1600, key=RunGroupKey( + RunGroupConfig(expected_tps=20500, key=RunGroupKey("account-resource32-b"), included_in=Flow.LAND_BLOCKING), + RunGroupConfig(expected_tps=4200, key=RunGroupKey("modify-global-resource"), included_in=Flow.LAND_BLOCKING | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=12500, key=RunGroupKey("modify-global-resource", module_working_set_size=10), included_in=Flow.LAND_BLOCKING), + RunGroupConfig(expected_tps=130, key=RunGroupKey("publish-package"), included_in=Flow.LAND_BLOCKING | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=2650, key=RunGroupKey( "mix_publish_transfer", transaction_type_override="publish-package coin-transfer", transaction_weights_override="1 500", ), included_in=Flow.LAND_BLOCKING, waived=True), - RunGroupConfig(expected_tps=390, key=RunGroupKey("batch100-transfer"), included_in=Flow.LAND_BLOCKING), + 
RunGroupConfig(expected_tps=430, key=RunGroupKey("batch100-transfer"), included_in=Flow.LAND_BLOCKING), RunGroupConfig(expected_tps=940, key=RunGroupKey("batch100-transfer", executor_type="native"), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=1550, key=RunGroupKey("token-v1ft-mint-and-transfer"), included_in=Flow.LAND_BLOCKING), - RunGroupConfig(expected_tps=7700, key=RunGroupKey("token-v1ft-mint-and-transfer", module_working_set_size=20), included_in=Flow.LAND_BLOCKING), + RunGroupConfig(expected_tps=1980, key=RunGroupKey("token-v1ft-mint-and-transfer"), included_in=Flow.LAND_BLOCKING), + RunGroupConfig(expected_tps=9300, key=RunGroupKey("token-v1ft-mint-and-transfer", module_working_set_size=20), included_in=Flow.LAND_BLOCKING), RunGroupConfig(expected_tps=1000, key=RunGroupKey("token-v1nft-mint-and-transfer-sequential"), included_in=Flow.CONTINUOUS), RunGroupConfig(expected_tps=5150, key=RunGroupKey("token-v1nft-mint-and-transfer-sequential", module_working_set_size=20), included_in=Flow.CONTINUOUS), RunGroupConfig(expected_tps=1300, key=RunGroupKey("token-v1nft-mint-and-transfer-parallel"), included_in=Flow.CONTINUOUS), @@ -80,8 +80,8 @@ class RunGroupConfig: RunGroupConfig(expected_tps=18000, key=RunGroupKey("no-op2-signers"), included_in=Flow.CONTINUOUS), RunGroupConfig(expected_tps=18000, key=RunGroupKey("no-op5-signers"), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=1500, key=RunGroupKey("token-v2-ambassador-mint"), included_in=Flow.LAND_BLOCKING | Flow.REPRESENTATIVE), - RunGroupConfig(expected_tps=5500, key=RunGroupKey("token-v2-ambassador-mint", module_working_set_size=20), included_in=Flow.LAND_BLOCKING | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=1820, key=RunGroupKey("token-v2-ambassador-mint"), included_in=Flow.LAND_BLOCKING | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=6250, key=RunGroupKey("token-v2-ambassador-mint", module_working_set_size=20), included_in=Flow.LAND_BLOCKING | Flow.REPRESENTATIVE), RunGroupConfig(expected_tps=50000, key=RunGroupKey("coin_transfer_connected_components", executor_type="sharded", sharding_traffic_flags="--connected-tx-grps 5000", transaction_type_override=""), included_in=Flow.REPRESENTATIVE), RunGroupConfig(expected_tps=50000, key=RunGroupKey("coin_transfer_hotspot", executor_type="sharded", sharding_traffic_flags="--hotspot-probability 0.8", transaction_type_override=""), included_in=Flow.REPRESENTATIVE), diff --git a/testsuite/smoke-test/Cargo.toml b/testsuite/smoke-test/Cargo.toml index 84802209312e3..2447cc6187401 100644 --- a/testsuite/smoke-test/Cargo.toml +++ b/testsuite/smoke-test/Cargo.toml @@ -58,7 +58,6 @@ aptos-logger = { workspace = true } aptos-secure-storage = { workspace = true } aptos-time-service = { workspace = true } aptos-vault-client = { workspace = true } -aptos-writeset-generator = { workspace = true } base64 = { workspace = true } futures = { workspace = true } num_cpus = { workspace = true } diff --git a/testsuite/smoke-test/src/aptos_cli/validator.rs b/testsuite/smoke-test/src/aptos_cli/validator.rs index 9b43949458611..6944ec5df2ef0 100644 --- a/testsuite/smoke-test/src/aptos_cli/validator.rs +++ b/testsuite/smoke-test/src/aptos_cli/validator.rs @@ -44,7 +44,7 @@ use std::{ async fn test_analyze_validators() { let (mut swarm, cli, _faucet) = SwarmBuilder::new_local(1) .with_aptos() - .with_init_config(Arc::new(|_i, _conf, genesis_stake_amount| { + .with_init_genesis_stake(Arc::new(|_i, genesis_stake_amount| { *genesis_stake_amount = 100000; })) .build_with_cli(0) @@ 
-540,7 +540,7 @@ async fn test_large_total_stake() { // just barelly below u64::MAX const BASE: u64 = 10_000_000_000_000_000_000; let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) - .with_init_config(Arc::new(|_, _, genesis_stake_amount| { + .with_init_genesis_stake(Arc::new(|_, genesis_stake_amount| { // make sure we have quorum *genesis_stake_amount = BASE; })) @@ -606,12 +606,13 @@ async fn test_nodes_rewards() { const BASE: u64 = 3600u64 * 24 * 365 * 10 * 100; let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) - .with_init_config(Arc::new(|i, conf, genesis_stake_amount| { + .with_init_config(Arc::new(|_, conf, _| { // reduce timeout, as we will have dead node during rounds conf.consensus.round_initial_timeout_ms = 200; conf.consensus.quorum_store_poll_time_ms = 100; conf.api.failpoints_enabled = true; - + })) + .with_init_genesis_stake(Arc::new(|i, genesis_stake_amount| { // make sure we have quorum *genesis_stake_amount = if i < 2 { 10 * BASE } else { BASE }; })) @@ -1035,10 +1036,12 @@ async fn test_register_and_update_validator() { async fn test_join_and_leave_validator() { let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(1) .with_aptos() - .with_init_config(Arc::new(|_i, conf, genesis_stake_amount| { + .with_init_config(Arc::new(|_i, conf, _| { // reduce timeout, as we will have dead node during rounds conf.consensus.round_initial_timeout_ms = 200; conf.consensus.quorum_store_poll_time_ms = 100; + })) + .with_init_genesis_stake(Arc::new(|_i, genesis_stake_amount| { *genesis_stake_amount = 100000; })) .with_init_genesis_config(Arc::new(|genesis_config| { @@ -1197,10 +1200,12 @@ async fn test_join_and_leave_validator() { async fn test_owner_create_and_delegate_flow() { let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(1) .with_aptos() - .with_init_config(Arc::new(|_i, conf, genesis_stake_amount| { + .with_init_config(Arc::new(|_i, conf, _| { // reduce timeout, as we will have dead node during rounds conf.consensus.round_initial_timeout_ms = 200; conf.consensus.quorum_store_poll_time_ms = 100; + })) + .with_init_genesis_stake(Arc::new(|_i, genesis_stake_amount| { // enough for quorum *genesis_stake_amount = 5000000; })) diff --git a/testsuite/smoke-test/src/consensus/quorum_store_fault_tolerance.rs b/testsuite/smoke-test/src/consensus/quorum_store_fault_tolerance.rs index 6d8ba7fde399b..a9eea2cd2b55d 100644 --- a/testsuite/smoke-test/src/consensus/quorum_store_fault_tolerance.rs +++ b/testsuite/smoke-test/src/consensus/quorum_store_fault_tolerance.rs @@ -268,10 +268,13 @@ async fn test_batch_id_on_restart(do_wipe_db: bool) { info!("stop node 0"); swarm.validator_mut(node_to_restart).unwrap().stop(); if do_wipe_db { - info!("wipe only quorum store db"); let node0_config = swarm.validator(node_to_restart).unwrap().config().clone(); let db_dir = node0_config.storage.dir(); let quorum_store_db_dir = db_dir.join(QUORUM_STORE_DB_NAME); + info!( + "wipe only quorum store db: {}", + quorum_store_db_dir.display() + ); fs::remove_dir_all(quorum_store_db_dir).unwrap(); } else { info!("don't do anything to quorum store db"); diff --git a/testsuite/smoke-test/src/full_nodes.rs b/testsuite/smoke-test/src/full_nodes.rs index 7abff7eafdf0f..2360c8a5cb78c 100644 --- a/testsuite/smoke-test/src/full_nodes.rs +++ b/testsuite/smoke-test/src/full_nodes.rs @@ -10,7 +10,7 @@ use crate::{ }, }; use aptos_config::{ - config::{DiscoveryMethod, NodeConfig, Peer, PeerRole, HANDSHAKE_VERSION}, + config::{DiscoveryMethod, NodeConfig, OverrideNodeConfig, Peer, PeerRole, 
HANDSHAKE_VERSION}, network_id::NetworkId, }; use aptos_forge::{LocalSwarm, NodeExt, Swarm, SwarmExt}; @@ -28,7 +28,10 @@ async fn test_full_node_basic_flow() { let vfn_peer_id = swarm.full_nodes().next().unwrap().peer_id(); let version = swarm.versions().max().unwrap(); let pfn_peer_id = swarm - .add_full_node(&version, NodeConfig::get_default_pfn_config()) + .add_full_node( + &version, + OverrideNodeConfig::new_with_default_base(NodeConfig::get_default_pfn_config()), + ) .await .unwrap(); for fullnode in swarm.full_nodes_mut() { @@ -214,7 +217,13 @@ async fn test_private_full_node() { NetworkId::Public, PeerRole::PreferredUpstream, ); - let private = swarm.add_full_node(&version, private_config).await.unwrap(); + let private = swarm + .add_full_node( + &version, + OverrideNodeConfig::new_with_default_base(private_config), + ) + .await + .unwrap(); // And connect the user to the private swarm add_node_to_seeds( @@ -223,7 +232,13 @@ async fn test_private_full_node() { NetworkId::Public, PeerRole::PreferredUpstream, ); - let user = swarm.add_full_node(&version, user_config).await.unwrap(); + let user = swarm + .add_full_node( + &version, + OverrideNodeConfig::new_with_default_base(user_config), + ) + .await + .unwrap(); swarm .wait_for_connectivity(Instant::now() + Duration::from_secs(MAX_CONNECTIVITY_WAIT_SECS)) diff --git a/testsuite/smoke-test/src/fullnode.rs b/testsuite/smoke-test/src/fullnode.rs index 6611c3ab21ef7..3cc1081ed3a8d 100644 --- a/testsuite/smoke-test/src/fullnode.rs +++ b/testsuite/smoke-test/src/fullnode.rs @@ -7,7 +7,7 @@ use crate::{ }; use anyhow::bail; use aptos_cached_packages::aptos_stdlib; -use aptos_config::config::NodeConfig; +use aptos_config::config::{NodeConfig, OverrideNodeConfig}; use aptos_forge::{NodeExt, Result, Swarm}; use aptos_rest_client::Client as RestClient; use aptos_types::account_address::AccountAddress; @@ -19,14 +19,17 @@ async fn test_indexer() { let version = swarm.versions().max().unwrap(); let fullnode_peer_id = swarm - .add_full_node(&version, NodeConfig::get_default_pfn_config()) + .add_full_node( + &version, + OverrideNodeConfig::new_with_default_base(NodeConfig::get_default_pfn_config()), + ) .await .unwrap(); let validator_peer_id = swarm.validators().next().unwrap().peer_id(); let _vfn_peer_id = swarm .add_validator_full_node( &version, - NodeConfig::get_default_vfn_config(), + OverrideNodeConfig::new_with_default_base(NodeConfig::get_default_vfn_config()), validator_peer_id, ) .unwrap(); diff --git a/testsuite/smoke-test/src/network.rs b/testsuite/smoke-test/src/network.rs index ed945499f95de..8ca6423f53c36 100644 --- a/testsuite/smoke-test/src/network.rs +++ b/testsuite/smoke-test/src/network.rs @@ -10,8 +10,8 @@ use crate::{ use aptos::{common::types::EncodingType, test::CliTestFramework}; use aptos_config::{ config::{ - DiscoveryMethod, FileDiscovery, Identity, NetworkConfig, NodeConfig, Peer, PeerSet, - RestDiscovery, + DiscoveryMethod, FileDiscovery, Identity, NetworkConfig, NodeConfig, OverrideNodeConfig, + Peer, PeerSet, RestDiscovery, }, network_id::NetworkId, }; @@ -53,7 +53,11 @@ async fn test_connection_limiting() { }); let vfn_peer_id = swarm - .add_validator_fullnode(&version, full_node_config, validator_peer_id) + .add_validator_fullnode( + &version, + OverrideNodeConfig::new_with_default_base(full_node_config), + validator_peer_id, + ) .unwrap(); // Wait till nodes are healthy @@ -68,12 +72,12 @@ async fn test_connection_limiting() { let pfn_peer_id = swarm .add_full_node( &version, - add_identity_to_config( + 
OverrideNodeConfig::new_with_default_base(add_identity_to_config( NodeConfig::get_default_pfn_config(), &NetworkId::Public, private_key, peer_set, - ), + )), ) .await .unwrap(); @@ -109,12 +113,12 @@ async fn test_connection_limiting() { let pfn_peer_id_fail = swarm .add_full_node( &version, - add_identity_to_config( + OverrideNodeConfig::new_with_default_base(add_identity_to_config( NodeConfig::get_default_pfn_config(), &NetworkId::Public, private_key, peer_set, - ), + )), ) .await .unwrap(); @@ -158,7 +162,10 @@ async fn test_rest_discovery() { // Start a new node that should connect to the previous node only via REST // The startup wait time should check if it connects successfully swarm - .add_full_node(&version, full_node_config) + .add_full_node( + &version, + OverrideNodeConfig::new_with_default_base(full_node_config), + ) .await .unwrap(); } diff --git a/testsuite/smoke-test/src/smoke_test_environment.rs b/testsuite/smoke-test/src/smoke_test_environment.rs index e5db1a1d0bdd1..5ceb2959911aa 100644 --- a/testsuite/smoke-test/src/smoke_test_environment.rs +++ b/testsuite/smoke-test/src/smoke_test_environment.rs @@ -8,7 +8,7 @@ use aptos_crypto::ed25519::Ed25519PrivateKey; use aptos_faucet_core::server::{FunderKeyEnum, RunConfig}; use aptos_forge::{ActiveNodesGuard, Factory, LocalFactory, LocalSwarm, Node}; use aptos_framework::ReleaseBundle; -use aptos_genesis::builder::{InitConfigFn, InitGenesisConfigFn}; +use aptos_genesis::builder::{InitConfigFn, InitGenesisConfigFn, InitGenesisStakeFn}; use aptos_infallible::Mutex; use aptos_logger::prelude::*; use aptos_types::chain_id::ChainId; @@ -27,6 +27,7 @@ pub struct SwarmBuilder { genesis_framework: Option, init_config: Option, vfn_config: Option, + init_genesis_stake: Option, init_genesis_config: Option, } @@ -39,6 +40,7 @@ impl SwarmBuilder { genesis_framework: None, init_config: None, vfn_config: None, + init_genesis_stake: None, init_genesis_config: None, } } @@ -67,6 +69,11 @@ impl SwarmBuilder { self } + pub fn with_init_genesis_stake(mut self, init_genesis_stake: InitGenesisStakeFn) -> Self { + self.init_genesis_stake = Some(init_genesis_stake); + self + } + pub fn with_init_genesis_config(mut self, init_genesis_config: InitGenesisConfigFn) -> Self { self.init_genesis_config = Some(init_genesis_config); self @@ -105,6 +112,7 @@ impl SwarmBuilder { builder.genesis_framework, builder.init_config, builder.vfn_config, + builder.init_genesis_stake, Some(Arc::new(move |genesis_config| { if let Some(init_genesis_config) = &init_genesis_config { (init_genesis_config)(genesis_config); diff --git a/testsuite/smoke-test/src/state_sync.rs b/testsuite/smoke-test/src/state_sync.rs index 336802db82679..638e5ba3f1c00 100644 --- a/testsuite/smoke-test/src/state_sync.rs +++ b/testsuite/smoke-test/src/state_sync.rs @@ -9,7 +9,9 @@ use crate::{ MAX_CATCH_UP_WAIT_SECS, MAX_HEALTHY_WAIT_SECS, }, }; -use aptos_config::config::{BootstrappingMode, ContinuousSyncingMode, NodeConfig}; +use aptos_config::config::{ + BootstrappingMode, ContinuousSyncingMode, NodeConfig, OverrideNodeConfig, +}; use aptos_forge::{LocalSwarm, Node, NodeExt, Swarm, SwarmExt}; use aptos_inspection_service::inspection_client::InspectionClient; use aptos_rest_client::Client as RestClient; @@ -228,7 +230,7 @@ async fn create_full_node(full_node_config: NodeConfig, swarm: &mut LocalSwarm) let vfn_peer_id = swarm .add_validator_fullnode( &swarm.versions().max().unwrap(), - full_node_config, + OverrideNodeConfig::new_with_default_base(full_node_config), validator_peer_id, ) .unwrap(); 
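For reference, the hunks above all make the same mechanical change: Swarm::add_full_node and Swarm::add_validator_full_node now take an OverrideNodeConfig instead of a bare NodeConfig. A minimal usage sketch, mirroring the smoke-test call sites in this diff (swarm, version, and validator_peer_id are assumed to come from the usual SwarmBuilder setup; the snippet is illustrative, not part of the patch):

    use aptos_config::config::{NodeConfig, OverrideNodeConfig};

    // Public fullnode: wrap the stock PFN config in an override with a default base.
    let pfn_peer_id = swarm
        .add_full_node(
            &version,
            OverrideNodeConfig::new_with_default_base(NodeConfig::get_default_pfn_config()),
        )
        .await
        .unwrap();

    // Validator fullnode: same wrapping, plus the validator peer it attaches to.
    let vfn_peer_id = swarm
        .add_validator_full_node(
            &version,
            OverrideNodeConfig::new_with_default_base(NodeConfig::get_default_vfn_config()),
            validator_peer_id,
        )
        .unwrap();
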
diff --git a/testsuite/smoke-test/src/test_smoke_tests.rs b/testsuite/smoke-test/src/test_smoke_tests.rs index 36da735b3ccd5..6c071fd581f52 100644 --- a/testsuite/smoke-test/src/test_smoke_tests.rs +++ b/testsuite/smoke-test/src/test_smoke_tests.rs @@ -5,7 +5,7 @@ use crate::{ smoke_test_environment::SwarmBuilder, test_utils::{MAX_CONNECTIVITY_WAIT_SECS, MAX_HEALTHY_WAIT_SECS}, }; -use aptos_config::config::NodeConfig; +use aptos_config::config::{NodeConfig, OverrideNodeConfig}; use aptos_forge::{NodeExt, Swarm}; use std::{ sync::Arc, @@ -33,7 +33,11 @@ async fn test_aptos_node_after_get_bin() { let validator = validator_peer_ids[0]; let _vfn = swarm - .add_validator_fullnode(&version, NodeConfig::get_default_vfn_config(), validator) + .add_validator_fullnode( + &version, + OverrideNodeConfig::new_with_default_base(NodeConfig::get_default_vfn_config()), + validator, + ) .unwrap(); for fullnode in swarm.full_nodes_mut() { diff --git a/testsuite/smoke-test/src/txn_broadcast.rs b/testsuite/smoke-test/src/txn_broadcast.rs index 88ec232f292af..816ba1f989aa4 100644 --- a/testsuite/smoke-test/src/txn_broadcast.rs +++ b/testsuite/smoke-test/src/txn_broadcast.rs @@ -8,7 +8,7 @@ use crate::{ MAX_CONNECTIVITY_WAIT_SECS, MAX_HEALTHY_WAIT_SECS, }, }; -use aptos_config::config::NodeConfig; +use aptos_config::config::{NodeConfig, OverrideNodeConfig}; use aptos_forge::{NodeExt, Swarm, SwarmExt}; use std::{ sync::Arc, @@ -32,7 +32,11 @@ async fn test_txn_broadcast() { let validator = validator_peer_ids[1]; let vfn = swarm - .add_validator_fullnode(&version, NodeConfig::get_default_vfn_config(), validator) + .add_validator_fullnode( + &version, + OverrideNodeConfig::new_with_default_base(NodeConfig::get_default_vfn_config()), + validator, + ) .unwrap(); for fullnode in swarm.full_nodes_mut() { diff --git a/testsuite/smoke-test/src/upgrade.rs b/testsuite/smoke-test/src/upgrade.rs index 1d7214946d3c3..4eac208f0b19b 100644 --- a/testsuite/smoke-test/src/upgrade.rs +++ b/testsuite/smoke-test/src/upgrade.rs @@ -217,7 +217,7 @@ async fn test_upgrade_flow() { #[tokio::test(flavor = "multi_thread")] async fn test_release_validate_tool_multi_step() { let (mut env, _, _) = SwarmBuilder::new_local(1) - .with_init_config(Arc::new(|_, _, genesis_stake_amount| { + .with_init_genesis_stake(Arc::new(|_, genesis_stake_amount| { // make sure we have quorum *genesis_stake_amount = 2000000000000000; })) diff --git a/testsuite/testcases/Cargo.toml b/testsuite/testcases/Cargo.toml index 6519af0511638..34a224cfe2a2c 100644 --- a/testsuite/testcases/Cargo.toml +++ b/testsuite/testcases/Cargo.toml @@ -15,6 +15,7 @@ rust-version = { workspace = true } [dependencies] anyhow = { workspace = true } aptos = { workspace = true, features = ["fuzzing"] } +aptos-config = { workspace = true } aptos-forge = { workspace = true } aptos-genesis = { workspace = true } aptos-global-constants = { workspace = true } diff --git a/testsuite/testcases/src/data/two_region_link_stats.csv b/testsuite/testcases/src/data/two_region_link_stats.csv new file mode 100644 index 0000000000000..de738dfab91b2 --- /dev/null +++ b/testsuite/testcases/src/data/two_region_link_stats.csv @@ -0,0 +1,3 @@ +sending_region,receiving_region,bitrate_bps,avgrtt +aws--sa-east-1,aws--ap-northeast-1,32505856,255.289 +aws--ap-northeast-1,aws--sa-east-1,32243712,255.323 \ No newline at end of file diff --git a/testsuite/testcases/src/forge_setup_test.rs b/testsuite/testcases/src/forge_setup_test.rs index d88a91dde089b..6a69224bb1bc4 100644 --- 
a/testsuite/testcases/src/forge_setup_test.rs +++ b/testsuite/testcases/src/forge_setup_test.rs @@ -3,6 +3,7 @@ use crate::generate_traffic; use anyhow::Context; +use aptos_config::config::OverrideNodeConfig; use aptos_forge::{NetworkContext, NetworkTest, Result, Test}; use aptos_logger::info; use rand::{ @@ -64,7 +65,8 @@ impl NetworkTest for ForgeSetupTest { let num_pfns = 5; for _ in 0..num_pfns { let pfn_version = swarm.versions().max().unwrap(); - let pfn_node_config = swarm.get_default_pfn_node_config(); + let pfn_node_config = + OverrideNodeConfig::new_with_default_base(swarm.get_default_pfn_node_config()); let pfn_peer_id = runtime.block_on(swarm.add_full_node(&pfn_version, pfn_node_config))?; diff --git a/testsuite/testcases/src/multi_region_network_test.rs b/testsuite/testcases/src/multi_region_network_test.rs index dc5b216da1e6c..422a38de4b9a3 100644 --- a/testsuite/testcases/src/multi_region_network_test.rs +++ b/testsuite/testcases/src/multi_region_network_test.rs @@ -11,17 +11,14 @@ use std::collections::BTreeMap; /// The link stats are obtained from https://github.com/doitintl/intercloud-throughput/blob/master/results_202202/results.csv /// The four regions were hand-picked from the dataset to simulate a multi-region setup /// with high latencies and low bandwidth. -macro_rules! FOUR_REGION_LINK_STATS_CSV { - () => { - "data/four_region_link_stats.csv" - }; -} +const FOUR_REGION_LINK_STATS: &[u8] = include_bytes!("data/four_region_link_stats.csv"); +/// The two regions were chosen as the most distant regions among the four regions set. +const TWO_REGION_LINK_STATS: &[u8] = include_bytes!("data/two_region_link_stats.csv"); -fn get_link_stats_table() -> BTreeMap> { +fn get_link_stats_table(csv: &[u8]) -> BTreeMap> { let mut stats_table = BTreeMap::new(); - let mut rdr = - csv::Reader::from_reader(include_bytes!(FOUR_REGION_LINK_STATS_CSV!()).as_slice()); + let mut rdr = csv::Reader::from_reader(csv); rdr.deserialize() .for_each(|result: Result<(String, String, u64, f64), _>| { if let Ok((from, to, bitrate, latency)) = result { @@ -205,13 +202,22 @@ pub struct MultiRegionNetworkEmulationConfig { impl Default for MultiRegionNetworkEmulationConfig { fn default() -> Self { Self { - link_stats_table: get_link_stats_table(), + link_stats_table: get_link_stats_table(FOUR_REGION_LINK_STATS), inter_region_config: InterRegionNetEmConfig::default(), intra_region_config: Some(IntraRegionNetEmConfig::default()), } } } +impl MultiRegionNetworkEmulationConfig { + pub fn two_region() -> Self { + Self { + link_stats_table: get_link_stats_table(TWO_REGION_LINK_STATS), + ..Default::default() + } + } +} + /// A test to emulate network conditions for a multi-region setup. 
#[derive(Default)] pub struct MultiRegionNetworkEmulationTest { diff --git a/testsuite/testcases/src/public_fullnode_performance.rs b/testsuite/testcases/src/public_fullnode_performance.rs index a29359341bbd9..4a9909e0cd3fd 100644 --- a/testsuite/testcases/src/public_fullnode_performance.rs +++ b/testsuite/testcases/src/public_fullnode_performance.rs @@ -7,6 +7,7 @@ use crate::{ NetworkLoadTest, }; use anyhow::Error; +use aptos_config::config::OverrideNodeConfig; use aptos_forge::{ NetworkContext, NetworkTest, Result, Swarm, SwarmChaos, SwarmCpuStress, SwarmNetEm, Test, }; @@ -176,10 +177,11 @@ fn create_and_add_pfns(ctx: &mut NetworkContext, num_pfns: u64) -> Result, baseline: RefCell, - resource_store: RefCell>>, + resource_store: RefCell>, } fn test_account() -> AccountAddress { @@ -385,12 +386,12 @@ impl<'a> ModuleResolver for HarnessProxy<'a> { vec![] } - fn get_module(&self, id: &ModuleId) -> Result>, Error> { + fn get_module(&self, id: &ModuleId) -> Result, Error> { Ok(self .harness .module_cache .get(id.name()) - .map(|c| c.serialize(None))) + .map(|c| c.serialize(None).into())) } } @@ -400,7 +401,7 @@ impl<'a> ResourceResolver for HarnessProxy<'a> { address: &AccountAddress, typ: &StructTag, _metadata: &[Metadata], - ) -> anyhow::Result<(Option>, usize)> { + ) -> anyhow::Result<(Option, usize)> { let res = self .harness .resource_store diff --git a/third_party/move/extensions/move-table-extension/Cargo.toml b/third_party/move/extensions/move-table-extension/Cargo.toml index 1c9666d6b9b47..1a857f2eaf723 100644 --- a/third_party/move/extensions/move-table-extension/Cargo.toml +++ b/third_party/move/extensions/move-table-extension/Cargo.toml @@ -11,9 +11,10 @@ publish = false [dependencies] anyhow = "1.0.52" better_any = "0.1.1" +bytes = "1.4.0" move-binary-format = { path = "../../move-binary-format" } move-core-types = { path = "../../move-core/types" } -move-vm-runtime = { path = "../../move-vm/runtime", features = ["debugging"] } +move-vm-runtime = { path = "../../move-vm/runtime" } move-vm-types = { path = "../../move-vm/types" } once_cell = "1.7.2" sha3 = "0.9.1" diff --git a/third_party/move/extensions/move-table-extension/src/lib.rs b/third_party/move/extensions/move-table-extension/src/lib.rs index a3688e8c3ee23..5a769fbfc40bf 100644 --- a/third_party/move/extensions/move-table-extension/src/lib.rs +++ b/third_party/move/extensions/move-table-extension/src/lib.rs @@ -8,6 +8,7 @@ //! See [`README.md`](../README.md) for integration into an adapter. use better_any::{Tid, TidAble}; +use bytes::Bytes; use move_binary_format::errors::{PartialVMError, PartialVMResult}; use move_core_types::{ account_address::AccountAddress, @@ -82,7 +83,7 @@ pub struct TableChangeSet { /// A change of a single table. pub struct TableChange { - pub entries: BTreeMap, Op>>, + pub entries: BTreeMap, Op>, } /// A table resolver which needs to be provided by the environment. This allows to lookup @@ -92,7 +93,7 @@ pub trait TableResolver { &self, handle: &TableHandle, key: &[u8], - ) -> Result>, anyhow::Error>; + ) -> Result, anyhow::Error>; } /// The native table context extension. 
This needs to be attached to the NativeContextExtensions @@ -176,11 +177,11 @@ impl<'a> NativeTableContext<'a> { match op { Op::New(val) => { let bytes = serialize(&value_layout, &val)?; - entries.insert(key, Op::New(bytes)); + entries.insert(key, Op::New(bytes.into())); }, Op::Modify(val) => { let bytes = serialize(&value_layout, &val)?; - entries.insert(key, Op::Modify(bytes)); + entries.insert(key, Op::Modify(bytes.into())); }, Op::Delete => { entries.insert(key, Op::Delete); diff --git a/third_party/move/move-analyzer/Cargo.toml b/third_party/move/move-analyzer/Cargo.toml index 91348e9549fb3..5d789d16dfa4b 100644 --- a/third_party/move/move-analyzer/Cargo.toml +++ b/third_party/move/move-analyzer/Cargo.toml @@ -14,7 +14,7 @@ clap = { version = "4.3.9", features = ["derive"] } codespan-reporting = "0.11.1" crossbeam = "0.8" derivative = "2.2.0" -dunce = "1.0.2" +dunce = "1.0.4" im = "15.1.0" lsp-server = "0.5.1" lsp-types = "0.90.1" diff --git a/third_party/move/move-binary-format/src/compatibility.rs b/third_party/move/move-binary-format/src/compatibility.rs index e882a4d7a7dd6..6954f0c9ee65d 100644 --- a/third_party/move/move-binary-format/src/compatibility.rs +++ b/third_party/move/move-binary-format/src/compatibility.rs @@ -192,17 +192,17 @@ impl Compatibility { if self.check_struct_and_pub_function_linking && !struct_and_pub_function_linking { return Err(PartialVMError::new( StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE, - )); + ).with_message(format!("Module Update Failure: Public function/struct signature of new module differs from existing module in {:?}::{}", old_module.address, old_module.name))); } if self.check_struct_layout && !struct_layout { return Err(PartialVMError::new( StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE, - )); + ).with_message(format!("Module Update Failure: Struct layout of new module differs from existing module in {:?}::{}", old_module.address, old_module.name))); } if self.check_friend_linking && !friend_linking { return Err(PartialVMError::new( StatusCode::BACKWARD_INCOMPATIBLE_MODULE_UPDATE, - )); + ).with_message(format!("Module Update Failure: Friend signature of new module differs from existing module in {:?}::{}", old_module.address, old_module.name))); } Ok(()) diff --git a/third_party/move/move-binary-format/src/errors.rs b/third_party/move/move-binary-format/src/errors.rs index c60568d97ba43..c4250875a09b4 100644 --- a/third_party/move/move-binary-format/src/errors.rs +++ b/third_party/move/move-binary-format/src/errors.rs @@ -234,6 +234,70 @@ impl VMError { offsets, })) } + + pub fn format_test_output(&self, verbose: bool, comparison_mode: bool) -> String { + let location_string = match &self.location() { + Location::Undefined => "undefined".to_owned(), + Location::Script => "script".to_owned(), + Location::Module(id) => { + format!("0x{}::{}", id.address().short_str_lossless(), id.name()) + }, + }; + let indices = if comparison_mode { + // During comparison testing, abstract this data. + "redacted".to_string() + } else { + format!("{:?}", self.indices()) + }; + let offsets = if comparison_mode { + // During comparison testing, abstract this data.
+ "redacted".to_string() + } else { + format!("{:?}", self.offsets()) + }; + + if verbose { + let message_str = match &self.message() { + Some(message_str) => message_str, + None => "None", + }; + format!( + "{{ + message: {message}, + major_status: {major_status:?}, + sub_status: {sub_status:?}, + location: {location_string}, + indices: {indices}, + offsets: {offsets}, + exec_state: {exec_state:?}, +}}", + message = message_str, + major_status = self.major_status(), + sub_status = self.sub_status(), + location_string = location_string, + exec_state = self.exec_state(), + // TODO maybe include source map info? + indices = indices, + offsets = offsets, + ) + } else { + format!( + "{{ + major_status: {major_status:?}, + sub_status: {sub_status:?}, + location: {location_string}, + indices: {indices}, + offsets: {offsets}, +}}", + major_status = self.major_status(), + sub_status = self.sub_status(), + location_string = location_string, + // TODO maybe include source map info? + indices = indices, + offsets = offsets, + ) + } + } } impl fmt::Debug for VMError { diff --git a/third_party/move/move-binary-format/src/file_format.rs b/third_party/move/move-binary-format/src/file_format.rs index c23bd86ff1980..85ae771791f6e 100644 --- a/third_party/move/move-binary-format/src/file_format.rs +++ b/third_party/move/move-binary-format/src/file_format.rs @@ -382,9 +382,8 @@ pub struct StructDefinition { impl StructDefinition { pub fn declared_field_count(&self) -> PartialVMResult { match &self.field_information { - // TODO we might want a more informative error here StructFieldInformation::Native => Err(PartialVMError::new(StatusCode::LINKER_ERROR) - .with_message("Looking for field in native structure".to_string())), + .with_message("Looking for field in native structure. Native structures have no accessible fields.".to_string())), StructFieldInformation::Declared(fields) => Ok(fields.len() as u16), } } diff --git a/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/bug_9459_notworking.exp b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/bug_9459_notworking.exp new file mode 100644 index 0000000000000..6cd67db3f6472 --- /dev/null +++ b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/bug_9459_notworking.exp @@ -0,0 +1 @@ +processed 1 task diff --git a/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/bug_9459_notworking.move b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/bug_9459_notworking.move new file mode 100644 index 0000000000000..1d6c6a539c00a --- /dev/null +++ b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/bug_9459_notworking.move @@ -0,0 +1,28 @@ +//# publish +module 0x1337::reproduce { + use std::option::{Self, Option}; + #[test_only] + use aptos_framework::account; + + const ESpaceAlreadyMarked: u64 = 0; + + public entry fun init(_account: &signer) { + let space: Option = option::none(); + check_if_space_is_open(&mut space); + } + + inline fun check_if_space_is_open(space: &Option) { + // TODO: Ensure given space is not already marked. 
If it is, abort with code: + // ESpaceAlreadyMarked + assert!( + option::is_none(space), + ESpaceAlreadyMarked + ); + } + + #[test] + fun test() { + let account = account::create_account_for_test(@0x1337); + init(&account); + } +} diff --git a/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/bug_9459_working.exp b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/bug_9459_working.exp new file mode 100644 index 0000000000000..6cd67db3f6472 --- /dev/null +++ b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/bug_9459_working.exp @@ -0,0 +1 @@ +processed 1 task diff --git a/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/bug_9459_working.move b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/bug_9459_working.move new file mode 100644 index 0000000000000..b74dacd0f0179 --- /dev/null +++ b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/bug_9459_working.move @@ -0,0 +1,22 @@ +//# publish +module 0x1337::reproduce { + use std::option::{Self, Option}; + #[test_only] + use aptos_framework::account; + + const ESpaceAlreadyMarked: u64 = 0; + + public entry fun init(_account: &signer) { + let space: Option = option::none(); + assert!( + option::is_none(&mut space), + ESpaceAlreadyMarked + ); + } + + #[test] + fun test() { + let account = account::create_account_for_test(@0x1337); + init(&account); + } +} diff --git a/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/gerbens_test.exp b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/gerbens_test.exp new file mode 100644 index 0000000000000..21191ffa107a6 --- /dev/null +++ b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/gerbens_test.exp @@ -0,0 +1,10 @@ +processed 1 task + +task 0 'publish'. lines 1-93: +Error: error[E04024]: invalid usage of function type + ┌─ TEMPFILE:9:62 + │ +9 │ public fun for_each_ref(v: &vector, f: |&Element|) { + │ ^^^^^^^^^^ function type only allowed for inline function arguments + + diff --git a/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/gerbens_test.move b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/gerbens_test.move new file mode 100644 index 0000000000000..a0a1d27cdb562 --- /dev/null +++ b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/gerbens_test.move @@ -0,0 +1,93 @@ +//# publish +// Try to pass a lambda to a non-inline function by mistake. +// Should generate an error. +module 0x42::test_case { + use std::vector; + + /// The index into the vector is out of bounds + const EINVALID_RANGE: u64 = 0x20001; + + /// Apply the function to a reference of each element in the vector. + public fun for_each_ref(v: &vector, f: |&Element|) { + let i = 0; + while (i < vector::length(v)) { + f(vector::borrow(v, i)); + i = i + 1 + } + } + + /// Map the function over the references of the elements of the vector, producing a new vector without modifying the + /// original map. + public inline fun map_ref( + v: &vector, + f: |&Element|NewElement + ): vector { + let result = vector[]; + for_each_ref(v, |elem| vector::push_back(&mut result, f(elem))); + result + } + + /// Reverses the order of the elements [left, right) in the vector `v` in place. 
+ public fun reverse_slice(v: &mut vector, left: u64, right: u64) { + assert!(left <= right, EINVALID_RANGE); + if (left == right) return; + right = right - 1; + while (left < right) { + vector::swap(v, left, right); + left = left + 1; + right = right - 1; + } + } + /// Same as above but on a sub-slice of an array [left, right) with left <= rot <= right + /// returns the + public fun rotate_slice( + v: &mut vector, + left: u64, + rot: u64, + right: u64 + ): u64 { + reverse_slice(v, left, rot); + reverse_slice(v, rot, right); + reverse_slice(v, left, right); + left + (right - rot) + } + + /// For in-place stable partition we need recursion so we cannot use inline functions + /// and thus we cannot use lambdas. Luckily it so happens that we can precompute the predicate + /// in a secondary array. Note how the algorithm belows only start shuffling items after the + /// predicate is checked. + public fun stable_partition_internal( + v: &mut vector, + pred: &vector, + left: u64, + right: u64 + ): u64 { + if (left == right) { + left + } else if (left + 1 == right) { + if (*vector::borrow(pred, left)) right else left + } else { + let mid = left + ((right - left) >> 1); + let p1 = stable_partition_internal(v, pred, left, mid); + let p2 = stable_partition_internal(v, pred, mid, right); + rotate_slice(v, p1, mid, p2) + } + } + + /// Partition the array based on a predicate p, this routine is stable and thus + /// preserves the relative order of the elements in the two partitions. + public inline fun stable_partition( + v: &mut vector, + p: |&Element|bool + ): u64 { + let pred = map_ref(v, |e| p(e)); + let len = vector::length(v); + stable_partition_internal(v, &pred,0, len) + } + + fun test_stable_partition() { + let v = vector[1, 2, 3, 4, 5]; + assert!(stable_partition(&mut v, |n| *n % 2 == 0) == 2, 0); + assert!(&v == &vector[2, 4, 1, 3, 5], 1); + } +} diff --git a/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/gerbens_test1.exp b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/gerbens_test1.exp new file mode 100644 index 0000000000000..6cd67db3f6472 --- /dev/null +++ b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/gerbens_test1.exp @@ -0,0 +1 @@ +processed 1 task diff --git a/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/gerbens_test1.move b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/gerbens_test1.move new file mode 100644 index 0000000000000..0027b0e024cd0 --- /dev/null +++ b/third_party/move/move-bytecode-verifier/transactional-tests/tests/type_safety/gerbens_test1.move @@ -0,0 +1,93 @@ +//# publish +// Should succeed. +module 0x42::test_case { + use std::vector; + + /// The index into the vector is out of bounds + const EINVALID_RANGE: u64 = 0x20001; + + /// Apply the function to a reference of each element in the vector. + public inline fun for_each_ref(v: &vector, f: |&Element|) { + let i = 0; + while (i < vector::length(v)) { + f(vector::borrow(v, i)); + i = i + 1 + } + } + + /// Map the function over the references of the elements of the vector, producing a new vector without modifying the + /// original map. + public inline fun map_ref( + v: &vector, + f: |&Element|NewElement + ): vector { + let result = vector[]; + for_each_ref(v, |elem| vector::push_back(&mut result, f(elem))); + result + } + + /// Reverses the order of the elements [left, right) in the vector `v` in place. 
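
Note: the test above and its inline variant that follows exercise the same in-place stable partition; the only difference is whether `for_each_ref` is declared `inline`, since lambdas are only legal as inline-function arguments. Because the algorithm itself (precompute the predicate into a boolean array, recursively partition halves, merge with a three-reversal rotation) is the interesting part, here is a hedged Rust transcription of it; names are illustrative and the asserts mirror the Move test:

```rust
// Rotate [left, right) so that [rot, right) comes before [left, rot);
// returns the new index of the element that was at `rot`.
fn rotate_slice<T>(v: &mut [T], left: usize, rot: usize, right: usize) -> usize {
    v[left..rot].reverse();
    v[rot..right].reverse();
    v[left..right].reverse();
    left + (right - rot)
}

// Recursive stable partition over [left, right), driven by the precomputed
// predicate values; returns the partition point.
fn stable_partition_internal<T>(v: &mut [T], pred: &[bool], left: usize, right: usize) -> usize {
    if left == right {
        left
    } else if left + 1 == right {
        if pred[left] { right } else { left }
    } else {
        let mid = left + ((right - left) >> 1);
        let p1 = stable_partition_internal(v, pred, left, mid);
        let p2 = stable_partition_internal(v, pred, mid, right);
        rotate_slice(v, p1, mid, p2)
    }
}

// The predicate is evaluated once per element before any shuffling happens,
// which is what lets the recursion use original indices safely.
fn stable_partition<T>(v: &mut [T], p: impl Fn(&T) -> bool) -> usize {
    let pred: Vec<bool> = v.iter().map(|e| p(e)).collect();
    let len = v.len();
    stable_partition_internal(v, &pred, 0, len)
}

fn main() {
    let mut v = vec![1, 2, 3, 4, 5];
    let split = stable_partition(&mut v, |n| n % 2 == 0);
    assert_eq!(split, 2);
    assert_eq!(v, vec![2, 4, 1, 3, 5]); // matching elements first, order preserved
}
```
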
+ public fun reverse_slice(v: &mut vector, left: u64, right: u64) { + assert!(left <= right, EINVALID_RANGE); + if (left == right) return; + right = right - 1; + while (left < right) { + vector::swap(v, left, right); + left = left + 1; + right = right - 1; + } + } + /// Same as above but on a sub-slice of an array [left, right) with left <= rot <= right + /// returns the + public fun rotate_slice( + v: &mut vector, + left: u64, + rot: u64, + right: u64 + ): u64 { + reverse_slice(v, left, rot); + reverse_slice(v, rot, right); + reverse_slice(v, left, right); + left + (right - rot) + } + + /// For in-place stable partition we need recursion so we cannot use inline functions + /// and thus we cannot use lambdas. Luckily it so happens that we can precompute the predicate + /// in a secondary array. Note how the algorithm belows only start shuffling items after the + /// predicate is checked. + public fun stable_partition_internal( + v: &mut vector, + pred: &vector, + left: u64, + right: u64 + ): u64 { + if (left == right) { + left + } else if (left + 1 == right) { + if (*vector::borrow(pred, left)) right else left + } else { + let mid = left + ((right - left) >> 1); + let p1 = stable_partition_internal(v, pred, left, mid); + let p2 = stable_partition_internal(v, pred, mid, right); + rotate_slice(v, p1, mid, p2) + } + } + + /// Partition the array based on a predicate p, this routine is stable and thus + /// preserves the relative order of the elements in the two partitions. + public inline fun stable_partition( + v: &mut vector, + p: |&Element|bool + ): u64 { + let pred = map_ref(v, |e| p(e)); + let len = vector::length(v); + stable_partition_internal(v, &pred,0, len) + } + + fun test_stable_partition() { + let v = vector[1, 2, 3, 4, 5]; + let t = stable_partition(&mut v, |n| *n % 2 == 0); + assert!(t == 2, 0); + assert!(&v == &vector[2, 4, 1, 3, 5], 1); + } +} diff --git a/third_party/move/move-compiler/src/command_line/compiler.rs b/third_party/move/move-compiler/src/command_line/compiler.rs index e410c5481a871..bdaaa9d8a777b 100644 --- a/third_party/move/move-compiler/src/command_line/compiler.rs +++ b/third_party/move/move-compiler/src/command_line/compiler.rs @@ -12,7 +12,7 @@ use crate::{ expansion, hlir, inlining, interface_generator, naming, parser, parser::{comments::*, *}, shared::{ - CompilationEnv, Flags, IndexedPackagePath, NamedAddressMap, NamedAddressMaps, + ast_debug, CompilationEnv, Flags, IndexedPackagePath, NamedAddressMap, NamedAddressMaps, NumericalAddress, PackagePaths, }, to_bytecode, typing, unit_test, verification, @@ -780,8 +780,20 @@ fn run( let prog = parser::merge_spec_modules::program(compilation_env, prog); let prog = unit_test::filter_test_members::program(compilation_env, prog); let prog = verification::ast_filter::program(compilation_env, prog); + if compilation_env.flags().debug() { + eprintln!( + "Before expansion: program = {}", + ast_debug::display_verbose(&prog) + ) + }; let eprog = expansion::translate::program(compilation_env, pre_compiled_lib, prog); compilation_env.check_diags_at_or_above_severity(Severity::Bug)?; + if compilation_env.flags().debug() { + eprintln!( + "After expansion: program = {}", + ast_debug::display_verbose(&eprog) + ) + }; run( compilation_env, pre_compiled_lib, @@ -793,6 +805,12 @@ fn run( PassResult::Expansion(eprog) => { let nprog = naming::translate::program(compilation_env, pre_compiled_lib, eprog); compilation_env.check_diags_at_or_above_severity(Severity::Bug)?; + if compilation_env.flags().debug() { + eprintln!( + "After 
naming: program = {}", + ast_debug::display_verbose(&nprog) + ) + }; run( compilation_env, pre_compiled_lib, @@ -804,6 +822,12 @@ fn run( PassResult::Naming(nprog) => { let tprog = typing::translate::program(compilation_env, pre_compiled_lib, nprog); compilation_env.check_diags_at_or_above_severity(Severity::BlockingError)?; + if compilation_env.flags().debug() { + eprintln!( + "After typing: program = {}", + ast_debug::display_verbose(&tprog) + ) + }; run( compilation_env, pre_compiled_lib, @@ -815,6 +839,12 @@ fn run( PassResult::Typing(mut tprog) => { inlining::translate::run_inlining(compilation_env, &mut tprog); compilation_env.check_diags_at_or_above_severity(Severity::BlockingError)?; + if compilation_env.flags().debug() { + eprintln!( + "After inlining: program = {}", + ast_debug::display_verbose(&tprog) + ) + }; run( compilation_env, pre_compiled_lib, @@ -826,6 +856,12 @@ fn run( PassResult::Inlining(tprog) => { let hprog = hlir::translate::program(compilation_env, pre_compiled_lib, tprog); compilation_env.check_diags_at_or_above_severity(Severity::Bug)?; + if compilation_env.flags().debug() { + eprintln!( + "After hlir: program = {}", + ast_debug::display_verbose(&hprog) + ) + }; run( compilation_env, pre_compiled_lib, @@ -837,6 +873,12 @@ fn run( PassResult::HLIR(hprog) => { let cprog = cfgir::translate::program(compilation_env, pre_compiled_lib, hprog); compilation_env.check_diags_at_or_above_severity(Severity::NonblockingError)?; + if compilation_env.flags().debug() { + eprintln!( + "After cfgir: program = {}", + ast_debug::display_verbose(&cprog) + ) + }; run( compilation_env, pre_compiled_lib, diff --git a/third_party/move/move-compiler/src/command_line/mod.rs b/third_party/move/move-compiler/src/command_line/mod.rs index e0badc1288326..fa09bfb459f8f 100644 --- a/third_party/move/move-compiler/src/command_line/mod.rs +++ b/third_party/move/move-compiler/src/command_line/mod.rs @@ -37,3 +37,7 @@ pub const COLOR_MODE_ENV_VAR: &str = "COLOR_MODE"; pub const MOVE_COMPILED_INTERFACES_DIR: &str = "mv_interfaces"; pub const COMPILED_NAMED_ADDRESS_MAPPING: &str = "compiled-module-address-name"; + +pub const MOVE_COMPILER_DEBUG_ENV_VAR: &str = "MOVE_COMPILER_DEBUG"; + +pub const DEBUG_FLAG: &str = "debug"; diff --git a/third_party/move/move-compiler/src/diagnostics/codes.rs b/third_party/move/move-compiler/src/diagnostics/codes.rs index 04fe20fe839a1..5e5623e5bf41e 100644 --- a/third_party/move/move-compiler/src/diagnostics/codes.rs +++ b/third_party/move/move-compiler/src/diagnostics/codes.rs @@ -248,6 +248,7 @@ codes!( Recursion: { msg: "recursion during function inlining not allowed", severity: BlockingError }, AfterExpansion: { msg: "Inlined code invalid in this context", severity: BlockingError }, Unsupported: { msg: "feature not supported in inlined functions", severity: BlockingError }, + UnexpectedLambda: { msg: "lambda parameter only permitted as parameter to inlined function", severity: BlockingError }, ], ); diff --git a/third_party/move/move-compiler/src/expansion/translate.rs b/third_party/move/move-compiler/src/expansion/translate.rs index 971a0791fe8d1..46d0a816744e2 100644 --- a/third_party/move/move-compiler/src/expansion/translate.rs +++ b/third_party/move/move-compiler/src/expansion/translate.rs @@ -27,7 +27,6 @@ use crate::{ use move_command_line_common::parser::{parse_u16, parse_u256, parse_u32}; use move_ir_types::location::*; use move_symbol_pool::Symbol; -use once_cell::sync::Lazy; use std::{ collections::{BTreeMap, BTreeSet, VecDeque}, iter::IntoIterator, @@ 
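
Note: the new `--debug` flag and the `MOVE_COMPILER_DEBUG` environment variable gate verbose AST dumps after each compiler pass. A hedged sketch of the pattern, with a toy `Program` type standing in for the real AST; the exact truthy values accepted by `read_bool_env_var` are an assumption here:

```rust
use std::env;

#[derive(Debug)]
struct Program(Vec<String>); // stand-in for the compiler's intermediate AST

fn debug_enabled() -> bool {
    // Assumption: "1" or "true" enables debugging; the real helper may differ.
    env::var("MOVE_COMPILER_DEBUG")
        .map_or(false, |v| v == "1" || v.eq_ignore_ascii_case("true"))
}

fn run_pass(name: &str, prog: Program, pass: impl Fn(Program) -> Program) -> Program {
    let out = pass(prog);
    if debug_enabled() {
        // Mirrors the `if compilation_env.flags().debug() { eprintln!(...) }` hunks.
        eprintln!("After {}: program = {:?}", name, out);
    }
    out
}

fn main() {
    let prog = Program(vec!["module 0x42::m".to_string()]);
    let prog = run_pass("expansion", prog, |p| p);
    let _ = run_pass("naming", prog, |p| p);
}
```
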
-48,7 +47,6 @@ struct Context<'env, 'map> { in_spec_context: bool, exp_specs: BTreeMap, env: &'env mut CompilationEnv, - in_aptos_stdlib: bool, // TODO(https://github.com/aptos-labs/aptos-core/issues/9410) remove after bugfix propagates. } impl<'env, 'map> Context<'env, 'map> { fn new( @@ -63,7 +61,6 @@ impl<'env, 'map> Context<'env, 'map> { aliases: AliasMap::new(), is_source_definition: false, in_spec_context: false, - in_aptos_stdlib: false, exp_specs: BTreeMap::new(), } } @@ -395,32 +392,6 @@ fn set_sender_address( }) } -// This is a hack to recognize APTOS StdLib to avoid warnings on some old errors. -// This will be removed after library attributes are cleaned up. -// (See https://github.com/aptos-labs/aptos-core/issues/9410) -fn module_is_in_aptos_stdlib(module_address: Option>) -> bool { - const APTOS_STDLIB_NAME: &str = "aptos_std"; - static APTOS_STDLIB_NUMERICAL_ADDRESS: Lazy = - Lazy::new(|| NumericalAddress::parse_str("0x1").unwrap()); - match &module_address { - Some(spanned_address) => { - let address = spanned_address.value; - match address { - Address::Numerical(optional_name, spanned_numerical_address) => match optional_name - { - Some(spanned_symbol) => { - (&spanned_symbol.value as &str) == APTOS_STDLIB_NAME - && (spanned_numerical_address.value == *APTOS_STDLIB_NUMERICAL_ADDRESS) - }, - None => false, - }, - Address::NamedUnassigned(_) => false, - } - }, - None => false, - } -} - fn module_( context: &mut Context, package_name: Option, @@ -435,7 +406,6 @@ fn module_( name, members, } = mdef; - context.in_aptos_stdlib = module_is_in_aptos_stdlib(module_address); let attributes = flatten_attributes(context, AttributePosition::Module, attributes); assert!(context.address.is_none()); @@ -634,16 +604,13 @@ fn unique_attributes( let flags = &context.env.flags(); if !flags.skip_attribute_checks() { let known_attributes = &context.env.get_known_attributes(); - // TODO(See https://github.com/aptos-labs/aptos-core/issues/9410) remove after bugfix propagates. if !is_nested && !known_attributes.contains(sym.as_str()) { - if !context.in_aptos_stdlib { - let msg = format!("Attribute name '{}' is unknown (use --{} CLI option to ignore); known attributes are '{:?}'.", - sym.as_str(), - SKIP_ATTRIBUTE_CHECKS, known_attributes); - context - .env - .add_diag(diag!(Declarations::UnknownAttribute, (nloc, msg))); - } + let msg = format!("Attribute name '{}' is unknown (use --{} CLI option to ignore); known attributes are '{:?}'.", + sym.as_str(), + SKIP_ATTRIBUTE_CHECKS, known_attributes); + context + .env + .add_diag(diag!(Declarations::UnknownAttribute, (nloc, msg))); } else if is_nested && known_attributes.contains(sym.as_str()) { let msg = format!( "Known attribute '{}' is not expected in a nested attribute position.", diff --git a/third_party/move/move-compiler/src/hlir/translate.rs b/third_party/move/move-compiler/src/hlir/translate.rs index eb381f71f7ee5..529bed6f35bd5 100644 --- a/third_party/move/move-compiler/src/hlir/translate.rs +++ b/third_party/move/move-compiler/src/hlir/translate.rs @@ -1375,7 +1375,13 @@ fn exp_impl( }; HE::Spec(hanchor) }, - TE::Lambda(..) 
=> panic!("ICE unexpected lambda"), + TE::Lambda(_lvalue_list, _boxed_exp) => { + context.env.add_diag(diag!( + Inlining::UnexpectedLambda, + (eloc, "unexpected lambda") + )); + HE::UnresolvedError + }, TE::UnresolvedError => { assert!(context.env.has_errors()); HE::UnresolvedError diff --git a/third_party/move/move-compiler/src/inlining/translate.rs b/third_party/move/move-compiler/src/inlining/translate.rs index 9e1e58931924f..d7100ed3d330f 100644 --- a/third_party/move/move-compiler/src/inlining/translate.rs +++ b/third_party/move/move-compiler/src/inlining/translate.rs @@ -4,7 +4,7 @@ use crate::{ diag, expansion::ast::{AbilitySet, ModuleIdent, ModuleIdent_, SpecId, Visibility}, - inlining::visitor::{Dispatcher, TypedDispatcher, TypedVisitor, Visitor, VisitorContinuation}, + inlining::visitor::{Dispatcher, Visitor, VisitorContinuation}, naming, naming::ast::{ FunctionSignature, StructDefinition, StructTypeParameter, TParam, TParamID, Type, @@ -19,6 +19,7 @@ use crate::{ SpecLambdaLiftedFunction, UnannotatedExp_, }, core::{infer_abilities, InferAbilityContext, Subst}, + translate::lvalues_expected_types, }, }; use move_ir_types::location::{sp, Loc}; @@ -230,7 +231,8 @@ struct SubstitutionVisitor<'l, 'r> { impl<'l, 'r> Visitor for SubstitutionVisitor<'l, 'r> { fn type_(&mut self, ty: &mut Type) -> VisitorContinuation { - visit_type(&self.type_arguments, ty) + visit_type(&self.type_arguments, ty); + VisitorContinuation::Descend } fn exp(&mut self, ex: &mut Exp) -> VisitorContinuation { @@ -304,7 +306,7 @@ impl<'l, 'r> Visitor for SubstitutionVisitor<'l, 'r> { self.shadowed.pop_front(); } - fn var_decl(&mut self, var: &mut Var) { + fn var_decl(&mut self, _ty: &mut Type, var: &mut Var) { self.shadowed .front_mut() .expect("scoped") @@ -517,7 +519,7 @@ impl<'l, 'r> Visitor for RenamingVisitor<'l, 'r> { } } - fn var_decl(&mut self, var: &mut Var) { + fn var_decl(&mut self, _ty: &mut Type, var: &mut Var) { let new_name = Symbol::from(format!("{}#{}", var.0.value, self.inliner.rename_counter)); self.inliner.rename_counter += 1; self.renamings @@ -527,7 +529,7 @@ impl<'l, 'r> Visitor for RenamingVisitor<'l, 'r> { var.0.value = new_name; } - fn var_use(&mut self, var: &mut Var) { + fn var_use(&mut self, _ty: &mut Type, var: &mut Var) { for mapping in &self.renamings { if let Some(new_name) = mapping.get(&var.0.value) { var.0.value = *new_name @@ -548,8 +550,8 @@ struct SignatureExtractionVisitor<'l, 'r> { used_type_params: BTreeSet, } -impl<'l, 'r> TypedVisitor for SignatureExtractionVisitor<'l, 'r> { - fn ty(&mut self, t: &mut Type) -> VisitorContinuation { +impl<'l, 'r> Visitor for SignatureExtractionVisitor<'l, 'r> { + fn type_(&mut self, t: &mut Type) -> VisitorContinuation { if let Type_::Param(param) = &t.value { self.used_type_params.insert(param.clone()); } @@ -634,23 +636,43 @@ impl<'l> Inliner<'l> { mcall.name.0.value ), }; - let type_arguments = fdef + let type_arguments: BTreeMap = fdef .signature .type_parameters .iter() .zip(mcall.type_arguments.iter()) .map(|(p, t)| (p.id, t.clone())) .collect(); + let mut inliner_visitor = OuterVisitor { inliner: self }; let mut inlined_args = mcall.arguments.clone(); Dispatcher::new(&mut inliner_visitor).exp(&mut inlined_args); - let mapped_params = fdef + + // Expand Type formal params in types of other params. 
+ let mut param_visitor = TypeSubstitutionVisitor { + type_arguments: type_arguments.clone(), + }; + let mut param_dispatcher = Dispatcher::new(&mut param_visitor); + let fix_types = |(var, mut spanned_type): (Var, Type)| { + param_dispatcher.type_(&mut spanned_type); + self.infer_abilities(&mut spanned_type); + (var, spanned_type) + }; + let mapped_params: Vec<_> = fdef .signature .parameters .iter() .cloned() - .zip(get_args_from_exp(&inlined_args)); - let (decls_for_let, bindings) = self.process_parameters(call_loc, mapped_params); + .map(fix_types) + .zip(get_args_from_exp(&inlined_args)) + .collect(); + let (decls_for_let, bindings) = + self.process_parameters(call_loc, mapped_params.into_iter()); + + // Expand Type formal params in result type + let mut result_type = fdef.signature.return_type.clone(); + param_dispatcher.type_(&mut result_type); + self.infer_abilities(&mut result_type); // Expand the body in its own independent visitor self.inline_stack.push_front(global_name); // for cycle detection @@ -666,7 +688,16 @@ impl<'l> Inliner<'l> { for decl in decls_for_let.into_iter().rev() { seq.push_front(decl) } - Some(UnannotatedExp_::Block(seq)) + + let body_loc = fdef.body.loc; + let block_expr = sp(body_loc, UnannotatedExp_::Block(seq)); + Some(UnannotatedExp_::Annotate( + Box::new(Exp { + exp: block_expr, + ty: result_type.clone(), + }), + Box::new(result_type), + )) } else { None } @@ -685,27 +716,36 @@ impl<'l> Inliner<'l> { let mut tys = vec![]; let mut exps = vec![]; - for ((var, _), e) in params { - let ty = e.ty.clone(); + for ((var, ty), e) in params { if ty.value.is_fun() { bindings.insert(var.0.value, e); } else { lvalues.push(sp(loc, LValue_::Var(var, Box::new(ty.clone())))); - tys.push(ty); + tys.push(ty.clone()); exps.push(e); } } - let opt_tys = tys.iter().map(|t| Some(t.clone())).collect(); - let exp = match exps.len() { 0 => Exp { ty: sp(loc, Type_::Unit), exp: sp(loc, UnannotatedExp_::Unit { trailing: false }), }, - 1 => exps.pop().unwrap(), + 1 => { + let exp1 = exps.pop().unwrap(); + let mut ty = tys.pop().unwrap(); + self.infer_abilities(&mut ty); + + Exp { + ty: ty.clone(), + exp: sp( + loc, + UnannotatedExp_::Annotate(Box::new(exp1), Box::new(ty.clone())), + ), + } + }, _ => { - let mut ty = Type_::multiple(loc, tys); + let mut ty = Type_::multiple(loc, tys.clone()); self.infer_abilities(&mut ty); Exp { @@ -714,9 +754,21 @@ impl<'l> Inliner<'l> { loc, UnannotatedExp_::ExpList( exps.into_iter() - .map(|e| { - let ty = e.ty.clone(); - ExpListItem::Single(e, Box::new(ty)) + .zip(tys.into_iter()) + .map(|(e, ty)| { + ExpListItem::Single( + Exp { + exp: sp( + loc, + UnannotatedExp_::Annotate( + Box::new(e), + Box::new(ty.clone()), + ), + ), + ty: ty.clone(), + }, + Box::new(ty.clone()), + ) }) .collect(), ), @@ -725,9 +777,12 @@ impl<'l> Inliner<'l> { }, }; + let spanned_lvalues = sp(loc, lvalues); + let lvalue_ty = lvalues_expected_types(&spanned_lvalues); + let decl = sp( loc, - SequenceItem_::Bind(sp(loc, lvalues), opt_tys, Box::new(exp)), + SequenceItem_::Bind(spanned_lvalues, lvalue_ty, Box::new(exp)), ); (vec![decl], bindings) } @@ -879,7 +934,7 @@ fn lift_lambda_as_function( used_local_vars: BTreeMap::new(), used_type_params: BTreeSet::new(), }; - TypedDispatcher::new(&mut extraction_visitor).exp(&mut lambda); + Dispatcher::new(&mut extraction_visitor).exp(&mut lambda); let SignatureExtractionVisitor { inliner: _, declared_vars: _, @@ -967,3 +1022,13 @@ fn visit_type(subs: &BTreeMap, ty: &mut Type) -> VisitorContinua } VisitorContinuation::Descend } + 
+struct TypeSubstitutionVisitor { + type_arguments: BTreeMap, +} + +impl Visitor for TypeSubstitutionVisitor { + fn type_(&mut self, ty: &mut Type) -> VisitorContinuation { + visit_type(&self.type_arguments, ty) + } +} diff --git a/third_party/move/move-compiler/src/inlining/visitor.rs b/third_party/move/move-compiler/src/inlining/visitor.rs index 399c7e386e709..e0756e6ad78da 100644 --- a/third_party/move/move-compiler/src/inlining/visitor.rs +++ b/third_party/move/move-compiler/src/inlining/visitor.rs @@ -28,8 +28,8 @@ pub trait Visitor { VisitorContinuation::Descend } fn enter_scope(&mut self) {} - fn var_decl(&mut self, _var: &mut Var) {} - fn var_use(&mut self, _var: &mut Var) {} + fn var_decl(&mut self, _ty: &mut Type, _var: &mut Var) {} + fn var_use(&mut self, _ty: &mut Type, _var: &mut Var) {} fn exit_scope(&mut self) {} fn infer_abilities(&mut self, _ty: &mut Type) {} } @@ -43,285 +43,6 @@ impl<'l, V: Visitor> Dispatcher<'l, V> { Self { visitor } } - pub fn function(&mut self, fdef: &mut Function) { - self.visitor.enter_scope(); - for (var, _) in fdef.signature.parameters.iter_mut() { - self.visitor.var_decl(var) - } - match &mut fdef.body.value { - FunctionBody_::Native => {}, - FunctionBody_::Defined(seq) => self.sequence(seq), - } - self.visitor.exit_scope() - } - - pub fn type_(&mut self, ty: &mut Type) { - if self.visitor.type_(ty) == VisitorContinuation::Stop { - return; - } - match &mut ty.value { - Type_::Ref(_, ty) => self.type_(ty.as_mut()), - Type_::Apply(_, _, tys) => { - self.types(tys.iter_mut()); - self.visitor.infer_abilities(ty) - }, - Type_::Unit - | Type_::Param(_) - | Type_::Var(_) - | Type_::Anything - | Type_::UnresolvedError => {}, - } - } - - fn types<'r>(&mut self, tys: impl Iterator) { - for ty in tys { - self.type_(ty) - } - } - - pub fn exp(&mut self, ex: &mut Exp) { - self.type_(&mut ex.ty); - if self.visitor.exp(ex) == VisitorContinuation::Stop { - return; - } - self.exp_unannotated(ex.exp.loc, &mut ex.exp.value) - } - - pub fn exp_unannotated(&mut self, _loc: Loc, ex: &mut UnannotatedExp_) { - match ex { - UnannotatedExp_::ModuleCall(mc) => { - let ModuleCall { - type_arguments, - arguments, - parameter_types, - .. 
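
Note: `TypeSubstitutionVisitor` above instantiates a callee's formal type parameters with the caller's type arguments while inlining, both in parameter types and in the result type. A hedged sketch of that substitution as a plain recursive function; the `Type` enum is a simplified stand-in for the compiler's type representation:

```rust
use std::collections::BTreeMap;

#[derive(Clone, Debug, PartialEq)]
enum Type {
    Unit,
    Param(u16),               // formal type parameter, by id
    Ref(Box<Type>),
    Apply(String, Vec<Type>), // named type applied to type arguments
}

// Replace every Param(id) with its actual type argument, if one is known.
fn substitute(ty: Type, subs: &BTreeMap<u16, Type>) -> Type {
    match ty {
        Type::Param(id) => subs.get(&id).cloned().unwrap_or(Type::Param(id)),
        Type::Ref(inner) => Type::Ref(Box::new(substitute(*inner, subs))),
        Type::Apply(name, args) => Type::Apply(
            name,
            args.into_iter().map(|a| substitute(a, subs)).collect(),
        ),
        Type::Unit => Type::Unit,
    }
}

fn main() {
    // vector<T0> with T0 := u64 (spelled here as Apply("u64", [])).
    let ty = Type::Apply("vector".to_string(), vec![Type::Param(0)]);
    let subs = BTreeMap::from([(0u16, Type::Apply("u64".to_string(), vec![]))]);
    let instantiated = substitute(ty, &subs);
    assert_eq!(
        instantiated,
        Type::Apply("vector".to_string(), vec![Type::Apply("u64".to_string(), vec![])])
    );
}
```
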
- } = mc.as_mut(); - self.types(type_arguments.iter_mut()); - self.types(parameter_types.iter_mut()); - self.exp(arguments) - }, - UnannotatedExp_::Use(var) - | UnannotatedExp_::Copy { from_user: _, var } - | UnannotatedExp_::Move { from_user: _, var } - | UnannotatedExp_::BorrowLocal(_, var) => self.visitor.var_use(var), - - UnannotatedExp_::VarCall(var, ex) => { - self.visitor.var_use(var); - self.exp(ex) - }, - UnannotatedExp_::Lambda(decls, body) => { - self.visitor.enter_scope(); - self.lvalue_list(decls, /*declared*/ true); - self.exp(body); - self.visitor.exit_scope(); - }, - - UnannotatedExp_::IfElse(cex, iex, eex) => { - self.exp(cex.as_mut()); - self.exp(iex.as_mut()); - self.exp(eex.as_mut()); - }, - UnannotatedExp_::While(cex, bex) => { - self.exp(cex.as_mut()); - self.exp(bex.as_mut()); - }, - UnannotatedExp_::Block(seq) => self.sequence(seq), - UnannotatedExp_::Mutate(dex, sex) => { - self.exp(dex.as_mut()); - self.exp(sex.as_mut()); - }, - UnannotatedExp_::BinopExp(lex, _, ty, rex) => { - self.type_(ty.as_mut()); - self.exp(lex.as_mut()); - self.exp(rex.as_mut()); - }, - UnannotatedExp_::Pack(_, _, tys, fields) => { - self.types(tys.iter_mut()); - for (_, _, (_, (ty, ex))) in fields.iter_mut() { - self.type_(ty); - self.exp(ex); - } - }, - UnannotatedExp_::ExpList(items) => { - for item in items.iter_mut() { - match item { - ExpListItem::Single(ex, ty) => { - self.type_(ty.as_mut()); - self.exp(ex) - }, - ExpListItem::Splat(_, ex, tys) => { - self.types(tys.iter_mut()); - self.exp(ex) - }, - } - } - }, - UnannotatedExp_::Assign(lhs, tys, ex) => { - self.lvalue_list(lhs, /*declared*/ false); - self.types(tys.iter_mut().filter_map(|f| f.as_mut())); - self.exp(ex.as_mut()); - }, - UnannotatedExp_::Vector(_, _, ty, ex) => { - self.type_(ty.as_mut()); - self.exp(ex.as_mut()) - }, - UnannotatedExp_::Cast(ex, ty) | UnannotatedExp_::Annotate(ex, ty) => { - self.type_(ty.as_mut()); - self.exp(ex.as_mut()) - }, - - UnannotatedExp_::Loop { body: ex, .. } - | UnannotatedExp_::Return(ex) - | UnannotatedExp_::Abort(ex) - | UnannotatedExp_::Dereference(ex) - | UnannotatedExp_::UnaryExp(_, ex) - | UnannotatedExp_::Borrow(_, ex, _) - | UnannotatedExp_::TempBorrow(_, ex) => self.exp(ex.as_mut()), - - UnannotatedExp_::Builtin(fun, ex) => { - self.builtin_function(fun.as_mut()); - self.exp(ex.as_mut()) - }, - - UnannotatedExp_::Spec(anchor) => { - let SpecAnchor { - id: _, - origin: _, - used_locals, - used_lambda_funs, - } = anchor; - - // re-organize the locals - { - let keys: Vec<_> = used_locals.keys().cloned().collect(); - let mut temp = BTreeMap::new(); - for key in keys { - let (orig_var, (mut ty, mut var)) = used_locals.remove_entry(&key).unwrap(); - self.type_(&mut ty); - self.visitor.var_use(&mut var); - temp.insert(orig_var, (ty, var)); - } - used_locals.append(&mut temp); - } - - // re-organize the lambdas - { - let keys: Vec<_> = used_lambda_funs.keys().cloned().collect(); - let mut temp = BTreeMap::new(); - for key in keys { - let (name, mut fun) = used_lambda_funs.remove_entry(&key).unwrap(); - - self.visitor.enter_scope(); - for (v, t) in fun.signature.parameters.iter_mut() { - self.type_(t); - self.visitor.var_decl(v); - } - self.type_(&mut fun.signature.return_type); - self.exp(fun.body.as_mut()); - self.visitor.exit_scope(); - - temp.insert(name, fun); - } - used_lambda_funs.append(&mut temp); - } - }, - - UnannotatedExp_::Unit { .. 
} - | UnannotatedExp_::Value(_) - | UnannotatedExp_::Constant(_, _) - | UnannotatedExp_::Break - | UnannotatedExp_::Continue - | UnannotatedExp_::UnresolvedError => {}, - } - } - - fn builtin_function(&mut self, fun: &mut BuiltinFunction) { - match &mut fun.value { - BuiltinFunction_::MoveTo(ty) - | BuiltinFunction_::MoveFrom(ty) - | BuiltinFunction_::BorrowGlobal(_, ty) - | BuiltinFunction_::Exists(ty) - | BuiltinFunction_::Freeze(ty) => self.type_(ty), - BuiltinFunction_::Assert(_) => {}, - } - } - - pub fn sequence(&mut self, seq: &mut Sequence) { - let mut scope_cnt = 0; - for item in seq.iter_mut() { - match &mut item.value { - SequenceItem_::Bind(decls, tys, e) => { - self.exp(e.as_mut()); - self.types(tys.iter_mut().filter_map(|t| t.as_mut())); - self.visitor.enter_scope(); - self.lvalue_list(decls, /*declared*/ true); - scope_cnt += 1; - }, - SequenceItem_::Declare(decls) => { - self.visitor.enter_scope(); - self.lvalue_list(decls, /*declared*/ true); - scope_cnt += 1; - }, - SequenceItem_::Seq(e) => self.exp(e.as_mut()), - } - } - while scope_cnt > 0 { - self.visitor.exit_scope(); - scope_cnt -= 1 - } - } - - pub fn lvalue_list(&mut self, decls: &mut LValueList, declared: bool) { - for lv in &mut decls.value { - self.lvalue(lv, declared) - } - } - - fn lvalue(&mut self, lv: &mut LValue, declared: bool) { - match &mut lv.value { - LValue_::Var(var, ty) => { - self.type_(ty.as_mut()); - if declared { - self.visitor.var_decl(var) - } else { - self.visitor.var_use(var) - } - }, - LValue_::Unpack(_, _, tys, fields) | LValue_::BorrowUnpack(_, _, _, tys, fields) => { - self.types(tys.iter_mut()); - for (_, _, (_, (ty, slv))) in fields.iter_mut() { - self.type_(ty); - self.lvalue(slv, declared); - } - }, - LValue_::Ignore => {}, - } - } -} - -pub trait TypedVisitor { - fn exp(&mut self, _ex: &mut Exp) -> VisitorContinuation { - VisitorContinuation::Descend - } - fn ty(&mut self, _t: &mut Type) -> VisitorContinuation { - VisitorContinuation::Descend - } - fn enter_scope(&mut self) {} - fn var_decl(&mut self, _ty: &mut Type, _var: &mut Var) {} - fn var_use(&mut self, _ty: &mut Type, _var: &mut Var) {} - fn exit_scope(&mut self) {} -} - -pub struct TypedDispatcher<'l, V: TypedVisitor> { - visitor: &'l mut V, -} - -impl<'l, V: TypedVisitor> TypedDispatcher<'l, V> { - pub fn new(visitor: &'l mut V) -> Self { - Self { visitor } - } - - #[allow(unused)] pub fn function(&mut self, fdef: &mut Function) { self.visitor.enter_scope(); for (var, ty) in fdef.signature.parameters.iter_mut() { @@ -337,13 +58,14 @@ impl<'l, V: TypedVisitor> TypedDispatcher<'l, V> { } pub fn type_(&mut self, ty: &mut Type) { - if self.visitor.ty(ty) == VisitorContinuation::Stop { + if self.visitor.type_(ty) == VisitorContinuation::Stop { return; } match &mut ty.value { Type_::Ref(_, ty) => self.type_(ty.as_mut()), Type_::Apply(_, _, tys) => { self.types(tys.iter_mut()); + self.visitor.infer_abilities(ty) }, Type_::Unit | Type_::Param(_) @@ -371,11 +93,13 @@ impl<'l, V: TypedVisitor> TypedDispatcher<'l, V> { match ex { UnannotatedExp_::ModuleCall(mc) => { let ModuleCall { - arguments, type_arguments, + arguments, + parameter_types, .. 
} = mc.as_mut(); self.types(type_arguments.iter_mut()); + self.types(parameter_types.iter_mut()); self.exp(arguments) }, UnannotatedExp_::Use(var) @@ -417,7 +141,7 @@ impl<'l, V: TypedVisitor> TypedDispatcher<'l, V> { self.exp(sex.as_mut()); }, UnannotatedExp_::BinopExp(lex, _, ty, rex) => { - self.type_(ty); + self.type_(ty.as_mut()); self.exp(lex.as_mut()); self.exp(rex.as_mut()); }, @@ -533,7 +257,7 @@ impl<'l, V: TypedVisitor> TypedDispatcher<'l, V> { } pub fn sequence(&mut self, seq: &mut Sequence) { - let mut scoped = false; + let mut scope_cnt = 0; for item in seq.iter_mut() { match &mut item.value { SequenceItem_::Bind(decls, tys, e) => { @@ -541,18 +265,19 @@ impl<'l, V: TypedVisitor> TypedDispatcher<'l, V> { self.types(tys.iter_mut().filter_map(|t| t.as_mut())); self.visitor.enter_scope(); self.lvalue_list(decls, /*declared*/ true); - scoped = true; + scope_cnt += 1; }, SequenceItem_::Declare(decls) => { self.visitor.enter_scope(); self.lvalue_list(decls, /*declared*/ true); - scoped = true; + scope_cnt += 1; }, SequenceItem_::Seq(e) => self.exp(e.as_mut()), } } - if scoped { + while scope_cnt > 0 { self.visitor.exit_scope(); + scope_cnt -= 1 } } diff --git a/third_party/move/move-compiler/src/shared/mod.rs b/third_party/move/move-compiler/src/shared/mod.rs index c55fb9053cd71..172a13b709cf9 100644 --- a/third_party/move/move-compiler/src/shared/mod.rs +++ b/third_party/move/move-compiler/src/shared/mod.rs @@ -8,8 +8,10 @@ use crate::{ naming::ast::ModuleDefinition, }; use clap::*; +use move_command_line_common::env::read_bool_env_var; use move_ir_types::location::*; use move_symbol_pool::Symbol; +use once_cell::sync::Lazy; use petgraph::{algo::astar as petgraph_astar, graphmap::DiGraphMap}; use std::{ collections::{BTreeMap, BTreeSet}, @@ -283,6 +285,20 @@ pub fn format_comma>(items: I) -> Str // Flags //************************************************************************************************** +pub fn debug_compiler_env_var() -> bool { + static DEBUG_COMPILER: Lazy = + Lazy::new(|| read_bool_env_var(cli::MOVE_COMPILER_DEBUG_ENV_VAR)); + *DEBUG_COMPILER +} + +pub fn debug_compiler_env_var_str() -> &'static str { + if debug_compiler_env_var() { + "true" + } else { + "false" + } +} + #[derive(Clone, Debug, Eq, PartialEq, Parser)] pub struct Flags { /// Compile in test mode @@ -329,7 +345,11 @@ pub struct Flags { #[clap( long = cli::SKIP_ATTRIBUTE_CHECKS, )] - pub skip_attribute_checks: bool, + skip_attribute_checks: bool, + + /// Debug compiler by printing out internal information + #[clap(long = cli::DEBUG_FLAG, default_value=debug_compiler_env_var_str())] + debug: bool, } impl Flags { @@ -342,6 +362,7 @@ impl Flags { bytecode_version: None, keep_testing_functions: false, skip_attribute_checks: false, + debug: debug_compiler_env_var(), } } @@ -354,6 +375,7 @@ impl Flags { bytecode_version: None, keep_testing_functions: false, skip_attribute_checks: false, + debug: debug_compiler_env_var(), } } @@ -366,6 +388,7 @@ impl Flags { bytecode_version: None, keep_testing_functions: false, skip_attribute_checks: false, + debug: debug_compiler_env_var(), } } @@ -378,6 +401,7 @@ impl Flags { bytecode_version: None, keep_testing_functions: true, skip_attribute_checks: false, + debug: false, } } @@ -440,6 +464,10 @@ impl Flags { ..self } } + + pub fn debug(&self) -> bool { + self.debug + } } //************************************************************************************************** diff --git a/third_party/move/move-compiler/src/typing/translate.rs 
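
Note: the `scoped` boolean to `scope_cnt` counter change above fixes scope accounting in the sequence walker: a sequence may open one scope per binding item, and exiting at most once leaks scopes. A small sketch of the balanced version, with a toy scope stack standing in for the visitor's `enter_scope`/`exit_scope` hooks:

```rust
struct ScopeStack {
    depth: usize,
}

impl ScopeStack {
    fn enter_scope(&mut self) {
        self.depth += 1;
    }
    fn exit_scope(&mut self) {
        self.depth -= 1;
    }
}

enum SequenceItem {
    Bind,    // `let x = e;` opens a scope for x
    Declare, // `let x;` also opens a scope
    Seq,     // plain expression, no new scope
}

fn walk_sequence(seq: &[SequenceItem], scopes: &mut ScopeStack) {
    let mut scope_cnt = 0;
    for item in seq {
        match item {
            SequenceItem::Bind | SequenceItem::Declare => {
                scopes.enter_scope();
                scope_cnt += 1;
            },
            SequenceItem::Seq => {},
        }
    }
    // A boolean would exit at most one scope here; exit once per scope entered.
    while scope_cnt > 0 {
        scopes.exit_scope();
        scope_cnt -= 1;
    }
}

fn main() {
    let mut scopes = ScopeStack { depth: 0 };
    let seq = [SequenceItem::Bind, SequenceItem::Declare, SequenceItem::Seq];
    walk_sequence(&seq, &mut scopes);
    assert_eq!(scopes.depth, 0); // balanced again after the walk
}
```
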
b/third_party/move/move-compiler/src/typing/translate.rs index 8362b03d8c157..9cd97f941f47b 100644 --- a/third_party/move/move-compiler/src/typing/translate.rs +++ b/third_party/move/move-compiler/src/typing/translate.rs @@ -1064,7 +1064,7 @@ fn sequence(context: &mut Context, seq: N::Sequence) -> T::Sequence { e, } => { context.close_locals_scope(old_locals, declared); - let lvalue_ty = lvalues_expected_types(context, &b); + let lvalue_ty = lvalues_expected_types(&b); resulting_sequence.push_front(sp(loc, TS::Bind(b, lvalue_ty, e))) }, } @@ -1343,7 +1343,7 @@ fn exp_inner(context: &mut Context, sp!(eloc, ne_): N::Exp) -> T::Exp { NE::Assign(na, nr) => { let er = exp(context, nr); let a = assign_list(context, na, er.ty.clone()); - let lvalue_ty = lvalues_expected_types(context, &a); + let lvalue_ty = lvalues_expected_types(&a); (sp(eloc, Type_::Unit), TE::Assign(a, lvalue_ty, er)) }, @@ -1626,16 +1626,11 @@ fn loop_body( // Locals and LValues //************************************************************************************************** -fn lvalues_expected_types( - context: &mut Context, - sp!(_loc, bs_): &T::LValueList, -) -> Vec> { - bs_.iter() - .map(|b| lvalue_expected_types(context, b)) - .collect() +pub fn lvalues_expected_types(sp!(_loc, bs_): &T::LValueList) -> Vec> { + bs_.iter().map(lvalue_expected_types).collect() } -fn lvalue_expected_types(_context: &mut Context, sp!(loc, b_): &T::LValue) -> Option { +fn lvalue_expected_types(sp!(loc, b_): &T::LValue) -> Option { use N::Type_::*; use T::LValue_ as L; let loc = *loc; diff --git a/third_party/move/move-compiler/tests/move_check/inlining/non_lambda_arg.exp b/third_party/move/move-compiler/tests/move_check/inlining/non_lambda_arg.exp new file mode 100644 index 0000000000000..51dfb276b5342 --- /dev/null +++ b/third_party/move/move-compiler/tests/move_check/inlining/non_lambda_arg.exp @@ -0,0 +1,12 @@ +error[E04024]: invalid usage of function type + ┌─ tests/move_check/inlining/non_lambda_arg.move:4:71 + │ +4 │ public fun incorrect_sort(arr: &mut vector, a_less_b: |T, T| bool) { + │ ^^^^^^^^^^^ function type only allowed for inline function arguments + +error[E04024]: invalid usage of function type + ┌─ tests/move_check/inlining/non_lambda_arg.move:9:102 + │ +9 │ public fun incorrect_sort_recursive(arr: &mut vector, low: u64, high: u64, a_less_b: |T, T| bool) { + │ ^^^^^^^^^^^ function type only allowed for inline function arguments + diff --git a/third_party/move/move-compiler/tests/move_check/inlining/non_lambda_arg.move b/third_party/move/move-compiler/tests/move_check/inlining/non_lambda_arg.move new file mode 100644 index 0000000000000..83e9d4027661d --- /dev/null +++ b/third_party/move/move-compiler/tests/move_check/inlining/non_lambda_arg.move @@ -0,0 +1,17 @@ +module 0x42::sort { + use std::vector; + + public fun incorrect_sort(arr: &mut vector, a_less_b: |T, T| bool) { + let n = vector::length(arr); + incorrect_sort_recursive(arr, 0, n - 1, a_less_b) + } + + public fun incorrect_sort_recursive(arr: &mut vector, low: u64, high: u64, a_less_b: |T, T| bool) { + if (low < high) { + let pi = low + high / 2; + incorrect_sort_recursive(arr, low, pi - 1, a_less_b); + incorrect_sort_recursive(arr, pi + 1, high, a_less_b); + }; + } + +} diff --git a/third_party/move/move-compiler/tests/move_check/parser/aptos_stdlib_attributes.exp b/third_party/move/move-compiler/tests/move_check/parser/aptos_stdlib_attributes.exp index 563f0310aeee4..8670763d93819 100644 --- 
a/third_party/move/move-compiler/tests/move_check/parser/aptos_stdlib_attributes.exp +++ b/third_party/move/move-compiler/tests/move_check/parser/aptos_stdlib_attributes.exp @@ -1,3 +1,15 @@ +warning[W02016]: unknown attribute + ┌─ tests/move_check/parser/aptos_stdlib_attributes.move:4:7 + │ +4 │ #[a, a(x = 0)] + │ ^ Attribute name 'a' is unknown (use --skip-attribute-checks CLI option to ignore); known attributes are '{"bytecode_instruction", "deprecated", "expected_failure", "native_interface", "test", "test_only", "verify_only"}'. + +warning[W02016]: unknown attribute + ┌─ tests/move_check/parser/aptos_stdlib_attributes.move:4:10 + │ +4 │ #[a, a(x = 0)] + │ ^ Attribute name 'a' is unknown (use --skip-attribute-checks CLI option to ignore); known attributes are '{"bytecode_instruction", "deprecated", "expected_failure", "native_interface", "test", "test_only", "verify_only"}'. + error[E02001]: duplicate declaration, item, or annotation ┌─ tests/move_check/parser/aptos_stdlib_attributes.move:4:10 │ @@ -6,6 +18,18 @@ error[E02001]: duplicate declaration, item, or annotation │ │ │ Attribute previously given here +warning[W02016]: unknown attribute + ┌─ tests/move_check/parser/aptos_stdlib_attributes.move:7:7 + │ +7 │ #[testonly] + │ ^^^^^^^^ Attribute name 'testonly' is unknown (use --skip-attribute-checks CLI option to ignore); known attributes are '{"bytecode_instruction", "deprecated", "expected_failure", "native_interface", "test", "test_only", "verify_only"}'. + +warning[W02016]: unknown attribute + ┌─ tests/move_check/parser/aptos_stdlib_attributes.move:8:7 + │ +8 │ #[b(a, a = 0, a(x = 1))] + │ ^ Attribute name 'b' is unknown (use --skip-attribute-checks CLI option to ignore); known attributes are '{"bytecode_instruction", "deprecated", "expected_failure", "native_interface", "test", "test_only", "verify_only"}'. + error[E02001]: duplicate declaration, item, or annotation ┌─ tests/move_check/parser/aptos_stdlib_attributes.move:8:12 │ diff --git a/third_party/move/move-compiler/tests/move_check/parser/aptos_stdlib_attributes2.exp b/third_party/move/move-compiler/tests/move_check/parser/aptos_stdlib_attributes2.exp new file mode 100644 index 0000000000000..9f01ddc163d5c --- /dev/null +++ b/third_party/move/move-compiler/tests/move_check/parser/aptos_stdlib_attributes2.exp @@ -0,0 +1,6 @@ +warning[W02016]: unknown attribute + ┌─ tests/move_check/parser/aptos_stdlib_attributes2.move:4:7 + │ +4 │ #[testonly] + │ ^^^^^^^^ Attribute name 'testonly' is unknown (use --skip-attribute-checks CLI option to ignore); known attributes are '{"bytecode_instruction", "deprecated", "expected_failure", "native_interface", "test", "test_only", "verify_only"}'. 
+ diff --git a/third_party/move/move-core/types/Cargo.toml b/third_party/move/move-core/types/Cargo.toml index 1bcc406e23f6f..ff65b21e832e2 100644 --- a/third_party/move/move-core/types/Cargo.toml +++ b/third_party/move/move-core/types/Cargo.toml @@ -12,6 +12,7 @@ edition = "2021" [dependencies] anyhow = "1.0.52" arbitrary = { version = "1.1.7", features = [ "derive_arbitrary"], optional = true } +bytes = { version = "1.4.0" } ethnum = "1.0.4" hex = "0.4.3" num = "0.4.0" diff --git a/third_party/move/move-core/types/src/effects.rs b/third_party/move/move-core/types/src/effects.rs index 6d4bfc595ed0e..78b31e7f65ede 100644 --- a/third_party/move/move-core/types/src/effects.rs +++ b/third_party/move/move-core/types/src/effects.rs @@ -8,6 +8,7 @@ use crate::{ language_storage::{ModuleId, StructTag}, }; use anyhow::{bail, Result}; +use bytes::Bytes; use std::collections::btree_map::{self, BTreeMap}; /// A storage operation. @@ -318,5 +319,5 @@ impl Changes { // These aliases are necessary because AccountChangeSet and ChangeSet were not // generic before. In order to minimise the code changes we alias new generic // types. -pub type AccountChangeSet = AccountChanges, Vec>; -pub type ChangeSet = Changes, Vec>; +pub type AccountChangeSet = AccountChanges; +pub type ChangeSet = Changes; diff --git a/third_party/move/move-core/types/src/resolver.rs b/third_party/move/move-core/types/src/resolver.rs index 5a43faaced949..373f85c23707f 100644 --- a/third_party/move/move-core/types/src/resolver.rs +++ b/third_party/move/move-core/types/src/resolver.rs @@ -8,6 +8,7 @@ use crate::{ metadata::Metadata, }; use anyhow::Error; +use bytes::Bytes; /// Traits for resolving Move modules and resources from persistent storage @@ -23,10 +24,10 @@ use anyhow::Error; pub trait ModuleResolver { fn get_module_metadata(&self, module_id: &ModuleId) -> Vec; - fn get_module(&self, id: &ModuleId) -> Result>, Error>; + fn get_module(&self, id: &ModuleId) -> Result, Error>; } -pub fn resource_size(resource: &Option>) -> usize { +pub fn resource_size(resource: &Option) -> usize { resource.as_ref().map(|bytes| bytes.len()).unwrap_or(0) } @@ -45,7 +46,7 @@ pub trait ResourceResolver { address: &AccountAddress, typ: &StructTag, metadata: &[Metadata], - ) -> Result<(Option>, usize), Error>; + ) -> Result<(Option, usize), Error>; } /// A persistent storage implementation that can resolve both resources and modules @@ -54,7 +55,7 @@ pub trait MoveResolver: ModuleResolver + ResourceResolver { &self, address: &AccountAddress, typ: &StructTag, - ) -> Result>, Error> { + ) -> Result, Error> { Ok(self .get_resource_with_metadata(address, typ, &self.get_module_metadata(&typ.module_id()))? 
.0) @@ -69,7 +70,7 @@ impl ResourceResolver for &T { address: &AccountAddress, tag: &StructTag, metadata: &[Metadata], - ) -> Result<(Option>, usize), Error> { + ) -> Result<(Option, usize), Error> { (**self).get_resource_with_metadata(address, tag, metadata) } } @@ -79,7 +80,7 @@ impl ModuleResolver for &T { (**self).get_module_metadata(module_id) } - fn get_module(&self, module_id: &ModuleId) -> Result>, Error> { + fn get_module(&self, module_id: &ModuleId) -> Result, Error> { (**self).get_module(module_id) } } diff --git a/third_party/move/move-model/bytecode/src/function_target_pipeline.rs b/third_party/move/move-model/bytecode/src/function_target_pipeline.rs index 8faccfd9f1809..5d1477718eca5 100644 --- a/third_party/move/move-model/bytecode/src/function_target_pipeline.rs +++ b/third_party/move/move-model/bytecode/src/function_target_pipeline.rs @@ -524,6 +524,9 @@ impl FunctionTargetPipeline { // check for fixedpoint in summaries for fid in scc { let func_env = env.get_function(*fid); + if func_env.is_inline() { + continue; + } for (_, target) in targets.get_targets(&func_env) { if !target.data.annotations.reached_fixedpoint() { continue 'fixedpoint; diff --git a/third_party/move/move-model/bytecode/src/usage_analysis.rs b/third_party/move/move-model/bytecode/src/usage_analysis.rs index 92a21813098a0..07eb2771cf0b0 100644 --- a/third_party/move/move-model/bytecode/src/usage_analysis.rs +++ b/third_party/move/move-model/bytecode/src/usage_analysis.rs @@ -369,6 +369,9 @@ impl FunctionTargetProcessor for UsageProcessor { continue; } for fun in module.get_functions() { + if fun.is_inline() { + continue; + } for (_, ref target) in targets.get_targets(&fun) { let usage = get_memory_usage(target); writeln!( diff --git a/third_party/move/move-model/src/model.rs b/third_party/move/move-model/src/model.rs index b180e87174d87..92002d53c4463 100644 --- a/third_party/move/move-model/src/model.rs +++ b/third_party/move/move-model/src/model.rs @@ -1202,6 +1202,7 @@ impl GlobalEnv { .collect(); let id = ModuleId(self.module_data.len() as RawIndex); + let used_modules = use_decls.iter().filter_map(|ud| ud.module_id).collect(); self.module_data.push(ModuleData { name, id, @@ -1219,9 +1220,9 @@ impl GlobalEnv { attributes, use_decls, spec_block_infos, - used_modules: Default::default(), + used_modules, used_modules_including_specs: Default::default(), - friend_modules: Default::default(), + friend_modules: Default::default(), // TODO: friend declarations }); id } diff --git a/third_party/move/move-prover/bytecode-pipeline/src/number_operation_analysis.rs b/third_party/move/move-prover/bytecode-pipeline/src/number_operation_analysis.rs index 9699d45cb8e54..f9eecb55719ca 100644 --- a/third_party/move/move-prover/bytecode-pipeline/src/number_operation_analysis.rs +++ b/third_party/move/move-prover/bytecode-pipeline/src/number_operation_analysis.rs @@ -64,6 +64,9 @@ impl NumberOperationProcessor { match item { Either::Left(fid) => { let func_env = env.get_function(*fid); + if func_env.is_inline() { + continue; + } for (_, target) in targets.get_targets(&func_env) { if target.data.code.is_empty() { continue; @@ -74,6 +77,9 @@ impl NumberOperationProcessor { Either::Right(scc) => { for fid in scc { let func_env = env.get_function(*fid); + if func_env.is_inline() { + continue; + } for (_, target) in targets.get_targets(&func_env) { if target.data.code.is_empty() { continue; diff --git a/third_party/move/move-prover/bytecode-pipeline/src/spec_instrumentation.rs 
b/third_party/move/move-prover/bytecode-pipeline/src/spec_instrumentation.rs index ff8329f5fe1b3..8cfedd910ccd8 100644 --- a/third_party/move/move-prover/bytecode-pipeline/src/spec_instrumentation.rs +++ b/third_party/move/move-prover/bytecode-pipeline/src/spec_instrumentation.rs @@ -154,6 +154,9 @@ impl FunctionTargetProcessor for SpecInstrumentationProcessor { continue; } for ref fun in module.get_functions() { + if fun.is_inline() { + continue; + } for (variant, target) in targets.get_targets(fun) { let spec = &*target.get_spec(); if !spec.conditions.is_empty() { diff --git a/third_party/move/move-vm/integration-tests/Cargo.toml b/third_party/move/move-vm/integration-tests/Cargo.toml index 4719a9570cb93..1d9de57a6fcd0 100644 --- a/third_party/move/move-vm/integration-tests/Cargo.toml +++ b/third_party/move/move-vm/integration-tests/Cargo.toml @@ -12,6 +12,7 @@ edition = "2021" [dependencies] anyhow = "1.0.52" +bytes = "1.4.0" memory-stats = "1.0.0" move-binary-format = { path = "../../move-binary-format" } move-bytecode-verifier = { path = "../../move-bytecode-verifier" } @@ -21,7 +22,7 @@ tempfile = "3.2.0" move-core-types = { path = "../../move-core/types" } move-stdlib = { path = "../../move-stdlib" } move-table-extension = { path = "../../extensions/move-table-extension", optional = true } -move-vm-runtime = { path = "../runtime" } +move-vm-runtime = { path = "../runtime", features = ["testing"] } move-vm-test-utils = { path = "../test-utils" } move-vm-types = { path = "../types" } diff --git a/third_party/move/move-vm/integration-tests/src/tests/bad_storage_tests.rs b/third_party/move/move-vm/integration-tests/src/tests/bad_storage_tests.rs index d7e585f5b6fc5..ef17cd05a9209 100644 --- a/third_party/move/move-vm/integration-tests/src/tests/bad_storage_tests.rs +++ b/third_party/move/move-vm/integration-tests/src/tests/bad_storage_tests.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::compiler::{as_module, as_script, compile_units}; +use bytes::Bytes; use move_binary_format::errors::{Location, PartialVMError}; use move_core_types::{ account_address::AccountAddress, @@ -513,7 +514,7 @@ impl ModuleResolver for BogusStorage { vec![] } - fn get_module(&self, _module_id: &ModuleId) -> Result>, anyhow::Error> { + fn get_module(&self, _module_id: &ModuleId) -> Result, anyhow::Error> { Ok(Err( PartialVMError::new(self.bad_status_code).finish(Location::Undefined) )?) @@ -526,7 +527,7 @@ impl ResourceResolver for BogusStorage { _address: &AccountAddress, _tag: &StructTag, _metadata: &[Metadata], - ) -> anyhow::Result<(Option>, usize)> { + ) -> anyhow::Result<(Option, usize)> { Ok(Err( PartialVMError::new(self.bad_status_code).finish(Location::Undefined) )?) 
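
Note: the hunks above and below add the same guard to several prover and bytecode-pipeline processors: inline functions are expanded away during compilation, so they have no bytecode function targets, and the usage analysis, number-operation analysis, spec instrumentation, and fixedpoint check simply skip them. A trivial sketch of that filter; the types are illustrative, not the actual `FunctionEnv`:

```rust
struct FunctionEnv {
    name: &'static str,
    inline: bool,
}

impl FunctionEnv {
    fn is_inline(&self) -> bool {
        self.inline
    }
}

fn process_all(functions: &[FunctionEnv]) -> Vec<&'static str> {
    let mut processed = Vec::new();
    for func_env in functions {
        if func_env.is_inline() {
            // No bytecode target exists for inline functions; nothing to analyze.
            continue;
        }
        processed.push(func_env.name);
    }
    processed
}

fn main() {
    let funcs = [
        FunctionEnv { name: "for_each_ref", inline: true },
        FunctionEnv { name: "transfer", inline: false },
    ];
    assert_eq!(process_all(&funcs), vec!["transfer"]);
}
```
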
@@ -604,8 +605,12 @@ fn test_storage_returns_bogus_error_when_loading_resource() { m.serialize(&mut m_blob).unwrap(); s.serialize(&mut s_blob).unwrap(); let mut delta = ChangeSet::new(); - delta.add_module_op(m.self_id(), Op::New(m_blob)).unwrap(); - delta.add_module_op(s.self_id(), Op::New(s_blob)).unwrap(); + delta + .add_module_op(m.self_id(), Op::New(m_blob.into())) + .unwrap(); + delta + .add_module_op(s.self_id(), Op::New(s_blob.into())) + .unwrap(); let m_id = m.self_id(); let foo_name = Identifier::new("foo").unwrap(); diff --git a/third_party/move/move-vm/integration-tests/src/tests/mod.rs b/third_party/move/move-vm/integration-tests/src/tests/mod.rs index 262bb9618a090..dfc0b81b95bf1 100644 --- a/third_party/move/move-vm/integration-tests/src/tests/mod.rs +++ b/third_party/move/move-vm/integration-tests/src/tests/mod.rs @@ -12,6 +12,7 @@ mod invariant_violation_tests; mod leak_tests; mod loader_tests; mod mutated_accounts_tests; +mod native_tests; mod nested_loop_tests; mod regression_tests; mod return_value_tests; diff --git a/third_party/move/move-vm/integration-tests/src/tests/native_tests.rs b/third_party/move/move-vm/integration-tests/src/tests/native_tests.rs new file mode 100644 index 0000000000000..1c9df82a41868 --- /dev/null +++ b/third_party/move/move-vm/integration-tests/src/tests/native_tests.rs @@ -0,0 +1,101 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::compiler::{as_module, compile_units}; +use move_binary_format::errors::PartialVMResult; +use move_bytecode_verifier::VerifierConfig; +use move_core_types::{ + account_address::AccountAddress, gas_algebra::InternalGas, identifier::Identifier, +}; +use move_vm_runtime::{config::VMConfig, move_vm::MoveVM, native_functions::NativeFunction}; +use move_vm_test_utils::InMemoryStorage; +use move_vm_types::{gas::UnmeteredGasMeter, natives::function::NativeResult}; +use std::sync::Arc; + +const TEST_ADDR: AccountAddress = AccountAddress::new([42; AccountAddress::LENGTH]); + +fn make_failed_native() -> NativeFunction { + Arc::new(move |_, _, _| -> PartialVMResult { + Ok(NativeResult::Abort { + cost: InternalGas::new(0), + abort_code: 12, + }) + }) +} + +#[test] +fn test_publish_module_with_nested_loops() { + // Compile the modules and scripts. + // TODO: find a better way to include the Signer module. 
+ let code = r#" + module {{ADDR}}::M { + entry fun foo() { + Self::bar(); + } + + entry fun foo2() { + Self::foo1(); + } + + fun foo1() { + Self::bar(); + } + + native fun bar(); + } + "#; + let code = code.replace("{{ADDR}}", &format!("0x{}", TEST_ADDR.to_hex())); + let mut units = compile_units(&code).unwrap(); + + let m = as_module(units.pop().unwrap()); + let mut m_blob = vec![]; + m.serialize(&mut m_blob).unwrap(); + + // Should succeed with max_loop_depth = 2 + { + let storage = InMemoryStorage::new(); + + let natives = vec![( + TEST_ADDR, + Identifier::new("M").unwrap(), + Identifier::new("bar").unwrap(), + make_failed_native(), + )]; + let vm = MoveVM::new_with_config(natives.into_iter(), VMConfig { + verifier: VerifierConfig { + max_loop_depth: Some(2), + ..Default::default() + }, + ..Default::default() + }) + .unwrap(); + + let mut sess = vm.new_session(&storage); + sess.publish_module(m_blob.clone(), TEST_ADDR, &mut UnmeteredGasMeter) + .unwrap(); + + let err1 = sess + .execute_entry_function( + &m.self_id(), + &Identifier::new("foo").unwrap(), + vec![], + Vec::>::new(), + &mut UnmeteredGasMeter, + ) + .unwrap_err(); + + assert!(err1.exec_state().unwrap().stack_trace().is_empty()); + + let err2 = sess + .execute_entry_function( + &m.self_id(), + &Identifier::new("foo2").unwrap(), + vec![], + Vec::>::new(), + &mut UnmeteredGasMeter, + ) + .unwrap_err(); + + assert!(err2.exec_state().unwrap().stack_trace().len() == 1); + } +} diff --git a/third_party/move/move-vm/runtime/Cargo.toml b/third_party/move/move-vm/runtime/Cargo.toml index 3d0b2141f4e24..9c9e8ca160b5e 100644 --- a/third_party/move/move-vm/runtime/Cargo.toml +++ b/third_party/move/move-vm/runtime/Cargo.toml @@ -12,6 +12,7 @@ edition = "2021" [dependencies] better_any = "0.1.1" +bytes = "1.4.0" fail = "0.4.0" move-binary-format = { path = "../../move-binary-format" } once_cell = "1.7.2" diff --git a/third_party/move/move-vm/runtime/src/data_cache.rs b/third_party/move/move-vm/runtime/src/data_cache.rs index c9b2f737ea766..4aa7848f18047 100644 --- a/third_party/move/move-vm/runtime/src/data_cache.rs +++ b/third_party/move/move-vm/runtime/src/data_cache.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::loader::Loader; +use bytes::Bytes; use move_binary_format::errors::*; use move_core_types::{ account_address::AccountAddress, @@ -23,7 +24,7 @@ use std::collections::btree_map::BTreeMap; pub struct AccountDataCache { data_map: BTreeMap, - module_map: BTreeMap, bool)>, + module_map: BTreeMap, } impl AccountDataCache { @@ -68,13 +69,15 @@ impl<'r> TransactionDataCache<'r> { /// /// Gives all proper guarantees on lifetime of global data as well. 
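The new native_tests.rs above registers a native function that always aborts and then checks how much of the Move call stack the resulting error records. The sketch below shows only the shape of such a native with self-contained stand-in types; the real NativeFunction in move-vm-runtime takes a NativeContext, type arguments, and an argument stack, and returns PartialVMResult<NativeResult>.

use std::sync::Arc;

// Self-contained stand-ins: the real NativeResult and NativeFunction live in
// move-vm-types / move-vm-runtime and carry gas and value payloads.
#[derive(Debug)]
enum NativeResult {
    Success { cost: u64 },
    Abort { cost: u64, abort_code: u64 },
}

type NativeFunction = Arc<dyn Fn() -> Result<NativeResult, String> + Send + Sync>;

// Mirrors make_failed_native() above: a native that always aborts with code 12
// and charges no gas, so every call through it surfaces an abort error.
fn make_failed_native() -> NativeFunction {
    Arc::new(|| {
        Ok(NativeResult::Abort {
            cost: 0,
            abort_code: 12,
        })
    })
}

fn main() {
    let native = make_failed_native();
    // Arc<dyn Fn> is not callable directly; call through a reference to the closure.
    let result = (&*native)();
    match result.expect("the closure itself never fails") {
        NativeResult::Abort { abort_code, .. } => assert_eq!(abort_code, 12),
        other => panic!("expected an abort, got {:?}", other),
    }
}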
pub(crate) fn into_effects(self, loader: &Loader) -> PartialVMResult { - let resource_converter = - |value: Value, layout: MoveTypeLayout| -> PartialVMResult> { - value.simple_serialize(&layout).ok_or_else(|| { + let resource_converter = |value: Value, layout: MoveTypeLayout| -> PartialVMResult { + value + .simple_serialize(&layout) + .map(Into::into) + .ok_or_else(|| { PartialVMError::new(StatusCode::INTERNAL_TYPE_ERROR) .with_message(format!("Error when serializing resource {}.", value)) }) - }; + }; self.into_custom_effects(&resource_converter, loader) } @@ -84,7 +87,7 @@ impl<'r> TransactionDataCache<'r> { self, resource_converter: &dyn Fn(Value, MoveTypeLayout) -> PartialVMResult, loader: &Loader, - ) -> PartialVMResult, Resource>> { + ) -> PartialVMResult> { let mut change_set = Changes::new(); for (addr, account_data_cache) in self.account_map.into_iter() { let mut modules = BTreeMap::new(); @@ -219,7 +222,7 @@ impl<'r> TransactionDataCache<'r> { )) } - pub(crate) fn load_module(&self, module_id: &ModuleId) -> VMResult> { + pub(crate) fn load_module(&self, module_id: &ModuleId) -> VMResult { if let Some(account_cache) = self.account_map.get(module_id.address()) { if let Some((blob, _is_republishing)) = account_cache.module_map.get(module_id.name()) { return Ok(blob.clone()); @@ -228,7 +231,10 @@ impl<'r> TransactionDataCache<'r> { match self.remote.get_module(module_id) { Ok(Some(bytes)) => Ok(bytes), Ok(None) => Err(PartialVMError::new(StatusCode::LINKER_ERROR) - .with_message(format!("Cannot find {:?} in data cache", module_id)) + .with_message(format!( + "Linker Error: Cannot find {:?} in data cache", + module_id + )) .finish(Location::Undefined)), Err(err) => { let msg = format!("Unexpected storage error: {:?}", err); @@ -252,7 +258,7 @@ impl<'r> TransactionDataCache<'r> { account_cache .module_map - .insert(module_id.name().to_owned(), (blob, is_republishing)); + .insert(module_id.name().to_owned(), (blob.into(), is_republishing)); Ok(()) } diff --git a/third_party/move/move-vm/runtime/src/interpreter.rs b/third_party/move/move-vm/runtime/src/interpreter.rs index c7187fea6ef2f..9dc8610d31866 100644 --- a/third_party/move/move-vm/runtime/src/interpreter.rs +++ b/third_party/move/move-vm/runtime/src/interpreter.rs @@ -590,9 +590,15 @@ impl Interpreter { ty_args, ) .map_err(|e| match function.module_id() { - Some(id) => e - .at_code_offset(function.index(), 0) - .finish(Location::Module(id.clone())), + Some(id) => { + let e = if cfg!(feature = "testing") || cfg!(feature = "stacktrace") { + e.with_exec_state(self.get_internal_state()) + } else { + e + }; + e.at_code_offset(function.index(), 0) + .finish(Location::Module(id.clone())) + }, None => { let err = PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) .with_message("Unexpected native function not located in a module".to_owned()); @@ -808,7 +814,9 @@ impl Interpreter { TypeWithLoader { ty, loader }, res.is_ok(), )?; - self.operand_stack.push(res?)?; + self.operand_stack.push(res.map_err(|err| { + err.with_message(format!("Failed to borrow global resource from {:?}", addr)) + })?)?; Ok(()) } @@ -839,22 +847,23 @@ impl Interpreter { addr: AccountAddress, ty: &Type, ) -> PartialVMResult<()> { - let resource = - match Self::load_resource(loader, data_store, gas_meter, addr, ty)?.move_from() { - Ok(resource) => { - gas_meter.charge_move_from( - is_generic, - TypeWithLoader { ty, loader }, - Some(&resource), - )?; - resource - }, - Err(err) => { - let val: Option<&Value> = None; - 
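into_effects above now converts each serialized resource into Bytes with .map(Into::into) and attaches a message when serialization fails, and load_module gains a readable prefix on its LINKER_ERROR. A small sketch of the serialize-then-convert shape, using a hypothetical Value stand-in instead of the real move-vm value type:

use bytes::Bytes;

// Hypothetical stand-in for a value that may or may not serialize.
struct Value(Option<Vec<u8>>);

impl Value {
    fn simple_serialize(&self) -> Option<Vec<u8>> {
        self.0.clone()
    }
}

// Same shape as the resource_converter closure in into_effects: serialize,
// convert the Vec<u8> into Bytes, and attach a descriptive message on failure.
fn serialize_resource(value: &Value) -> Result<Bytes, String> {
    value
        .simple_serialize()
        .map(Into::into)
        .ok_or_else(|| "Error when serializing resource.".to_string())
}

fn main() {
    assert!(serialize_resource(&Value(Some(vec![1, 2, 3]))).is_ok());
    assert!(serialize_resource(&Value(None)).is_err());
}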
gas_meter.charge_move_from(is_generic, TypeWithLoader { ty, loader }, val)?; - return Err(err); - }, - }; + let resource = match Self::load_resource(loader, data_store, gas_meter, addr, ty)? + .move_from() + { + Ok(resource) => { + gas_meter.charge_move_from( + is_generic, + TypeWithLoader { ty, loader }, + Some(&resource), + )?; + resource + }, + Err(err) => { + let val: Option<&Value> = None; + gas_meter.charge_move_from(is_generic, TypeWithLoader { ty, loader }, val)?; + return Err(err.with_message(format!("Failed to move resource from {:?}", addr))); + }, + }; self.operand_stack.push(resource)?; Ok(()) } @@ -890,7 +899,7 @@ impl Interpreter { &resource, false, )?; - Err(err) + Err(err.with_message(format!("Failed to move resource into {:?}", addr))) }, } } diff --git a/third_party/move/move-vm/runtime/src/lib.rs b/third_party/move/move-vm/runtime/src/lib.rs index 24558c0c5121a..660bad0c8db43 100644 --- a/third_party/move/move-vm/runtime/src/lib.rs +++ b/third_party/move/move-vm/runtime/src/lib.rs @@ -20,7 +20,7 @@ pub mod native_functions; mod runtime; pub mod session; #[macro_use] -mod tracing; +pub mod tracing; pub mod config; // Only include debugging functionality in debug builds diff --git a/third_party/move/move-vm/runtime/src/session.rs b/third_party/move/move-vm/runtime/src/session.rs index 59d04f2fafaa0..135ff1aa90b84 100644 --- a/third_party/move/move-vm/runtime/src/session.rs +++ b/third_party/move/move-vm/runtime/src/session.rs @@ -6,6 +6,7 @@ use crate::{ config::VMConfig, data_cache::TransactionDataCache, loader::LoadedFunction, move_vm::MoveVM, native_extensions::NativeContextExtensions, }; +use bytes::Bytes; use move_binary_format::{ compatibility::Compatibility, errors::*, @@ -287,7 +288,7 @@ impl<'r, 'l> Session<'r, 'l> { pub fn finish_with_custom_effects( self, resource_converter: &dyn Fn(Value, MoveTypeLayout) -> PartialVMResult, - ) -> VMResult, Resource>> { + ) -> VMResult> { self.data_cache .into_custom_effects(resource_converter, self.move_vm.runtime.loader()) .map_err(|e| e.finish(Location::Undefined)) @@ -309,7 +310,7 @@ impl<'r, 'l> Session<'r, 'l> { pub fn finish_with_extensions_with_custom_effects( self, resource_converter: &dyn Fn(Value, MoveTypeLayout) -> PartialVMResult, - ) -> VMResult<(Changes, Resource>, NativeContextExtensions<'r>)> { + ) -> VMResult<(Changes, NativeContextExtensions<'r>)> { let Session { data_cache, native_extensions, @@ -333,7 +334,7 @@ impl<'r, 'l> Session<'r, 'l> { } /// Get the serialized format of a `CompiledModule` given a `ModuleId`. 
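The interpreter hunks above capture the current execution state on a failed native call only when the testing or stacktrace feature is compiled in, and append the target address to borrow/move errors. A sketch of the cfg!-gated enrichment, with a stand-in error type rather than PartialVMError:

// Stand-in error type; the real code enriches PartialVMError with
// with_exec_state(...) and with_message(...).
#[derive(Debug)]
struct VmError {
    message: String,
    exec_state: Option<String>,
}

impl VmError {
    fn with_exec_state(mut self, state: String) -> Self {
        self.exec_state = Some(state);
        self
    }
}

fn enrich(e: VmError, current_state: &str) -> VmError {
    // cfg!(...) is resolved at compile time, so builds without either feature
    // skip the capture entirely and pay nothing at runtime.
    if cfg!(feature = "testing") || cfg!(feature = "stacktrace") {
        e.with_exec_state(current_state.to_string())
    } else {
        e
    }
}

fn main() {
    let e = VmError { message: "native call failed".into(), exec_state: None };
    let e = enrich(e, "stack: [foo -> bar]");
    println!("{:?}", e);
}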
- pub fn load_module(&self, module_id: &ModuleId) -> VMResult> { + pub fn load_module(&self, module_id: &ModuleId) -> VMResult { self.data_cache.load_module(module_id) } diff --git a/third_party/move/move-vm/runtime/src/tracing.rs b/third_party/move/move-vm/runtime/src/tracing.rs index eb6b6754e2ba8..bda432021292d 100644 --- a/third_party/move/move-vm/runtime/src/tracing.rs +++ b/third_party/move/move-vm/runtime/src/tracing.rs @@ -18,15 +18,16 @@ use ::{ env, fs::{File, OpenOptions}, io::Write, - process, sync::Mutex, - thread, }, }; #[cfg(any(debug_assertions, feature = "debugging"))] const MOVE_VM_TRACING_ENV_VAR_NAME: &str = "MOVE_VM_TRACE"; +#[cfg(any(debug_assertions, feature = "debugging"))] +const MOVE_VM_TRACING_FLUSH_ENV_VAR_NAME: &str = "MOVE_VM_TRACE_FLUSH"; + #[cfg(any(debug_assertions, feature = "debugging"))] const MOVE_VM_STEPPING_ENV_VAR_NAME: &str = "MOVE_VM_STEP"; @@ -36,24 +37,31 @@ static FILE_PATH: Lazy = Lazy::new(|| { }); #[cfg(any(debug_assertions, feature = "debugging"))] -static TRACING_ENABLED: Lazy = Lazy::new(|| env::var(MOVE_VM_TRACING_ENV_VAR_NAME).is_ok()); +pub static TRACING_ENABLED: Lazy = + Lazy::new(|| env::var(MOVE_VM_TRACING_ENV_VAR_NAME).is_ok()); #[cfg(any(debug_assertions, feature = "debugging"))] static DEBUGGING_ENABLED: Lazy = Lazy::new(|| env::var(MOVE_VM_STEPPING_ENV_VAR_NAME).is_ok()); #[cfg(any(debug_assertions, feature = "debugging"))] -static LOGGING_FILE: Lazy> = Lazy::new(|| { - Mutex::new( - OpenOptions::new() - .write(true) - .create(true) - .append(true) - .open(&*FILE_PATH) - .unwrap(), - ) +pub static LOGGING_FILE_WRITER: Lazy>> = Lazy::new(|| { + let file = OpenOptions::new() + .write(true) + .create(true) + .append(true) + .open(&*FILE_PATH) + .unwrap(); + Mutex::new(std::io::BufWriter::with_capacity( + 4096 * 1024, /* 4096KB */ + file, + )) }); +#[cfg(any(debug_assertions, feature = "debugging"))] +pub static SINGLE_STEP_FLUSHING: Lazy = + Lazy::new(|| env::var(MOVE_VM_TRACING_FLUSH_ENV_VAR_NAME).is_ok()); + #[cfg(any(debug_assertions, feature = "debugging"))] static DEBUG_CONTEXT: Lazy> = Lazy::new(|| Mutex::new(DebugContext::new())); @@ -68,17 +76,13 @@ pub(crate) fn trace( interp: &Interpreter, ) { if *TRACING_ENABLED { - let f = &mut *LOGGING_FILE.lock().unwrap(); - writeln!( - f, - "{}-{:?},{},{},{:?}", - process::id(), - thread::current().id(), - function_desc.pretty_string(), - pc, - instr, - ) - .unwrap(); + let buf_writer = &mut *LOGGING_FILE_WRITER.lock().unwrap(); + buf_writer + .write_fmt(format_args!("{},{}\n", function_desc.pretty_string(), pc,)) + .unwrap(); + if *SINGLE_STEP_FLUSHING { + buf_writer.flush().unwrap(); + } } if *DEBUGGING_ENABLED { DEBUG_CONTEXT diff --git a/third_party/move/move-vm/runtime/src/unit_tests/vm_arguments_tests.rs b/third_party/move/move-vm/runtime/src/unit_tests/vm_arguments_tests.rs index b7d2c457fe536..d76710a0c421e 100644 --- a/third_party/move/move-vm/runtime/src/unit_tests/vm_arguments_tests.rs +++ b/third_party/move/move-vm/runtime/src/unit_tests/vm_arguments_tests.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::move_vm::MoveVM; +use bytes::Bytes; use move_binary_format::{ errors::VMResult, file_format::{ @@ -231,7 +232,7 @@ fn make_script_function(signature: Signature) -> (CompiledModule, Identifier) { } struct RemoteStore { - modules: HashMap>, + modules: HashMap, } impl RemoteStore { @@ -245,7 +246,7 @@ impl RemoteStore { let id = compiled_module.self_id(); let mut bytes = vec![]; compiled_module.serialize(&mut bytes).unwrap(); - self.modules.insert(id, 
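tracing.rs above replaces the bare file handle with a 4 MiB BufWriter and only flushes per instruction when MOVE_VM_TRACE_FLUSH is set. A sketch of that pattern with once_cell (the trace file path below is illustrative; the real code derives it from MOVE_VM_TRACE):

use once_cell::sync::Lazy;
use std::{env, fs::OpenOptions, io::{BufWriter, Write}, sync::Mutex};

const TRACE_FLUSH_ENV: &str = "MOVE_VM_TRACE_FLUSH";

// Buffered, process-wide trace writer: BufWriter batches the per-instruction
// lines so tracing does not hit the filesystem on every step.
static LOGGING_FILE_WRITER: Lazy<Mutex<BufWriter<std::fs::File>>> = Lazy::new(|| {
    let file = OpenOptions::new()
        .create(true)
        .append(true)
        .open("trace.log") // illustrative path; the real code reads MOVE_VM_TRACE
        .unwrap();
    Mutex::new(BufWriter::with_capacity(4096 * 1024, file))
});

static SINGLE_STEP_FLUSHING: Lazy<bool> =
    Lazy::new(|| env::var(TRACE_FLUSH_ENV).is_ok());

fn trace(function: &str, pc: u16) {
    let writer = &mut *LOGGING_FILE_WRITER.lock().unwrap();
    writer.write_fmt(format_args!("{},{}\n", function, pc)).unwrap();
    if *SINGLE_STEP_FLUSHING {
        // Per-step flushing keeps the trace complete even if the process dies,
        // at the cost of throughput.
        writer.flush().unwrap();
    }
}

fn main() {
    trace("0x42::M::foo", 0);
    trace("0x42::M::foo", 1);
}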
bytes); + self.modules.insert(id, bytes.into()); } } @@ -254,7 +255,7 @@ impl ModuleResolver for RemoteStore { vec![] } - fn get_module(&self, module_id: &ModuleId) -> Result>, anyhow::Error> { + fn get_module(&self, module_id: &ModuleId) -> Result, anyhow::Error> { Ok(self.modules.get(module_id).cloned()) } } @@ -265,7 +266,7 @@ impl ResourceResolver for RemoteStore { _address: &AccountAddress, _tag: &StructTag, _metadata: &[Metadata], - ) -> anyhow::Result<(Option>, usize)> { + ) -> anyhow::Result<(Option, usize)> { Ok((None, 0)) } } @@ -630,7 +631,11 @@ fn check_script() { .err() .unwrap() .major_status(); - assert_eq!(status, StatusCode::LINKER_ERROR); + assert_eq!( + status, + StatusCode::LINKER_ERROR, + "Linker Error: The signature is deprecated" + ); } // @@ -813,7 +818,12 @@ fn call_missing_item() { ) .err() .unwrap(); - assert_eq!(error.major_status(), StatusCode::LINKER_ERROR); + assert_eq!( + error.major_status(), + StatusCode::LINKER_ERROR, + "Linker Error: Call to item at a non-existent external module {:?}", + module + ); assert_eq!(error.status_type(), StatusType::Verification); drop(session); diff --git a/third_party/move/move-vm/test-utils/Cargo.toml b/third_party/move/move-vm/test-utils/Cargo.toml index 4a0a475b03857..b3fe1ee6785bf 100644 --- a/third_party/move/move-vm/test-utils/Cargo.toml +++ b/third_party/move/move-vm/test-utils/Cargo.toml @@ -12,6 +12,7 @@ edition = "2021" [dependencies] anyhow = "1.0.52" +bytes = "1.4.0" once_cell = "1.7.2" serde = { version = "1.0.124", features = ["derive", "rc"] } diff --git a/third_party/move/move-vm/test-utils/src/storage.rs b/third_party/move/move-vm/test-utils/src/storage.rs index 7eff9cc7724fd..07c486ffcdf3d 100644 --- a/third_party/move/move-vm/test-utils/src/storage.rs +++ b/third_party/move/move-vm/test-utils/src/storage.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::{bail, Error, Result}; +use bytes::Bytes; use move_core_types::{ account_address::AccountAddress, effects::{AccountChangeSet, ChangeSet, Op}, @@ -33,7 +34,7 @@ impl ModuleResolver for BlankStorage { vec![] } - fn get_module(&self, _module_id: &ModuleId) -> Result>> { + fn get_module(&self, _module_id: &ModuleId) -> Result> { Ok(None) } } @@ -44,7 +45,7 @@ impl ResourceResolver for BlankStorage { _address: &AccountAddress, _tag: &StructTag, _metadata: &[Metadata], - ) -> Result<(Option>, usize)> { + ) -> Result<(Option, usize)> { Ok((None, 0)) } } @@ -55,7 +56,7 @@ impl TableResolver for BlankStorage { &self, _handle: &TableHandle, _key: &[u8], - ) -> Result>, Error> { + ) -> Result, Error> { Ok(None) } } @@ -73,7 +74,7 @@ impl<'a, 'b, S: ModuleResolver> ModuleResolver for DeltaStorage<'a, 'b, S> { vec![] } - fn get_module(&self, module_id: &ModuleId) -> Result>, Error> { + fn get_module(&self, module_id: &ModuleId) -> Result, Error> { if let Some(account_storage) = self.delta.accounts().get(module_id.address()) { if let Some(blob_opt) = account_storage.modules().get(module_id.name()) { return Ok(blob_opt.clone().ok()); @@ -90,7 +91,7 @@ impl<'a, 'b, S: ResourceResolver> ResourceResolver for DeltaStorage<'a, 'b, S> { address: &AccountAddress, tag: &StructTag, metadata: &[Metadata], - ) -> Result<(Option>, usize)> { + ) -> Result<(Option, usize)> { if let Some(account_storage) = self.delta.accounts().get(address) { if let Some(blob_opt) = account_storage.resources().get(tag) { let buf = blob_opt.clone().ok(); @@ -110,7 +111,7 @@ impl<'a, 'b, S: TableResolver> TableResolver for DeltaStorage<'a, 'b, S> { &self, handle: &TableHandle, key: 
&[u8], - ) -> std::result::Result>, Error> { + ) -> std::result::Result, Error> { // TODO: No support for table deltas self.base.resolve_table_entry(handle, key) } @@ -125,8 +126,8 @@ impl<'a, 'b, S: MoveResolver> DeltaStorage<'a, 'b, S> { /// Simple in-memory storage for modules and resources under an account. #[derive(Debug, Clone)] struct InMemoryAccountStorage { - resources: BTreeMap>, - modules: BTreeMap>, + resources: BTreeMap, + modules: BTreeMap, } /// Simple in-memory storage that can be used as a Move VM storage backend for testing purposes. @@ -134,7 +135,7 @@ struct InMemoryAccountStorage { pub struct InMemoryStorage { accounts: BTreeMap, #[cfg(feature = "table-extension")] - tables: BTreeMap, Vec>>, + tables: BTreeMap, Bytes>>, } fn apply_changes( @@ -268,7 +269,9 @@ impl InMemoryStorage { let account = get_or_insert(&mut self.accounts, *module_id.address(), || { InMemoryAccountStorage::new() }); - account.modules.insert(module_id.name().to_owned(), blob); + account + .modules + .insert(module_id.name().to_owned(), blob.into()); } pub fn publish_or_overwrite_resource( @@ -278,7 +281,7 @@ impl InMemoryStorage { blob: Vec, ) { let account = get_or_insert(&mut self.accounts, addr, InMemoryAccountStorage::new); - account.resources.insert(struct_tag, blob); + account.resources.insert(struct_tag, blob.into()); } } @@ -287,7 +290,7 @@ impl ModuleResolver for InMemoryStorage { vec![] } - fn get_module(&self, module_id: &ModuleId) -> Result>, Error> { + fn get_module(&self, module_id: &ModuleId) -> Result, Error> { if let Some(account_storage) = self.accounts.get(module_id.address()) { return Ok(account_storage.modules.get(module_id.name()).cloned()); } @@ -301,7 +304,7 @@ impl ResourceResolver for InMemoryStorage { address: &AccountAddress, tag: &StructTag, _metadata: &[Metadata], - ) -> Result<(Option>, usize)> { + ) -> Result<(Option, usize)> { if let Some(account_storage) = self.accounts.get(address) { let buf = account_storage.resources.get(tag).cloned(); let buf_size = resource_size(&buf); @@ -317,7 +320,7 @@ impl TableResolver for InMemoryStorage { &self, handle: &TableHandle, key: &[u8], - ) -> std::result::Result>, Error> { + ) -> std::result::Result, Error> { Ok(self.tables.get(handle).and_then(|t| t.get(key).cloned())) } } diff --git a/third_party/move/move-vm/transactional-tests/tests/builtins/get_missing_struct.exp b/third_party/move/move-vm/transactional-tests/tests/builtins/get_missing_struct.exp index bf7479ee962a4..12d91af83680e 100644 --- a/third_party/move/move-vm/transactional-tests/tests/builtins/get_missing_struct.exp +++ b/third_party/move/move-vm/transactional-tests/tests/builtins/get_missing_struct.exp @@ -2,9 +2,11 @@ processed 2 tasks task 1 'run'. 
lines 31-38: Error: Script execution failed with VMError: { + message: Failed to borrow global resource from 0000000000000000000000000000000000000000000000001111111111111110, major_status: MISSING_DATA, sub_status: None, location: 0x1::Token, indices: [], offsets: [(FunctionDefinitionIndex(2), 3)], + exec_state: Some(ExecutionState { stack_trace: [(None, FunctionDefinitionIndex(0), 0)] }), } diff --git a/third_party/move/move-vm/transactional-tests/tests/builtins/get_missing_struct.mvir b/third_party/move/move-vm/transactional-tests/tests/builtins/get_missing_struct.mvir index 3e9bfe02a526c..5e53b23e69a5e 100644 --- a/third_party/move/move-vm/transactional-tests/tests/builtins/get_missing_struct.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/builtins/get_missing_struct.mvir @@ -28,7 +28,7 @@ module 0x1.Token { } -//# run +//# run --verbose import 0x1.Token; main() { diff --git a/third_party/move/move-vm/transactional-tests/tests/instructions/arithmetic_operators_u8.exp b/third_party/move/move-vm/transactional-tests/tests/instructions/arithmetic_operators_u8.exp index 4dfdee1a57a6b..0cfc9bb8085fc 100644 --- a/third_party/move/move-vm/transactional-tests/tests/instructions/arithmetic_operators_u8.exp +++ b/third_party/move/move-vm/transactional-tests/tests/instructions/arithmetic_operators_u8.exp @@ -2,108 +2,132 @@ processed 17 tasks task 1 'run'. lines 17-22: Error: Script execution failed with VMError: { + message: Addition overflow, major_status: ARITHMETIC_ERROR, sub_status: None, location: script, indices: [], offsets: [(FunctionDefinitionIndex(0), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), } task 2 'run'. lines 24-29: Error: Script execution failed with VMError: { + message: Addition overflow, major_status: ARITHMETIC_ERROR, sub_status: None, location: script, indices: [], offsets: [(FunctionDefinitionIndex(0), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), } task 4 'run'. lines 48-53: Error: Script execution failed with VMError: { + message: Subtraction overflow, major_status: ARITHMETIC_ERROR, sub_status: None, location: script, indices: [], offsets: [(FunctionDefinitionIndex(0), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), } task 5 'run'. lines 55-60: Error: Script execution failed with VMError: { + message: Subtraction overflow, major_status: ARITHMETIC_ERROR, sub_status: None, location: script, indices: [], offsets: [(FunctionDefinitionIndex(0), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), } task 7 'run'. lines 77-82: Error: Script execution failed with VMError: { + message: Multiplication overflow, major_status: ARITHMETIC_ERROR, sub_status: None, location: script, indices: [], offsets: [(FunctionDefinitionIndex(0), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), } task 8 'run'. lines 84-89: Error: Script execution failed with VMError: { + message: Multiplication overflow, major_status: ARITHMETIC_ERROR, sub_status: None, location: script, indices: [], offsets: [(FunctionDefinitionIndex(0), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), } task 10 'run'. lines 108-113: Error: Script execution failed with VMError: { + message: Division by zero, major_status: ARITHMETIC_ERROR, sub_status: None, location: script, indices: [], offsets: [(FunctionDefinitionIndex(0), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), } task 11 'run'. 
lines 115-120: Error: Script execution failed with VMError: { + message: Division by zero, major_status: ARITHMETIC_ERROR, sub_status: None, location: script, indices: [], offsets: [(FunctionDefinitionIndex(0), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), } task 12 'run'. lines 122-127: Error: Script execution failed with VMError: { + message: Division by zero, major_status: ARITHMETIC_ERROR, sub_status: None, location: script, indices: [], offsets: [(FunctionDefinitionIndex(0), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), } task 14 'run'. lines 146-151: Error: Script execution failed with VMError: { + message: Integer remainder by zero, major_status: ARITHMETIC_ERROR, sub_status: None, location: script, indices: [], offsets: [(FunctionDefinitionIndex(0), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), } task 15 'run'. lines 153-158: Error: Script execution failed with VMError: { + message: Integer remainder by zero, major_status: ARITHMETIC_ERROR, sub_status: None, location: script, indices: [], offsets: [(FunctionDefinitionIndex(0), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), } task 16 'run'. lines 160-165: Error: Script execution failed with VMError: { + message: Integer remainder by zero, major_status: ARITHMETIC_ERROR, sub_status: None, location: script, indices: [], offsets: [(FunctionDefinitionIndex(0), 2)], + exec_state: Some(ExecutionState { stack_trace: [] }), } diff --git a/third_party/move/move-vm/transactional-tests/tests/instructions/arithmetic_operators_u8.mvir b/third_party/move/move-vm/transactional-tests/tests/instructions/arithmetic_operators_u8.mvir index 2dac2f16a92c6..c3a07e5cdf86a 100644 --- a/third_party/move/move-vm/transactional-tests/tests/instructions/arithmetic_operators_u8.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/instructions/arithmetic_operators_u8.mvir @@ -1,4 +1,4 @@ -//# run +//# run --verbose main() { label b0: assert(0u8 + 0u8 == 0u8, 1000); @@ -14,14 +14,14 @@ label b0: return; } -//# run +//# run --verbose main() { label b0: _ = 1u8 + 255u8; return; } -//# run +//# run --verbose main() { label b0: _ = 215u8 + 156u8; @@ -30,7 +30,7 @@ label b0: -//# run +//# run --verbose main() { label b0: assert(0u8 - 0u8 == 0u8, 2000); @@ -45,14 +45,14 @@ label b0: return; } -//# run +//# run --verbose main() { label b0: _ = 0u8 - 1u8; return; } -//# run +//# run --verbose main() { label b0: _ = 54u8 - 100u8; @@ -60,7 +60,7 @@ label b0: } -//# run +//# run --verbose main() { label b0: assert(0u8 * 0u8 == 0u8, 3000); @@ -74,14 +74,14 @@ label b0: return; } -//# run +//# run --verbose main() { label b0: _ = 16u8 * 16u8; return; } -//# run +//# run --verbose main() { label b0: _ = 128u8 * 2u8; @@ -90,7 +90,7 @@ label b0: -//# run +//# run --verbose main() { label b0: assert(0u8 / 1u8 == 0u8, 4000); @@ -105,21 +105,21 @@ label b0: return; } -//# run +//# run --verbose main() { label b0: _ = 0u8 / 0u8; return; } -//# run +//# run --verbose main() { label b0: _ = 1u8 / 0u8; return; } -//# run +//# run --verbose main() { label b0: _ = 255u8 / 0u8; @@ -128,7 +128,7 @@ label b0: -//# run +//# run --verbose main() { label b0: assert(0u8 % 1u8 == 0u8, 5000); @@ -143,21 +143,21 @@ label b0: return; } -//# run +//# run --verbose main() { label b0: _ = 0u8 % 0u8; return; } -//# run +//# run --verbose main() { label b0: _ = 1u8 % 0u8; return; } -//# run +//# run --verbose main() { label b0: _ = 255u8 % 0u8; diff --git 
a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_friend_fn.exp b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_friend_fn.exp index 156da5f25b6cf..3d2556fa937d2 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_friend_fn.exp +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_friend_fn.exp @@ -2,9 +2,11 @@ processed 4 tasks task 3 'publish'. lines 28-36: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::M'. Got VMError: { + message: Module Update Failure: Friend signature of new module differs from existing module in 0000000000000000000000000000000000000000000000000000000000000042::M, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_friend_fn.mvir b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_friend_fn.mvir index e16993d55d9fb..7df7250a459de 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_friend_fn.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_friend_fn.mvir @@ -25,7 +25,7 @@ module 0x42.N { } } -//# publish +//# publish --verbose module 0x42.M { friend 0x42.N; // cannot change signature diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_unused_friend_fn.exp b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_unused_friend_fn.exp index ac509ab72f8e2..58a8653c2cd8d 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_unused_friend_fn.exp +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_unused_friend_fn.exp @@ -2,9 +2,11 @@ processed 3 tasks task 2 'publish'. lines 14-22: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::M'. 
Got VMError: { + message: Module Update Failure: Friend signature of new module differs from existing module in 0000000000000000000000000000000000000000000000000000000000000042::M, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_unused_friend_fn.mvir b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_unused_friend_fn.mvir index 361a74a8c8b62..c8a5c428ad297 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_unused_friend_fn.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_change_unused_friend_fn.mvir @@ -11,7 +11,7 @@ module 0x42.M { } } -//# publish +//# publish --verbose module 0x42.M { friend 0x42.N; // cannot change function signature diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_friend_fn.exp b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_friend_fn.exp index 9e16630ca0701..afeb9af951c20 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_friend_fn.exp +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_friend_fn.exp @@ -2,9 +2,11 @@ processed 4 tasks task 3 'publish'. lines 28-33: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::M'. Got VMError: { + message: Module Update Failure: Friend signature of new module differs from existing module in 0000000000000000000000000000000000000000000000000000000000000042::M, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_friend_fn.mvir b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_friend_fn.mvir index bda8f580da42f..2b1a1a0811988 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_friend_fn.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_friend_fn.mvir @@ -25,7 +25,7 @@ module 0x42.N { } } -//# publish +//# publish --verbose module 0x42.M { friend 0x42.N; // cannot remove visible function diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_unused_friend_fn.exp b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_unused_friend_fn.exp index ca8f6fa765395..ff8b588225af6 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_unused_friend_fn.exp +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_unused_friend_fn.exp @@ -2,9 +2,11 @@ processed 3 tasks task 2 'publish'. lines 14-18: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::M'. 
Got VMError: { + message: Module Update Failure: Friend signature of new module differs from existing module in 0000000000000000000000000000000000000000000000000000000000000042::M, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_unused_friend_fn.mvir b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_unused_friend_fn.mvir index b2b224b8f5c5b..692ee3019f89c 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_unused_friend_fn.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_delete_unused_friend_fn.mvir @@ -11,7 +11,7 @@ module 0x42.M { } } -//# publish +//# publish --verbose module 0x42.M { friend 0x42.N; // cannot remove non internal functions diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_friend.exp b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_friend.exp index 4a7cab0d83913..1eb3dedbb7114 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_friend.exp +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_friend.exp @@ -2,9 +2,11 @@ processed 4 tasks task 3 'publish'. lines 28-35: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::M'. Got VMError: { + message: Module Update Failure: Friend signature of new module differs from existing module in 0000000000000000000000000000000000000000000000000000000000000042::M, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_friend.mvir b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_friend.mvir index efca87c8d559b..b067e364ace8c 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_friend.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_friend.mvir @@ -25,7 +25,7 @@ module 0x42.N { } } -//# publish +//# publish --verbose module 0x42.M { // cannot drop friends public(friend) foo() { diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_unlinked_friend.exp b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_unlinked_friend.exp index 9c9188a9c64b0..23d878577c143 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_unlinked_friend.exp +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_unlinked_friend.exp @@ -2,9 +2,11 @@ processed 3 tasks task 2 'publish'. lines 14-21: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::M'. 
Got VMError: { + message: Module Update Failure: Friend signature of new module differs from existing module in 0000000000000000000000000000000000000000000000000000000000000042::M, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_unlinked_friend.mvir b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_unlinked_friend.mvir index 6405ebd9f9787..54f7bd61bf4e6 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_unlinked_friend.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_drop_unlinked_friend.mvir @@ -11,7 +11,7 @@ module 0x42.M { } } -//# publish +//# publish --verbose module 0x42.M { // cannot remove friends public(friend) foo() { diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_entry_removed.exp b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_entry_removed.exp index e1799772e233f..c499538eb82b9 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_entry_removed.exp +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_entry_removed.exp @@ -2,27 +2,33 @@ processed 6 tasks task 1 'publish'. lines 11-17: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::Priv'. Got VMError: { + message: Module Update Failure: Public function/struct signature of new module differs from existing module in 0000000000000000000000000000000000000000000000000000000000000042::Priv, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } task 3 'publish'. lines 27-33: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::Pub'. Got VMError: { + message: Module Update Failure: Public function/struct signature of new module differs from existing module in 0000000000000000000000000000000000000000000000000000000000000042::Pub, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } task 5 'publish'. lines 43-49: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::Fr'. 
Got VMError: { + message: Module Update Failure: Friend signature of new module differs from existing module in 0000000000000000000000000000000000000000000000000000000000000042::Fr, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_entry_removed.mvir b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_entry_removed.mvir index 0997ad3614d11..b6386dd30e3bc 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_entry_removed.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_entry_removed.mvir @@ -8,7 +8,7 @@ module 0x42.Priv { } } -//# publish +//# publish --verbose module 0x42.Priv { foo() { label b0: @@ -16,7 +16,7 @@ module 0x42.Priv { } } -//# publish +//# publish --verbose module 0x42.Pub { public entry foo() { label b0: @@ -24,7 +24,7 @@ module 0x42.Pub { } } -//# publish +//# publish --verbose module 0x42.Pub { public foo() { label b0: @@ -32,7 +32,7 @@ module 0x42.Pub { } } -//# publish +//# publish --verbose module 0x42.Fr { public(friend) entry foo() { label b0: @@ -40,7 +40,7 @@ module 0x42.Fr { } } -//# publish +//# publish --verbose module 0x42.Fr { public(friend) foo() { label b0: diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_friend_fn_to_private.exp b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_friend_fn_to_private.exp index 156da5f25b6cf..3d2556fa937d2 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_friend_fn_to_private.exp +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_friend_fn_to_private.exp @@ -2,9 +2,11 @@ processed 4 tasks task 3 'publish'. lines 28-36: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::M'. 
Got VMError: { + message: Module Update Failure: Friend signature of new module differs from existing module in 0000000000000000000000000000000000000000000000000000000000000042::M, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_friend_fn_to_private.mvir b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_friend_fn_to_private.mvir index 05811249b2335..cbb33e23c87a4 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_friend_fn_to_private.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_friend_fn_to_private.mvir @@ -25,7 +25,7 @@ module 0x42.N { } } -//# publish +//# publish --verbose module 0x42.M { friend 0x42.N; // cannot make visibility more restrictive diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout.exp b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout.exp index d04d7f5ea2b22..d9f71cedb190a 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout.exp +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout.exp @@ -2,9 +2,11 @@ processed 2 tasks task 1 'publish'. lines 6-10: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::Duplicate'. Got VMError: { + message: Module Update Failure: Struct layout of new module differs from existing modul in 0000000000000000000000000000000000000000000000000000000000000042::Duplicate, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout.mvir b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout.mvir index 77940191babc6..d6b305ed4aff5 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout.mvir @@ -3,7 +3,7 @@ module 0x42.Duplicate { struct T { f: u64 } } -//# publish +//# publish --verbose module 0x42.Duplicate { struct T { f: u64, g: bool } // cannot add fields to public types diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout_struct.exp b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout_struct.exp index 675f71e25d7d1..8c247432ee505 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout_struct.exp +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout_struct.exp @@ -2,9 +2,11 @@ processed 2 tasks task 1 'publish'. lines 7-13: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::Duplicate'. 
Got VMError: { + message: Module Update Failure: Struct layout of new module differs from existing modul in 0000000000000000000000000000000000000000000000000000000000000042::Duplicate, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout_struct.mvir b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout_struct.mvir index 96b26cc629605..3cbffbae17abd 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout_struct.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_layout_struct.mvir @@ -4,7 +4,7 @@ module 0x42.Duplicate { struct T { f: Self.U } } -//# publish +//# publish --verbose module 0x42.Duplicate { struct U { f: u64 } struct U2 { f: u64 } diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_linking.exp b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_linking.exp index 2a53da927843e..074055f193525 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_linking.exp +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_linking.exp @@ -2,9 +2,11 @@ processed 2 tasks task 1 'publish'. lines 7-11: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::Duplicate'. Got VMError: { + message: Module Update Failure: Public function/struct signature of new module differs from existing module in 0000000000000000000000000000000000000000000000000000000000000042::Duplicate, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_linking.mvir b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_linking.mvir index daa711bb06495..90c7f5bc3ab8c 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_linking.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_incompatible_linking.mvir @@ -4,7 +4,7 @@ module 0x42.Duplicate { public g() { label b0: return; } } -//# publish +//# publish --verbose module 0x42.Duplicate { public f() { label b0: return; } // cannot drop public functions diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_skip_compatible_linking_hack_struct.exp b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_skip_compatible_linking_hack_struct.exp index a0ea749d9dccf..7fda414fa2fca 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_skip_compatible_linking_hack_struct.exp +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_skip_compatible_linking_hack_struct.exp @@ -11,11 +11,13 @@ Error: Unable to publish module '00000000000000000000000000000000000000000000000 task 3 'publish'. 
lines 25-45: Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::A'. Got VMError: { + message: Module Update Failure: Public function/struct signature of new module differs from existing module in 0000000000000000000000000000000000000000000000000000000000000042::A, major_status: BACKWARD_INCOMPATIBLE_MODULE_UPDATE, sub_status: None, location: undefined, indices: [], offsets: [], + exec_state: None, } task 4 'run'. lines 47-47: diff --git a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_skip_compatible_linking_hack_struct.mvir b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_skip_compatible_linking_hack_struct.mvir index 00f2d8f21a7d6..c60f304a3d60a 100644 --- a/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_skip_compatible_linking_hack_struct.mvir +++ b/third_party/move/move-vm/transactional-tests/tests/module_publishing/republish_module_skip_compatible_linking_hack_struct.mvir @@ -22,7 +22,7 @@ module 0x42.A { } } -//# publish +//# publish --verbose module 0x42.A { struct A has store,drop { f: u64, diff --git a/third_party/move/move-vm/types/src/values/values_impl.rs b/third_party/move/move-vm/types/src/values/values_impl.rs index 1c7238324347b..4b13f30a65f1e 100644 --- a/third_party/move/move-vm/types/src/values/values_impl.rs +++ b/third_party/move/move-vm/types/src/values/values_impl.rs @@ -1466,7 +1466,10 @@ impl IntegerValue { return Err(PartialVMError::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg)); }, }; - res.ok_or_else(|| PartialVMError::new(StatusCode::ARITHMETIC_ERROR)) + res.ok_or_else(|| { + PartialVMError::new(StatusCode::ARITHMETIC_ERROR) + .with_message("Addition overflow".to_string()) + }) } pub fn sub_checked(self, other: Self) -> PartialVMResult { @@ -1483,7 +1486,10 @@ impl IntegerValue { return Err(PartialVMError::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg)); }, }; - res.ok_or_else(|| PartialVMError::new(StatusCode::ARITHMETIC_ERROR)) + res.ok_or_else(|| { + PartialVMError::new(StatusCode::ARITHMETIC_ERROR) + .with_message("Subtraction overflow".to_string()) + }) } pub fn mul_checked(self, other: Self) -> PartialVMResult { @@ -1500,7 +1506,10 @@ impl IntegerValue { return Err(PartialVMError::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg)); }, }; - res.ok_or_else(|| PartialVMError::new(StatusCode::ARITHMETIC_ERROR)) + res.ok_or_else(|| { + PartialVMError::new(StatusCode::ARITHMETIC_ERROR) + .with_message("Multiplication overflow".to_string()) + }) } pub fn div_checked(self, other: Self) -> PartialVMResult { @@ -1517,7 +1526,10 @@ impl IntegerValue { return Err(PartialVMError::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg)); }, }; - res.ok_or_else(|| PartialVMError::new(StatusCode::ARITHMETIC_ERROR)) + res.ok_or_else(|| { + PartialVMError::new(StatusCode::ARITHMETIC_ERROR) + .with_message("Division by zero".to_string()) + }) } pub fn rem_checked(self, other: Self) -> PartialVMResult { @@ -1534,7 +1546,10 @@ impl IntegerValue { return Err(PartialVMError::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg)); }, }; - res.ok_or_else(|| PartialVMError::new(StatusCode::ARITHMETIC_ERROR)) + res.ok_or_else(|| { + PartialVMError::new(StatusCode::ARITHMETIC_ERROR) + .with_message("Integer remainder by zero".to_string()) + }) } pub fn bit_or(self, other: Self) -> PartialVMResult { @@ -1589,37 +1604,16 @@ impl IntegerValue { use IntegerValue::*; Ok(match self { - U8(x) => { - if 
n_bits >= 8 { - return Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR)); - } - IntegerValue::U8(x << n_bits) - }, - U16(x) => { - if n_bits >= 16 { - return Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR)); - } - IntegerValue::U16(x << n_bits) - }, - U32(x) => { - if n_bits >= 32 { - return Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR)); - } - IntegerValue::U32(x << n_bits) - }, - U64(x) => { - if n_bits >= 64 { - return Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR)); - } - IntegerValue::U64(x << n_bits) - }, - U128(x) => { - if n_bits >= 128 { - return Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR)); - } - IntegerValue::U128(x << n_bits) - }, + U8(x) if n_bits < 8 => IntegerValue::U8(x << n_bits), + U16(x) if n_bits < 16 => IntegerValue::U16(x << n_bits), + U32(x) if n_bits < 32 => IntegerValue::U32(x << n_bits), + U64(x) if n_bits < 64 => IntegerValue::U64(x << n_bits), + U128(x) if n_bits < 128 => IntegerValue::U128(x << n_bits), U256(x) => IntegerValue::U256(x << n_bits), + _ => { + return Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) + .with_message("Shift Left overflow".to_string())); + }, }) } @@ -1627,37 +1621,16 @@ impl IntegerValue { use IntegerValue::*; Ok(match self { - U8(x) => { - if n_bits >= 8 { - return Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR)); - } - IntegerValue::U8(x >> n_bits) - }, - U16(x) => { - if n_bits >= 16 { - return Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR)); - } - IntegerValue::U16(x >> n_bits) - }, - U32(x) => { - if n_bits >= 32 { - return Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR)); - } - IntegerValue::U32(x >> n_bits) - }, - U64(x) => { - if n_bits >= 64 { - return Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR)); - } - IntegerValue::U64(x >> n_bits) - }, - U128(x) => { - if n_bits >= 128 { - return Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR)); - } - IntegerValue::U128(x >> n_bits) - }, + U8(x) if n_bits < 8 => IntegerValue::U8(x >> n_bits), + U16(x) if n_bits < 16 => IntegerValue::U16(x >> n_bits), + U32(x) if n_bits < 32 => IntegerValue::U32(x >> n_bits), + U64(x) if n_bits < 64 => IntegerValue::U64(x >> n_bits), + U128(x) if n_bits < 128 => IntegerValue::U128(x >> n_bits), U256(x) => IntegerValue::U256(x >> n_bits), + _ => { + return Err(PartialVMError::new(StatusCode::ARITHMETIC_ERROR) + .with_message("Shift Right overflow".to_string())); + }, }) } diff --git a/third_party/move/testing-infra/test-generation/src/lib.rs b/third_party/move/testing-infra/test-generation/src/lib.rs index b909aba8b4c33..18c141565008d 100644 --- a/third_party/move/testing-infra/test-generation/src/lib.rs +++ b/third_party/move/testing-infra/test-generation/src/lib.rs @@ -151,7 +151,7 @@ fn execute_function_in_module( let mut blob = vec![]; module.serialize(&mut blob).unwrap(); changeset - .add_module_op(module_id.clone(), Op::New(blob)) + .add_module_op(module_id.clone(), Op::New(blob.into())) .unwrap(); let delta_storage = DeltaStorage::new(storage, &changeset); let mut sess = vm.new_session(&delta_storage); diff --git a/third_party/move/testing-infra/transactional-test-runner/src/framework.rs b/third_party/move/testing-infra/transactional-test-runner/src/framework.rs index 9512f779ceee7..e89a8a99b93ac 100644 --- a/third_party/move/testing-infra/transactional-test-runner/src/framework.rs +++ b/third_party/move/testing-infra/transactional-test-runner/src/framework.rs @@ -935,14 +935,20 @@ fn handle_known_task<'a, Adapter: MoveTestAdapter<'a>>( let task_name = task.name.to_owned(); 
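The values_impl.rs hunks above attach human-readable messages to arithmetic errors and rewrite shift_l/shift_r as match arms with range guards plus a single overflow arm. A compact sketch of both shapes with stand-in types (only two integer widths shown):

// Stand-in error type; the real code builds PartialVMError with
// StatusCode::ARITHMETIC_ERROR and with_message(...).
#[derive(Debug)]
struct ArithmeticError(&'static str);

#[derive(Debug)]
enum IntegerValue {
    U8(u8),
    U64(u64),
}

// Mirrors the refactored shift_l: each in-range case is one guarded arm, and a
// single catch-all arm reports the overflow with a message.
fn shl_checked(v: IntegerValue, n_bits: u8) -> Result<IntegerValue, ArithmeticError> {
    use IntegerValue::*;
    Ok(match v {
        U8(x) if n_bits < 8 => U8(x << n_bits),
        U64(x) if n_bits < 64 => U64(x << n_bits),
        _ => return Err(ArithmeticError("Shift Left overflow")),
    })
}

// Mirrors add_checked / div_checked: checked arithmetic yields None on failure,
// and ok_or_else attaches the message the .exp files above now expect.
fn add_checked(a: u8, b: u8) -> Result<u8, ArithmeticError> {
    a.checked_add(b).ok_or_else(|| ArithmeticError("Addition overflow"))
}

fn div_checked(a: u8, b: u8) -> Result<u8, ArithmeticError> {
    a.checked_div(b).ok_or_else(|| ArithmeticError("Division by zero"))
}

fn main() {
    assert!(matches!(shl_checked(IntegerValue::U8(1), 3), Ok(IntegerValue::U8(8))));
    assert!(shl_checked(IntegerValue::U8(1), 8).is_err());
    assert!(add_checked(255, 1).is_err());
    assert!(div_checked(1, 0).is_err());
}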
let start_line = task.start_line; let stop_line = task.stop_line; + let data_path = match &task.data { + Some(f) => f.path().to_str().unwrap().to_string(), + None => "".to_string(), + }; let result = adapter.handle_command(task); - let result_string = match result { + let mut result_string = match result { Ok(None) => return, Ok(Some(s)) => s, Err(e) => format!("Error: {}", e), }; + if !data_path.is_empty() { + result_string = result_string.replace(&data_path, "TEMPFILE"); + } assert!(!result_string.is_empty()); - writeln!( output, "\ntask {} '{}'. lines {}-{}:\n{}", diff --git a/third_party/move/testing-infra/transactional-test-runner/src/vm_test_harness.rs b/third_party/move/testing-infra/transactional-test-runner/src/vm_test_harness.rs index 530fb47803a82..5ec10c4b6fde4 100644 --- a/third_party/move/testing-infra/transactional-test-runner/src/vm_test_harness.rs +++ b/third_party/move/testing-infra/transactional-test-runner/src/vm_test_harness.rs @@ -9,13 +9,10 @@ use crate::{ use anyhow::{anyhow, Result}; use clap::Parser; use move_binary_format::{ - compatibility::Compatibility, - errors::{Location, VMError, VMResult}, - file_format::CompiledScript, - CompiledModule, + compatibility::Compatibility, errors::VMResult, file_format::CompiledScript, CompiledModule, }; use move_command_line_common::{ - address::ParsedAddress, files::verify_and_create_named_address_mapping, + address::ParsedAddress, env::read_bool_env_var, files::verify_and_create_named_address_mapping, }; use move_compiler::{ compiled_unit::AnnotatedCompiledUnit, @@ -88,12 +85,23 @@ pub struct AdapterPublishArgs { #[clap(long)] /// is skip the check friend link, if true, treat `friend` as `private` pub skip_check_friend_linking: bool, + /// print more complete information for VMErrors on publish + #[clap(long)] + pub verbose: bool, } #[derive(Debug, Parser)] pub struct AdapterExecuteArgs { #[clap(long)] pub check_runtime_types: bool, + /// print more complete information for VMErrors on run + #[clap(long)] + pub verbose: bool, +} + +fn move_test_debug() -> bool { + static MOVE_TEST_DEBUG: Lazy = Lazy::new(|| read_bool_env_var("MOVE_TEST_DEBUG")); + *MOVE_TEST_DEBUG } impl<'a> MoveTestAdapter<'a> for SimpleVMTestAdapter<'a> { @@ -199,6 +207,7 @@ impl<'a> MoveTestAdapter<'a> for SimpleVMTestAdapter<'a> { let id = module.self_id(); let sender = *id.address(); + let verbose = extra_args.verbose; match self.perform_session_action( gas_budget, |session, gas_status| { @@ -218,10 +227,10 @@ impl<'a> MoveTestAdapter<'a> for SimpleVMTestAdapter<'a> { VMConfig::production(), ) { Ok(()) => Ok((None, module)), - Err(e) => Err(anyhow!( + Err(vm_error) => Err(anyhow!( "Unable to publish module '{}'. 
Got VMError: {}", module.self_id(), - format_vm_error(&e, self.comparison_mode) + vm_error.format_test_output(move_test_debug() || verbose, self.comparison_mode) )), } } @@ -253,6 +262,7 @@ impl<'a> MoveTestAdapter<'a> for SimpleVMTestAdapter<'a> { .map(|a| MoveValue::Signer(*a).simple_serialize().unwrap()) .chain(args) .collect(); + let verbose = extra_args.verbose; let serialized_return_values = self .perform_session_action( gas_budget, @@ -261,10 +271,10 @@ impl<'a> MoveTestAdapter<'a> for SimpleVMTestAdapter<'a> { }, VMConfig::from(extra_args), ) - .map_err(|e| { + .map_err(|vm_error| { anyhow!( "Script execution failed with VMError: {}", - format_vm_error(&e, self.comparison_mode) + vm_error.format_test_output(move_test_debug() || verbose, self.comparison_mode) ) })?; Ok((None, serialized_return_values)) @@ -295,6 +305,7 @@ impl<'a> MoveTestAdapter<'a> for SimpleVMTestAdapter<'a> { .map(|a| MoveValue::Signer(*a).simple_serialize().unwrap()) .chain(args) .collect(); + let verbose = extra_args.verbose; let serialized_return_values = self .perform_session_action( gas_budget, @@ -305,10 +316,10 @@ impl<'a> MoveTestAdapter<'a> for SimpleVMTestAdapter<'a> { }, VMConfig::from(extra_args), ) - .map_err(|e| { + .map_err(|vm_error| { anyhow!( "Function execution failed with VMError: {}", - format_vm_error(&e, self.comparison_mode) + vm_error.format_test_output(move_test_debug() || verbose, self.comparison_mode) ) })?; Ok((None, serialized_return_values)) @@ -329,39 +340,6 @@ impl<'a> MoveTestAdapter<'a> for SimpleVMTestAdapter<'a> { } } -pub fn format_vm_error(e: &VMError, comparison_mode: bool) -> String { - let location_string = match e.location() { - Location::Undefined => "undefined".to_owned(), - Location::Script => "script".to_owned(), - Location::Module(id) => format!("0x{}::{}", id.address().short_str_lossless(), id.name()), - }; - format!( - "{{ - major_status: {major_status:?}, - sub_status: {sub_status:?}, - location: {location_string}, - indices: {indices}, - offsets: {offsets}, -}}", - major_status = e.major_status(), - sub_status = e.sub_status(), - location_string = location_string, - // TODO maybe include source map info? - indices = if comparison_mode { - // During comparison testing, abstract this data. - "redacted".to_string() - } else { - format!("{:?}", e.indices()) - }, - offsets = if comparison_mode { - // During comparison testing, abstract this data. 
- "redacted".to_string() - } else { - format!("{:?}", e.offsets()) - }, - ) -} - impl<'a> SimpleVMTestAdapter<'a> { fn perform_session_action( &mut self, diff --git a/third_party/move/tools/move-cli/Cargo.toml b/third_party/move/tools/move-cli/Cargo.toml index 1d4d3b91e2f90..df31e9ea0a1ac 100644 --- a/third_party/move/tools/move-cli/Cargo.toml +++ b/third_party/move/tools/move-cli/Cargo.toml @@ -11,6 +11,7 @@ edition = "2021" [dependencies] anyhow = "1.0.52" +bytes = "1.4.0" clap = { version = "4.3.9", features = ["derive"] } codespan-reporting = "0.11.1" colored = "2.0.0" @@ -43,7 +44,7 @@ move-ir-types = { path = "../../move-ir/types" } move-package = { path = "../move-package" } move-prover = { path = "../../move-prover" } move-resource-viewer = { path = "../move-resource-viewer" } -move-stdlib = { path = "../../move-stdlib", features = ["testing"] } +move-stdlib = { path = "../../move-stdlib" } move-symbol-pool = { path = "../../move-symbol-pool" } move-table-extension = { path = "../../extensions/move-table-extension", optional = true } move-unit-test = { path = "../move-unit-test" } diff --git a/third_party/move/tools/move-cli/src/base/test.rs b/third_party/move/tools/move-cli/src/base/test.rs index ba5f77545bec3..1539a663de300 100644 --- a/third_party/move/tools/move-cli/src/base/test.rs +++ b/third_party/move/tools/move-cli/src/base/test.rs @@ -18,6 +18,7 @@ use move_package::{ BuildConfig, CompilerConfig, }; use move_unit_test::UnitTestingConfig; +use move_vm_runtime::tracing::{LOGGING_FILE_WRITER, TRACING_ENABLED}; use move_vm_test_utils::gas_schedule::CostTable; // if unix #[cfg(target_family = "unix")] @@ -268,6 +269,10 @@ pub fn run_move_unit_tests( // Compute the coverage map. This will be used by other commands after this. if compute_coverage && !no_tests { + if *TRACING_ENABLED { + let buf_writer = &mut *LOGGING_FILE_WRITER.lock().unwrap(); + buf_writer.flush().unwrap(); + } let coverage_map = CoverageMap::from_trace_file(trace_path); output_map_to_file(coverage_map_path, &coverage_map).unwrap(); } diff --git a/third_party/move/tools/move-cli/src/sandbox/commands/publish.rs b/third_party/move/tools/move-cli/src/sandbox/commands/publish.rs index 80813761abe5c..d96e55a0c0129 100644 --- a/third_party/move/tools/move-cli/src/sandbox/commands/publish.rs +++ b/third_party/move/tools/move-cli/src/sandbox/commands/publish.rs @@ -170,7 +170,7 @@ pub fn publish( for unit in modules_to_publish { let id = module(&unit.unit)?.self_id(); let module_bytes = unit.unit.serialize(bytecode_version); - serialized_modules.push((id, module_bytes)); + serialized_modules.push((id, module_bytes.into())); } state.save_modules(&serialized_modules)?; } diff --git a/third_party/move/tools/move-cli/src/sandbox/commands/test.rs b/third_party/move/tools/move-cli/src/sandbox/commands/test.rs index 81dc118e90bbc..219dfb3153648 100644 --- a/third_party/move/tools/move-cli/src/sandbox/commands/test.rs +++ b/third_party/move/tools/move-cli/src/sandbox/commands/test.rs @@ -46,6 +46,10 @@ pub const TEST_ARGS_FILENAME: &str = "args.txt"; /// enabled in the move VM. const MOVE_VM_TRACING_ENV_VAR_NAME: &str = "MOVE_VM_TRACE"; +/// Name of the environment variable we need to set in order to flush +/// after every trace call. +const MOVE_VM_TRACING_FLUSH_ENV_VAR_NAME: &str = "MOVE_VM_TRACE_FLUSH"; + /// The default file name (inside the build output dir) for the runtime to /// dump the execution trace to. The trace will be used by the coverage tool /// if --track-cov is set. 
If --track-cov is not set, then no trace file will @@ -266,7 +270,10 @@ pub fn run_one( // then, when running , coverage will not be tracked nor printed env::remove_var(MOVE_VM_TRACING_ENV_VAR_NAME); }, - Some(path) => env::set_var(MOVE_VM_TRACING_ENV_VAR_NAME, path.as_os_str()), + Some(path) => { + env::set_var(MOVE_VM_TRACING_ENV_VAR_NAME, path.as_os_str()); + env::set_var(MOVE_VM_TRACING_FLUSH_ENV_VAR_NAME, path.as_os_str()); + }, } let cmd_output = cli_command_template().args(args_iter).output()?; diff --git a/third_party/move/tools/move-cli/src/sandbox/utils/on_disk_state_view.rs b/third_party/move/tools/move-cli/src/sandbox/utils/on_disk_state_view.rs index 2fa68c44c2842..6fc4341cedbc7 100644 --- a/third_party/move/tools/move-cli/src/sandbox/utils/on_disk_state_view.rs +++ b/third_party/move/tools/move-cli/src/sandbox/utils/on_disk_state_view.rs @@ -4,6 +4,7 @@ use crate::{BCS_EXTENSION, DEFAULT_BUILD_DIR, DEFAULT_STORAGE_DIR}; use anyhow::{anyhow, bail, Result}; +use bytes::Bytes; use move_binary_format::{ access::ModuleAccess, binary_views::BinaryIndexedView, @@ -133,12 +134,12 @@ impl OnDiskStateView { &self, addr: AccountAddress, tag: StructTag, - ) -> Result>> { + ) -> Result> { Self::get_bytes(&self.get_resource_path(addr, tag)) } /// Read the resource bytes stored on-disk at `addr`/`tag` - fn get_module_bytes(&self, module_id: &ModuleId) -> Result>> { + fn get_module_bytes(&self, module_id: &ModuleId) -> Result> { Self::get_bytes(&self.get_module_path(module_id)) } @@ -162,9 +163,9 @@ impl OnDiskStateView { } } - fn get_bytes(path: &Path) -> Result>> { + fn get_bytes(path: &Path) -> Result> { Ok(if path.exists() { - Some(fs::read(path)?) + Some(fs::read(path)?.into()) } else { None }) @@ -281,7 +282,7 @@ impl OnDiskStateView { /// Save all the modules in the local cache, re-generate mv_interfaces if required. 
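The state-view hunks above now hand module and resource blobs around as `bytes::Bytes` instead of `Vec<u8>`, which is why the call sites convert with `.into()` and the resolvers return `Option<Bytes>`. A small standalone sketch (not part of the patch) of the conversion and cheap-clone semantics the new signatures rely on:

use bytes::Bytes;

fn main() {
    // A serialized module/resource blob, as the sandbox produces it.
    let module_bytes: Vec<u8> = vec![0xA1, 0x1C, 0xEB, 0x0B];

    // `Vec<u8>` converts into `Bytes` without copying the payload (the Vec's
    // buffer is reused), which is what the `module_bytes.into()` call sites rely on.
    let blob: Bytes = module_bytes.into();

    // Cloning `Bytes` is cheap (no copy of the payload), so a resolver can hand
    // out `Option<Bytes>` straight from a cache without duplicating data.
    let shared = blob.clone();
    assert_eq!(blob, shared);

    // Reading a file still yields a Vec<u8>; `.into()`/`Bytes::from` adapts it at
    // the boundary, mirroring `fs::read(path)?.into()` in `get_bytes`.
    // "Cargo.toml" is just an arbitrary existing file for illustration.
    let from_disk: Bytes = std::fs::read("Cargo.toml").map(Bytes::from).unwrap_or_default();
    println!("read {} bytes", from_disk.len());
}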
pub fn save_modules<'a>( &self, - modules: impl IntoIterator)>, + modules: impl IntoIterator, ) -> Result<()> { for (module_id, module_bytes) in modules { self.save_module(module_id, module_bytes)?; @@ -338,7 +339,7 @@ impl ModuleResolver for OnDiskStateView { vec![] } - fn get_module(&self, module_id: &ModuleId) -> Result>, anyhow::Error> { + fn get_module(&self, module_id: &ModuleId) -> Result, anyhow::Error> { self.get_module_bytes(module_id) } } @@ -349,7 +350,7 @@ impl ResourceResolver for OnDiskStateView { address: &AccountAddress, struct_tag: &StructTag, _metadata: &[Metadata], - ) -> Result<(Option>, usize)> { + ) -> Result<(Option, usize)> { let buf = self.get_resource_bytes(*address, struct_tag.clone())?; let buf_size = resource_size(&buf); Ok((buf, buf_size)) diff --git a/third_party/move/tools/move-cli/src/sandbox/utils/package_context.rs b/third_party/move/tools/move-cli/src/sandbox/utils/package_context.rs index 98e0c72c1b3fb..644114bf69bda 100644 --- a/third_party/move/tools/move-cli/src/sandbox/utils/package_context.rs +++ b/third_party/move/tools/move-cli/src/sandbox/utils/package_context.rs @@ -58,7 +58,7 @@ impl PackageContext { let self_id = module.self_id(); let mut module_bytes = vec![]; module.serialize_for_version(bytecode_version, &mut module_bytes)?; - serialized_modules.push((self_id, module_bytes)); + serialized_modules.push((self_id, module_bytes.into())); } state.save_modules(&serialized_modules)?; diff --git a/third_party/move/tools/move-cli/tests/sandbox_tests/multi_module_publish/args.exp b/third_party/move/tools/move-cli/tests/sandbox_tests/multi_module_publish/args.exp index 22638bb3d499f..e26a988eea435 100644 --- a/third_party/move/tools/move-cli/tests/sandbox_tests/multi_module_publish/args.exp +++ b/third_party/move/tools/move-cli/tests/sandbox_tests/multi_module_publish/args.exp @@ -1,12 +1,12 @@ Command `sandbox publish --bundle --override-ordering A -v`: Found 2 modules -Invalid multi-module publishing: VMError with status LINKER_ERROR at location UNDEFINED and message Cannot find ModuleId { address: 0000000000000000000000000000000000000000000000000000000000000002, name: Identifier("B") } in data cache +Invalid multi-module publishing: VMError with status LINKER_ERROR at location UNDEFINED and message Linker Error: Cannot find ModuleId { address: 0000000000000000000000000000000000000000000000000000000000000002, name: Identifier("B") } in data cache Command `sandbox publish --bundle --override-ordering B -v`: Found 2 modules -Invalid multi-module publishing: VMError with status LINKER_ERROR at location UNDEFINED and message Cannot find ModuleId { address: 0000000000000000000000000000000000000000000000000000000000000002, name: Identifier("A") } in data cache +Invalid multi-module publishing: VMError with status LINKER_ERROR at location UNDEFINED and message Linker Error: Cannot find ModuleId { address: 0000000000000000000000000000000000000000000000000000000000000002, name: Identifier("A") } in data cache Command `sandbox publish --bundle --override-ordering B --override-ordering A -v`: Found 2 modules -Invalid multi-module publishing: VMError with status LINKER_ERROR at location UNDEFINED and message Cannot find ModuleId { address: 0000000000000000000000000000000000000000000000000000000000000002, name: Identifier("A") } in data cache +Invalid multi-module publishing: VMError with status LINKER_ERROR at location UNDEFINED and message Linker Error: Cannot find ModuleId { address: 0000000000000000000000000000000000000000000000000000000000000002, name: 
Identifier("A") } in data cache Command `sandbox publish --bundle --override-ordering A --override-ordering B -v`: Found 2 modules Publishing a new module 0000000000000000000000000000000000000000000000000000000000000002::A (wrote 121 bytes) diff --git a/third_party/move/tools/move-coverage/src/coverage_map.rs b/third_party/move/tools/move-coverage/src/coverage_map.rs index 9bb3345447d79..84f3417edd099 100644 --- a/third_party/move/tools/move-coverage/src/coverage_map.rs +++ b/third_party/move/tools/move-coverage/src/coverage_map.rs @@ -68,7 +68,8 @@ impl CoverageMap { for line in BufReader::new(file).lines() { let line = line.unwrap(); let mut splits = line.split(','); - let exec_id = splits.next().unwrap(); + // Use a dummy key so that the data structure of the coverage map does not need to be changed + let exec_id = "dummy_exec_id"; let context = splits.next().unwrap(); let pc = splits.next().unwrap().parse::().unwrap(); @@ -277,7 +278,8 @@ impl TraceMap { for line in BufReader::new(file).lines() { let line = line.unwrap(); let mut splits = line.split(','); - let exec_id = splits.next().unwrap(); + // Use a dummy key so that the data structure of the coverage map does not need to be changed + let exec_id = "dummy_exec_id"; let context = splits.next().unwrap(); let pc = splits.next().unwrap().parse::().unwrap(); diff --git a/third_party/move/tools/move-resource-viewer/src/lib.rs b/third_party/move/tools/move-resource-viewer/src/lib.rs index 98e79c18021d6..7600466390f73 100644 --- a/third_party/move/tools/move-resource-viewer/src/lib.rs +++ b/third_party/move/tools/move-resource-viewer/src/lib.rs @@ -92,7 +92,11 @@ impl<'a, T: MoveResolver + ?Sized> MoveValueAnnotator<'a, T> { // TODO pub fn get_resource_bytes(&self, addr: &AccountAddress, tag: &StructTag) -> Option> { - self.cache.state.get_resource(addr, tag).ok()? + self.cache + .state + .get_resource(addr, tag) + .ok()? 
+ .map(|b| b.to_vec()) } pub fn get_module(&self, module: &ModuleId) -> Result> { diff --git a/third_party/move/tools/move-unit-test/Cargo.toml b/third_party/move/tools/move-unit-test/Cargo.toml index 3f54f52d4f463..c09e5c8bc4c6a 100644 --- a/third_party/move/tools/move-unit-test/Cargo.toml +++ b/third_party/move/tools/move-unit-test/Cargo.toml @@ -39,7 +39,7 @@ move-vm-types = { path = "../../move-vm/types" } # EVM-specific dependencies move-to-yul = { path = "../../evm/move-to-yul", optional = true } -evm = { version = "0.33.1", optional = true } +evm = { version = "0.36.0", optional = true } primitive-types = { version = "0.10.1", optional = true } [dev-dependencies] diff --git a/third_party/move/tools/move-unit-test/src/test_reporter.rs b/third_party/move/tools/move-unit-test/src/test_reporter.rs index 52918c1a6f82a..9bb785cd00436 100644 --- a/third_party/move/tools/move-unit-test/src/test_reporter.rs +++ b/third_party/move/tools/move-unit-test/src/test_reporter.rs @@ -9,7 +9,7 @@ use move_binary_format::{ access::ModuleAccess, errors::{ExecutionState, Location, VMError, VMResult}, }; -use move_command_line_common::files::FileHash; +use move_command_line_common::{env::read_bool_env_var, files::FileHash}; pub use move_compiler::unit_test::ExpectedMoveError as MoveError; use move_compiler::{ diagnostics::{self, Diagnostic, Diagnostics}, @@ -18,6 +18,7 @@ use move_compiler::{ use move_core_types::{effects::ChangeSet, language_storage::ModuleId, vm_status::StatusType}; use move_ir_types::location::Loc; use move_symbol_pool::Symbol; +use once_cell::sync::Lazy; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, io::{Result, Write}, @@ -344,7 +345,7 @@ impl TestFailure { Some(vm_error) => vm_error, }; - let diags = match vm_error.location() { + let mut diags = match vm_error.location() { Location::Module(module_id) => { let diag_opt = vm_error.offsets().first().and_then(|(fdef_idx, offset)| { let function_source_map = test_plan @@ -375,6 +376,12 @@ impl TestFailure { _ => base_message, }; + static MOVE_TEST_DEBUG: Lazy = Lazy::new(|| read_bool_env_var("MOVE_TEST_DEBUG")); + if *MOVE_TEST_DEBUG { + let full_vm_error_description = vm_error.format_test_output(*MOVE_TEST_DEBUG, false); + diags = diags + &full_vm_error_description; + } + match vm_error.exec_state() { None => diags, Some(exec_state) => { diff --git a/third_party/move/tools/move-unit-test/tests/test_sources/native_signer_creation.exp b/third_party/move/tools/move-unit-test/tests/test_sources/native_signer_creation.exp index c0a93f80e49b8..e44b7bd4cd3b5 100644 --- a/third_party/move/tools/move-unit-test/tests/test_sources/native_signer_creation.exp +++ b/third_party/move/tools/move-unit-test/tests/test_sources/native_signer_creation.exp @@ -5,9 +5,9 @@ Running Move unit tests 0x1::M::test_determinisim Output: Ok(Changes { accounts: {} }) 0x1::M::test_doesnt_exist -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0100000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0200000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, 
resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0300000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0400000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0500000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0600000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0700000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0800000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0900000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0100000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0200000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0300000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0400000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 
0500000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0600000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0700000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0800000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0900000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }} }) 0x1::M::test_exists -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0100000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0200000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0300000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0400000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0500000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0600000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0700000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 
0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0800000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0900000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0100000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0200000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0300000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0400000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0500000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0600000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0700000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0800000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0900000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }} }) Test failures: diff --git 
a/third_party/move/tools/move-unit-test/tests/test_sources/native_signer_creation.storage.exp b/third_party/move/tools/move-unit-test/tests/test_sources/native_signer_creation.storage.exp index 3c8f2c820b090..1cbe031ef2d99 100644 --- a/third_party/move/tools/move-unit-test/tests/test_sources/native_signer_creation.storage.exp +++ b/third_party/move/tools/move-unit-test/tests/test_sources/native_signer_creation.storage.exp @@ -5,9 +5,9 @@ Running Move unit tests 0x1::M::test_determinisim Output: Ok(Changes { accounts: {} }) 0x1::M::test_doesnt_exist -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0100000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0200000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0300000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0400000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0500000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0600000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0700000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0800000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0900000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 
0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0100000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0200000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0300000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0400000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0500000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0600000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0700000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0800000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0900000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }} }) 0x1::M::test_exists -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0100000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0200000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 
0300000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0400000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0500000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0600000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0700000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0800000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }, 0900000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New([0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0100000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0200000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0300000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0400000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0500000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: 
Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0600000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0700000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0800000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }, 0900000000000000000000000000000000000000000000000000000000000000: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("M"), name: Identifier("A"), type_params: [] }: New(b"\0")} }} }) Test failures: diff --git a/third_party/move/tools/move-unit-test/tests/test_sources/other_expected_failures.exp b/third_party/move/tools/move-unit-test/tests/test_sources/other_expected_failures.exp index 7bb84caf4bd80..544054cf3b1f9 100644 --- a/third_party/move/tools/move-unit-test/tests/test_sources/other_expected_failures.exp +++ b/third_party/move/tools/move-unit-test/tests/test_sources/other_expected_failures.exp @@ -16,5 +16,5 @@ Output: Ok(Changes { accounts: {} }) 0x42::m::t4 Output: Ok(Changes { accounts: {} }) 0x42::m::t5 -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000042, module: Identifier("m"), name: Identifier("R"), type_params: [] }: New([0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000042, module: Identifier("m"), name: Identifier("R"), type_params: [] }: New(b"\0")} }} }) Test result: OK. 
Total tests: 6; passed: 6; failed: 0 diff --git a/third_party/move/tools/move-unit-test/tests/test_sources/proposal_test.exp b/third_party/move/tools/move-unit-test/tests/test_sources/proposal_test.exp index 493d68d15e25c..73e00e05af9fa 100644 --- a/third_party/move/tools/move-unit-test/tests/test_sources/proposal_test.exp +++ b/third_party/move/tools/move-unit-test/tests/test_sources/proposal_test.exp @@ -12,11 +12,11 @@ Output: Ok(Changes { accounts: {} }) 0x1::Module::tests_aborts Output: Ok(Changes { accounts: {} }) 0x1::Module::tests_b -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New(b"\x05\0\0\0\0\0\0\0")} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New(b"\x05\0\0\0\0\0\0\0")} }} }) 0x1::Module::tests_c -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New([6, 0, 0, 0, 0, 0, 0, 0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New(b"\x05\0\0\0\0\0\0\0")} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New(b"\x06\0\0\0\0\0\0\0")} }} }) 0x1::Module::tests_d -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [U64] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { 
modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [Bool] }: New([6, 0, 0, 0, 0, 0, 0, 0]), StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [U64] }: New([5, 0, 0, 0, 0, 0, 0, 0]), StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [Struct(StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("C"), type_params: [U64] })] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [U64] }: New(b"\x05\0\0\0\0\0\0\0")} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [Bool] }: New(b"\x06\0\0\0\0\0\0\0"), StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [U64] }: New(b"\x05\0\0\0\0\0\0\0"), StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [Struct(StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("C"), type_params: [U64] })] }: New(b"\x05\0\0\0\0\0\0\0")} }} }) Test failures: diff --git a/third_party/move/tools/move-unit-test/tests/test_sources/proposal_test.storage.exp b/third_party/move/tools/move-unit-test/tests/test_sources/proposal_test.storage.exp index 86e62bf2749c1..8f76629c838bb 100644 --- a/third_party/move/tools/move-unit-test/tests/test_sources/proposal_test.storage.exp +++ b/third_party/move/tools/move-unit-test/tests/test_sources/proposal_test.storage.exp @@ -12,11 +12,11 @@ Output: Ok(Changes { accounts: {} }) 0x1::Module::tests_aborts Output: Ok(Changes { accounts: {} }) 0x1::Module::tests_b -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New(b"\x05\0\0\0\0\0\0\0")} }, 0000000000000000000000000000000000000000000000000000000000000002: 
AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New(b"\x05\0\0\0\0\0\0\0")} }} }) 0x1::Module::tests_c -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New([6, 0, 0, 0, 0, 0, 0, 0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New(b"\x05\0\0\0\0\0\0\0")} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("A"), type_params: [] }: New(b"\x06\0\0\0\0\0\0\0")} }} }) 0x1::Module::tests_d -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [U64] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [Bool] }: New([6, 0, 0, 0, 0, 0, 0, 0]), StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [U64] }: New([5, 0, 0, 0, 0, 0, 0, 0]), StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [Struct(StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("C"), type_params: [U64] })] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [U64] }: New(b"\x05\0\0\0\0\0\0\0")} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [Bool] }: New(b"\x06\0\0\0\0\0\0\0"), StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [U64] }: 
New(b"\x05\0\0\0\0\0\0\0"), StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("B"), type_params: [Struct(StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("Module"), name: Identifier("C"), type_params: [U64] })] }: New(b"\x05\0\0\0\0\0\0\0")} }} }) Test failures: diff --git a/third_party/move/tools/move-unit-test/tests/test_sources/storage_on_error_empty_and_non_empty.exp b/third_party/move/tools/move-unit-test/tests/test_sources/storage_on_error_empty_and_non_empty.exp index f18e6e1613fa2..ca88a6bcea28a 100644 --- a/third_party/move/tools/move-unit-test/tests/test_sources/storage_on_error_empty_and_non_empty.exp +++ b/third_party/move/tools/move-unit-test/tests/test_sources/storage_on_error_empty_and_non_empty.exp @@ -7,13 +7,13 @@ Running Move unit tests 0x1::A::a Output: Ok(Changes { accounts: {} }) 0x1::A::b -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("A"), name: Identifier("A"), type_params: [] }: New([0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("A"), name: Identifier("A"), type_params: [] }: New(b"\0")} }} }) 0x1::A::c -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("A"), name: Identifier("A"), type_params: [] }: New([0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("A"), name: Identifier("A"), type_params: [] }: New(b"\0")} }} }) 0x1::A::x Output: Ok(Changes { accounts: {} }) 0x1::A::y -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("A"), name: Identifier("A"), type_params: [] }: New([0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("A"), name: Identifier("A"), type_params: [] }: New(b"\0")} }} }) Test failures: diff --git a/third_party/move/tools/move-unit-test/tests/test_sources/storage_on_error_empty_and_non_empty.storage.exp b/third_party/move/tools/move-unit-test/tests/test_sources/storage_on_error_empty_and_non_empty.storage.exp index 70d30bae8057a..0dbcfd97af67f 100644 --- a/third_party/move/tools/move-unit-test/tests/test_sources/storage_on_error_empty_and_non_empty.storage.exp +++ b/third_party/move/tools/move-unit-test/tests/test_sources/storage_on_error_empty_and_non_empty.storage.exp @@ -7,13 +7,13 @@ Running Move unit tests 0x1::A::a Output: Ok(Changes { accounts: {} }) 0x1::A::b -Output: Ok(Changes { accounts: 
{0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("A"), name: Identifier("A"), type_params: [] }: New([0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("A"), name: Identifier("A"), type_params: [] }: New(b"\0")} }} }) 0x1::A::c -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("A"), name: Identifier("A"), type_params: [] }: New([0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("A"), name: Identifier("A"), type_params: [] }: New(b"\0")} }} }) 0x1::A::x Output: Ok(Changes { accounts: {} }) 0x1::A::y -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("A"), name: Identifier("A"), type_params: [] }: New([0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("A"), name: Identifier("A"), type_params: [] }: New(b"\0")} }} }) Test failures: diff --git a/third_party/move/tools/move-unit-test/tests/test_sources/storage_test.exp b/third_party/move/tools/move-unit-test/tests/test_sources/storage_test.exp index 5ae36ba252d41..e61be6e374246 100644 --- a/third_party/move/tools/move-unit-test/tests/test_sources/storage_test.exp +++ b/third_party/move/tools/move-unit-test/tests/test_sources/storage_test.exp @@ -2,7 +2,7 @@ Running Move unit tests [ PASS ] 0x1::B::tests_a [ PASS ] 0x1::B::tests_b 0x1::B::tests_a -Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [Bool] }: New([0])} }} }) +Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New(b"\x05\0\0\0\0\0\0\0")} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: 
diff --git a/third_party/move/tools/move-unit-test/tests/test_sources/storage_test.exp b/third_party/move/tools/move-unit-test/tests/test_sources/storage_test.exp
index 5ae36ba252d41..e61be6e374246 100644
--- a/third_party/move/tools/move-unit-test/tests/test_sources/storage_test.exp
+++ b/third_party/move/tools/move-unit-test/tests/test_sources/storage_test.exp
@@ -2,7 +2,7 @@ Running Move unit tests
 [ PASS    ] 0x1::B::tests_a
 [ PASS    ] 0x1::B::tests_b
 0x1::B::tests_a
-Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [Bool] }: New([0])} }} })
+Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New(b"\x05\0\0\0\0\0\0\0")} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [Bool] }: New(b"\0")} }} })
 0x1::B::tests_b
-Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New([6, 0, 0, 0, 0, 0, 0, 0])} }} })
+Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New(b"\x05\0\0\0\0\0\0\0")} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New(b"\x06\0\0\0\0\0\0\0")} }} })
 Test result: OK. Total tests: 2; passed: 2; failed: 0
diff --git a/third_party/move/tools/move-unit-test/tests/test_sources/storage_test.storage.exp b/third_party/move/tools/move-unit-test/tests/test_sources/storage_test.storage.exp
index 5ae36ba252d41..e61be6e374246 100644
--- a/third_party/move/tools/move-unit-test/tests/test_sources/storage_test.storage.exp
+++ b/third_party/move/tools/move-unit-test/tests/test_sources/storage_test.storage.exp
@@ -2,7 +2,7 @@ Running Move unit tests
 [ PASS    ] 0x1::B::tests_a
 [ PASS    ] 0x1::B::tests_b
 0x1::B::tests_a
-Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [Bool] }: New([0])} }} })
+Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New(b"\x05\0\0\0\0\0\0\0")} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [Bool] }: New(b"\0")} }} })
 0x1::B::tests_b
-Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New([5, 0, 0, 0, 0, 0, 0, 0])} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New([6, 0, 0, 0, 0, 0, 0, 0])} }} })
+Output: Ok(Changes { accounts: {0000000000000000000000000000000000000000000000000000000000000001: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New(b"\x05\0\0\0\0\0\0\0")} }, 0000000000000000000000000000000000000000000000000000000000000002: AccountChanges { modules: {}, resources: {StructTag { address: 0000000000000000000000000000000000000000000000000000000000000001, module: Identifier("B"), name: Identifier("A"), type_params: [U64] }: New(b"\x06\0\0\0\0\0\0\0")} }} })
 Test result: OK. Total tests: 2; passed: 2; failed: 0
diff --git a/types/Cargo.toml b/types/Cargo.toml
index fb27effe6cddf..b2510c0821d06 100644
--- a/types/Cargo.toml
+++ b/types/Cargo.toml
@@ -19,6 +19,7 @@ aptos-crypto = { workspace = true }
 aptos-crypto-derive = { workspace = true }
 arr_macro = { workspace = true }
 bcs = { workspace = true }
+bytes = { workspace = true }
 chrono = { workspace = true }
 derivative = { workspace = true }
 hex = { workspace = true }
diff --git a/types/src/access_path.rs b/types/src/access_path.rs
index c8a1919f8994b..5a8f020d37ef9 100644
--- a/types/src/access_path.rs
+++ b/types/src/access_path.rs
@@ -156,6 +156,10 @@ impl AccessPath {
         matches!(self.get_path(), Path::Code(_))
     }
 
+    pub fn is_resource_group(&self) -> bool {
+        matches!(self.get_path(), Path::ResourceGroup(_))
+    }
+
     pub fn size(&self) -> usize {
         self.address.as_ref().len() + self.path.len()
     }
diff --git a/types/src/account_config/resources/coin_info.rs b/types/src/account_config/resources/coin_info.rs
index 31450306ed5b5..3f3a402b44f91 100644
--- a/types/src/account_config/resources/coin_info.rs
+++ b/types/src/account_config/resources/coin_info.rs
@@ -127,11 +127,11 @@ impl CoinInfoResource {
         let write_set = vec![
             (
                 StateKey::access_path(ap),
-                WriteOp::Modification(bcs::to_bytes(&self).unwrap()),
+                WriteOp::Modification(bcs::to_bytes(&self).unwrap().into()),
             ),
             (
                 value_state_key,
-                WriteOp::Modification(bcs::to_bytes(&0_u128).unwrap()),
+                WriteOp::Modification(bcs::to_bytes(&0_u128).unwrap().into()),
             ),
         ];
         Ok(WriteSetMut::new(write_set).freeze().unwrap())
diff --git a/types/src/account_state.rs b/types/src/account_state.rs
index 42ec2344cb558..c5593701d8725 100644
--- a/types/src/account_state.rs
+++ b/types/src/account_state.rs
@@ -12,6 +12,7 @@ use crate::{
     },
 };
 use anyhow::{anyhow, Error, Result};
+use bytes::Bytes;
 use move_core_types::{
     account_address::AccountAddress,
     language_storage::{ModuleId, StructTag},
@@ -23,7 +24,6 @@ use std::{
     convert::TryFrom,
     fmt,
 };
-
 #[derive(Clone, Deserialize, PartialEq, Eq, Serialize)]
 pub struct AccountState {
     address: AccountAddress,
@@ -120,7 +120,7 @@ impl fmt::Debug for AccountState {
 }
 
 impl AccountView for AccountState {
-    fn get_state_value(&self, _: &StateKey) -> Result<Option<Vec<u8>>> {
+    fn get_state_value(&self, _: &StateKey) -> Result<Option<Bytes>> {
         unimplemented!()
     }
 
@@ -141,7 +141,7 @@ impl TryFrom<&StateValue> for AccountState {
     type Error = Error;
 
     fn try_from(state_value: &StateValue) -> Result<Self> {
-        AccountState::try_from(state_value.bytes()).map_err(Into::into)
+        AccountState::try_from(state_value.bytes().as_ref()).map_err(Into::into)
     }
 }
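The pattern in coin_info.rs and account_state.rs above recurs throughout the patch: bcs still serializes into a Vec<u8>, which is then wrapped into a Bytes handle with .into(), and slice-based consumers keep working through as_ref(). A self-contained sketch of that round trip (illustrative only; assumes the bcs and bytes crates are available):

    use bytes::Bytes;

    fn main() {
        // bcs::to_bytes still yields a Vec<u8>; `.into()` moves it into a ref-counted Bytes.
        let serialized: Vec<u8> = bcs::to_bytes(&0_u128).unwrap();
        let shared: Bytes = serialized.into();

        // Cloning Bytes is cheap: it bumps a reference count instead of copying the buffer.
        let another_handle = shared.clone();
        assert_eq!(shared, another_handle);

        // &Bytes exposes &[u8] via as_ref(), so slice-based APIs such as bcs::from_bytes
        // keep working unchanged.
        let decoded: u128 = bcs::from_bytes(shared.as_ref()).unwrap();
        assert_eq!(decoded, 0_u128);
    }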
diff --git a/types/src/account_view.rs b/types/src/account_view.rs
index edba258a4a532..fe2b21f357d90 100644
--- a/types/src/account_view.rs
+++ b/types/src/account_view.rs
@@ -9,11 +9,11 @@ use crate::{
     validator_config::{ValidatorConfig, ValidatorOperatorConfigResource},
 };
 use anyhow::anyhow;
+use bytes::Bytes;
 use move_core_types::{account_address::AccountAddress, move_resource::MoveResource};
 use serde::de::DeserializeOwned;
-
 pub trait AccountView {
-    fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result<Option<Vec<u8>>>;
+    fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result<Option<Bytes>>;
 
     fn get_account_address(&self) -> anyhow::Result<Option<AccountAddress>>;
diff --git a/types/src/bytes.rs b/types/src/bytes.rs
new file mode 100644
index 0000000000000..d90977e6ec2a0
--- /dev/null
+++ b/types/src/bytes.rs
@@ -0,0 +1,16 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+#![forbid(unsafe_code)]
+
+use bytes::Bytes;
+
+pub trait NumToBytes {
+    fn le_bytes(&self) -> Bytes;
+}
+
+impl NumToBytes for u64 {
+    fn le_bytes(&self) -> Bytes {
+        Bytes::copy_from_slice(&self.to_le_bytes())
+    }
+}
diff --git a/types/src/fee_statement.rs b/types/src/fee_statement.rs
index 70fc8ba991d52..7ec7e68e3d69e 100644
--- a/types/src/fee_statement.rs
+++ b/types/src/fee_statement.rs
@@ -7,14 +7,26 @@ use serde::{Deserialize, Serialize};
 /// The structure is:
 ///
 /// - Net charge or refund (not in the statement)
-///   - total charge: total_charge_gas_units, matches `gas_used` in the on-chain `TransactionInfo`.
-///     - gas charge for execution (CPU time): execution_gas_units
-///     - gas charge for IO (storage random access): io_gas_units
-///     - storage fee charge (storage space): storage_fee_octas, when included in total_charge, this number is converted to gas units according to the user specified gas unit price.
-///   - storage deletion refund: storage_fee_refund_octas, this is not included in `gas_used` or `total_charge_gas_units`, the net charge / refund is calculated by total_charge_gas_units * gas_unit_price - storage_fee_refund_octas.
+///   - total charge: total_charge_gas_units, matches `gas_used` in the on-chain `TransactionInfo`.
+///     This is the sum of the sub-items below. Notice that there is potential precision loss during
+///     the conversion between internal and external gas units and between the native token and gas
+///     units, so it is possible that the numbers don't add up exactly. -- This number is the final
+///     charge, while the breakdown is merely informational.
+///     - gas charge for execution (CPU time): `execution_gas_units`
+///     - gas charge for IO (storage random access): `io_gas_units`
+///     - storage fee charge (storage space): `storage_fee_octas`; to be included in
+///       `total_charge_gas_units`, this number is converted to gas units according to the
+///       user-specified `gas_unit_price` on the transaction.
+///   - storage deletion refund: `storage_fee_refund_octas`, this is not included in `gas_used` or
+///     `total_charge_gas_units`, the net charge / refund is calculated by
+///     `total_charge_gas_units` * `gas_unit_price` - `storage_fee_refund_octas`.
+///
+/// This is meant to be emitted as a module event.
+///
+/// (keep this doc in sync with the `struct FeeStatement` in Move.)
 #[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
 pub struct FeeStatement {
-    /// Total gas charge, not including
+    /// Total gas charge.
     total_charge_gas_units: u64,
     /// Execution gas charge.
     execution_gas_units: u64,
diff --git a/types/src/lib.rs b/types/src/lib.rs
index ea758ebec8881..847156d629845 100644
--- a/types/src/lib.rs
+++ b/types/src/lib.rs
@@ -53,6 +53,7 @@ pub use utility_coin::*;
 pub mod account_view;
 pub mod aggregate_signature;
 pub mod block_executor;
+pub mod bytes;
 pub mod state_store;
 #[cfg(test)]
 mod unit_tests;
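The net charge / refund rule spelled out in the FeeStatement doc comment above reduces to one line of arithmetic. A small sketch with hypothetical names (FeeStatementExample and net_charge_octas are not part of the patch):

    // Amounts are in octas except the gas counter, which is in external gas units.
    struct FeeStatementExample {
        total_charge_gas_units: u64,
        storage_fee_refund_octas: u64,
    }

    // Positive result: net charge in octas; negative result: net refund.
    fn net_charge_octas(fee: &FeeStatementExample, gas_unit_price: u64) -> i128 {
        (fee.total_charge_gas_units as i128) * (gas_unit_price as i128)
            - (fee.storage_fee_refund_octas as i128)
    }

    fn main() {
        let fee = FeeStatementExample {
            total_charge_gas_units: 700,
            storage_fee_refund_octas: 50_000,
        };
        // 700 gas units at 100 octas each = 70_000 octas charged, minus a 50_000 octa refund.
        assert_eq!(net_charge_octas(&fee, 100), 20_000);
    }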
diff --git a/types/src/on_chain_config/mod.rs b/types/src/on_chain_config/mod.rs
index ba3b59fdaf971..c8774166a0c78 100644
--- a/types/src/on_chain_config/mod.rs
+++ b/types/src/on_chain_config/mod.rs
@@ -8,6 +8,7 @@ use crate::{
     event::{EventHandle, EventKey},
 };
 use anyhow::{format_err, Result};
+use bytes::Bytes;
 use move_core_types::{
     ident_str,
     identifier::{IdentStr, Identifier},
@@ -123,7 +124,7 @@ impl OnChainConfigPayload {
 
 /// Trait to be implemented by a storage type from which to read on-chain configs
 pub trait ConfigStorage {
-    fn fetch_config(&self, access_path: AccessPath) -> Option<Vec<u8>>;
+    fn fetch_config(&self, access_path: AccessPath) -> Option<Bytes>;
 }
 
 /// Trait to be implemented by a Rust struct representation of an on-chain config
diff --git a/types/src/proptest_types.rs b/types/src/proptest_types.rs
index 9b55dc1284141..ad0626ee10616 100644
--- a/types/src/proptest_types.rs
+++ b/types/src/proptest_types.rs
@@ -38,6 +38,7 @@ use aptos_crypto::{
     HashValue,
 };
 use arr_macro::arr;
+use bytes::Bytes;
 use move_core_types::language_storage::TypeTag;
 use proptest::{
     collection::{vec, SizeRange},
@@ -55,7 +56,7 @@ use std::{
 
 impl WriteOp {
     pub fn value_strategy() -> impl Strategy<Value = Self> {
-        vec(any::<u8>(), 0..64).prop_map(WriteOp::Modification)
+        vec(any::<u8>(), 0..64).prop_map(|bytes| WriteOp::Modification(bytes.into()))
     }
 
     pub fn deletion_strategy() -> impl Strategy<Value = Self> {
@@ -806,9 +807,9 @@ impl TransactionToCommitGen {
                 (
                     (
                         state_key.clone(),
-                        Some(StateValue::new_legacy(value.clone())),
+                        Some(StateValue::new_legacy(Bytes::copy_from_slice(&value))),
                     ),
-                    (state_key, WriteOp::Modification(value)),
+                    (state_key, WriteOp::Modification(value.into())),
                 )
             })
         })
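With fetch_config now handing back a Bytes handle instead of a fresh Vec<u8>, an implementation backed by an in-memory map can return a zero-copy clone. A toy implementation under assumed, simplified types (the HashMap-backed store and the String access path are stand-ins, not part of the patch):

    use bytes::Bytes;
    use std::collections::HashMap;

    // Stand-in for the real AccessPath type, to keep the sketch self-contained.
    type AccessPath = String;

    trait ConfigStorage {
        fn fetch_config(&self, access_path: AccessPath) -> Option<Bytes>;
    }

    struct InMemoryConfigStorage {
        configs: HashMap<AccessPath, Bytes>,
    }

    impl ConfigStorage for InMemoryConfigStorage {
        // Cloning a Bytes value is a reference-count bump, so this returns a cheap
        // handle onto the stored buffer rather than copying it.
        fn fetch_config(&self, access_path: AccessPath) -> Option<Bytes> {
            self.configs.get(&access_path).cloned()
        }
    }

    fn main() {
        let mut configs = HashMap::new();
        configs.insert("0x1::Version".to_string(), Bytes::from_static(b"\x07"));
        let storage = InMemoryConfigStorage { configs };
        assert!(storage.fetch_config("0x1::Version".to_string()).is_some());
    }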
diff --git a/types/src/state_store/state_value.rs b/types/src/state_store/state_value.rs
index 38329b5a4f0d4..2bece10872cce 100644
--- a/types/src/state_store/state_value.rs
+++ b/types/src/state_store/state_value.rs
@@ -10,11 +10,11 @@ use aptos_crypto::{
     HashValue,
 };
 use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher};
+use bytes::Bytes;
 use once_cell::sync::OnceCell;
 #[cfg(any(test, feature = "fuzzing"))]
 use proptest::{arbitrary::Arbitrary, prelude::*};
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
-
 #[derive(
     BCSCryptoHash,
     Clone,
@@ -35,6 +35,9 @@ pub enum StateValueMetadata {
     },
 }
 
+// To avoid nested options when fetching a resource and its metadata.
+pub type StateValueMetadataKind = Option<StateValueMetadata>;
+
 impl StateValueMetadata {
     pub fn new(deposit: u64, creation_time_usecs: &CurrentTimeMicroseconds) -> Self {
         Self::V0 {
@@ -85,10 +88,9 @@ impl Eq for StateValue {}
 )]
 #[serde(rename = "StateValue")]
 pub enum StateValueInner {
-    V0(#[serde(with = "serde_bytes")] Vec<u8>),
+    V0(Bytes),
     WithMetadata {
-        #[serde(with = "serde_bytes")]
-        data: Vec<u8>,
+        data: Bytes,
         metadata: StateValueMetadata,
     },
 }
@@ -99,7 +101,9 @@ impl Arbitrary for StateValue {
     type Strategy = BoxedStrategy<Self>;
 
     fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
-        any::<Vec<u8>>().prop_map(StateValue::new_legacy).boxed()
+        any::<Vec<u8>>()
+            .prop_map(|bytes| StateValue::new_legacy(bytes.into()))
+            .boxed()
     }
 }
 
@@ -124,11 +128,11 @@ impl Serialize for StateValue {
 }
 
 impl StateValue {
-    pub fn new_legacy(bytes: Vec<u8>) -> Self {
+    pub fn new_legacy(bytes: Bytes) -> Self {
         Self::new_impl(StateValueInner::V0(bytes))
     }
 
-    pub fn new_with_metadata(data: Vec<u8>, metadata: StateValueMetadata) -> Self {
+    pub fn new_with_metadata(data: Bytes, metadata: StateValueMetadata) -> Self {
         Self::new_impl(StateValueInner::WithMetadata { data, metadata })
     }
 
@@ -141,29 +145,37 @@ impl StateValue {
         self.bytes().len()
     }
 
-    pub fn bytes(&self) -> &[u8] {
+    pub fn bytes(&self) -> &Bytes {
         match &self.inner {
             StateValueInner::V0(data) | StateValueInner::WithMetadata { data, .. } => data,
         }
     }
 
-    pub fn into_bytes(self) -> Vec<u8> {
+    pub fn into_metadata(self) -> Option<StateValueMetadata> {
         match self.inner {
-            StateValueInner::V0(data) | StateValueInner::WithMetadata { data, .. } => data,
+            StateValueInner::V0(_) => None,
+            StateValueInner::WithMetadata { metadata, .. } => Some(metadata),
         }
     }
 
-    pub fn into_metadata(self) -> Option<StateValueMetadata> {
+    pub fn into(self) -> (Option<StateValueMetadata>, Bytes) {
         match self.inner {
-            StateValueInner::V0(_) => None,
-            StateValueInner::WithMetadata { metadata, .. } => Some(metadata),
+            StateValueInner::V0(bytes) => (None, bytes),
+            StateValueInner::WithMetadata { data, metadata } => (Some(metadata), data),
         }
     }
 }
 
-#[cfg(any(test, feature = "fuzzing"))]
+// #[cfg(any(test, feature = "fuzzing"))]
 impl From<Vec<u8>> for StateValue {
     fn from(bytes: Vec<u8>) -> Self {
+        StateValue::new_legacy(bytes.into())
+    }
+}
+
+#[cfg(any(test, feature = "fuzzing"))]
+impl From<Bytes> for StateValue {
+    fn from(bytes: Bytes) -> Self {
         StateValue::new_legacy(bytes)
     }
 }
diff --git a/types/src/transaction/mod.rs b/types/src/transaction/mod.rs
index a38742bfb9f2b..f979f24500f4a 100644
--- a/types/src/transaction/mod.rs
+++ b/types/src/transaction/mod.rs
@@ -1057,6 +1057,15 @@ impl TransactionOutput {
         &self.write_set
     }
 
+    // This is a special function to update the total supply in the write set. 'TransactionOutput'
+    // already has a materialized write set, but with sharding support for total_supply we want to
+    // update the total supply in the write set by aggregating the total supply deltas from each
+    // shard. However, it is costly to materialize the entire write set again, hence this in-place
+    // update hack.
+    pub fn update_total_supply(&mut self, value: u128) {
+        self.write_set.update_total_supply(value);
+    }
+
     pub fn events(&self) -> &[ContractEvent] {
         &self.events
     }
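To make the in-place total_supply patching above concrete: with sharded execution each shard contributes a delta against the coin supply, the coordinator folds the deltas into a final value, and that value is what update_total_supply writes back. A sketch of the aggregation step only, with a hypothetical helper name (aggregate_total_supply is not part of the patch):

    // Folds per-shard supply deltas into the final value that would be passed to
    // TransactionOutput::update_total_supply. Illustrative only.
    fn aggregate_total_supply(base_supply: u128, shard_deltas: &[i128]) -> u128 {
        let net: i128 = shard_deltas.iter().sum();
        // Kept small for the example; a real implementation would use checked arithmetic.
        (base_supply as i128 + net) as u128
    }

    fn main() {
        let final_supply = aggregate_total_supply(1_000_000, &[-10, 25, -5]);
        assert_eq!(final_supply, 1_000_010);
    }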
diff --git a/types/src/write_set.rs b/types/src/write_set.rs
index 23525ac8256b5..9eca13dd046ec 100644
--- a/types/src/write_set.rs
+++ b/types/src/write_set.rs
@@ -11,25 +11,39 @@ use crate::state_store::{
 };
 use anyhow::{bail, Result};
 use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher};
+use bytes::Bytes;
+use once_cell::sync::Lazy;
 use serde::{Deserialize, Serialize};
 use std::{
     collections::{btree_map, BTreeMap},
-    ops::Deref,
+    ops::{Deref, DerefMut},
 };
 
+// Note: this does not have to stay a constant; if it ever changes, the key can be read from
+// genesis directly instead.
+pub static TOTAL_SUPPLY_STATE_KEY: Lazy<StateKey> = Lazy::new(|| {
+    StateKey::table_item(
+        "1b854694ae746cdbd8d44186ca4929b2b337df21d1c74633be19b2710552fdca"
+            .parse()
+            .unwrap(),
+        vec![
+            6, 25, 220, 41, 160, 170, 200, 250, 20, 103, 20, 5, 142, 141, 214, 210, 208, 243, 189,
+            245, 246, 51, 25, 7, 191, 145, 243, 172, 216, 30, 105, 53,
+        ],
+    )
+});
+
 #[derive(Clone, Eq, Hash, PartialEq, Serialize, Deserialize)]
 pub enum WriteOp {
-    Creation(#[serde(with = "serde_bytes")] Vec<u8>),
-    Modification(#[serde(with = "serde_bytes")] Vec<u8>),
+    Creation(Bytes),
+    Modification(Bytes),
     Deletion,
     CreationWithMetadata {
-        #[serde(with = "serde_bytes")]
-        data: Vec<u8>,
+        data: Bytes,
         metadata: StateValueMetadata,
     },
     ModificationWithMetadata {
-        #[serde(with = "serde_bytes")]
-        data: Vec<u8>,
+        data: Bytes,
         metadata: StateValueMetadata,
     },
     DeletionWithMetadata {
@@ -129,19 +143,7 @@ impl WriteOp {
         Ok(true)
     }
 
-    pub fn into_bytes(self) -> Option<Vec<u8>> {
-        use WriteOp::*;
-
-        match self {
-            Creation(data)
-            | CreationWithMetadata { data, .. }
-            | Modification(data)
-            | ModificationWithMetadata { data, .. } => Some(data),
-            Deletion | DeletionWithMetadata { .. } => None,
-        }
-    }
-
-    pub fn bytes(&self) -> Option<&[u8]> {
+    pub fn bytes(&self) -> Option<&Bytes> {
         use WriteOp::*;
 
         match self {
@@ -166,22 +168,60 @@ impl WriteOp {
 }
 
 pub trait TransactionWrite {
-    fn extract_raw_bytes(&self) -> Option<Vec<u8>>;
+    fn bytes(&self) -> Option<&Bytes>;
 
     fn as_state_value(&self) -> Option<StateValue>;
+
+    // Often, the contents of W: TransactionWrite are converted to Option<StateValue>, e.g.
+    // to emulate reading from storage after W has been applied. However, in some contexts,
+    // it is also helpful to convert a StateValue to a potential instance of W that would
+    // have the desired effect. This allows, e.g., storing certain sentinel elements of
+    // type W in data structures (this happens in MVHashMap). If there are several instances
+    // of W that correspond to maybe_state_value, an arbitrary one may be provided.
+    fn from_state_value(maybe_state_value: Option<StateValue>) -> Self;
+
+    fn extract_raw_bytes(&self) -> Option<Bytes> {
+        self.bytes().cloned()
+    }
+
+    fn bytes_len(&self) -> usize {
+        self.bytes().map(|bytes| bytes.len()).unwrap_or(0)
+    }
+
+    fn as_u128(&self) -> anyhow::Result<Option<u128>> {
+        match self.bytes() {
+            Some(bytes) => Ok(Some(bcs::from_bytes(bytes)?)),
+            None => Ok(None),
+        }
+    }
+
+    fn is_deletion(&self) -> bool {
+        self.bytes().is_none()
+    }
 }
 
 impl TransactionWrite for WriteOp {
-    fn extract_raw_bytes(&self) -> Option<Vec<u8>> {
-        self.clone().into_bytes()
+    fn bytes(&self) -> Option<&Bytes> {
+        self.bytes()
     }
 
     fn as_state_value(&self) -> Option<StateValue> {
         self.bytes().map(|bytes| match self.metadata() {
-            None => StateValue::new_legacy(bytes.to_vec()),
-            Some(metadata) => StateValue::new_with_metadata(bytes.to_vec(), metadata.clone()),
+            None => StateValue::new_legacy(bytes.clone()),
+            Some(metadata) => StateValue::new_with_metadata(bytes.clone(), metadata.clone()),
         })
     }
+
+    fn from_state_value(maybe_state_value: Option<StateValue>) -> Self {
+        match maybe_state_value.map(|state_value| state_value.into()) {
+            None => WriteOp::Deletion,
+            Some((None, bytes)) => WriteOp::Creation(bytes),
+            Some((Some(metadata), bytes)) => WriteOp::CreationWithMetadata {
+                data: bytes,
+                metadata,
+            },
+        }
+    }
 }
 
 impl std::fmt::Debug for WriteOp {
@@ -258,6 +298,14 @@ impl Deref for WriteSet {
     }
 }
 
+impl DerefMut for WriteSet {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        match self {
+            Self::V0(write_set) => write_set,
+        }
+    }
+}
+
 /// `WriteSet` contains all access paths that one transaction modifies. Each of them is a `WriteOp`
 /// where `Value(val)` means that serialized representation should be updated to `val`, and
 /// `Deletion` means that we are going to delete this access path.
@@ -280,6 +328,30 @@ impl WriteSetV0 {
     pub fn get(&self, key: &StateKey) -> Option<&WriteOp> {
         self.0.get(key)
     }
+
+    pub fn get_total_supply(&self) -> Option<u128> {
+        let value = self
+            .0
+            .get(&TOTAL_SUPPLY_STATE_KEY)
+            .and_then(|op| op.bytes())
+            .map(|bytes| bcs::from_bytes::<u128>(bytes));
+        value.transpose().map_err(anyhow::Error::msg).unwrap()
+    }
+
+    // This is a temporary method to update the total supply in the write set.
+    // TODO: get rid of this function and use WriteSetMut instead; for that we need to change
+    // VM execution so that 'TransactionOutput' is materialized after updating
+    // total_supply.
+    pub fn update_total_supply(&mut self, value: u128) {
+        assert!(self
+            .0
+            .write_set
+            .insert(
+                TOTAL_SUPPLY_STATE_KEY.clone(),
+                WriteOp::Modification(bcs::to_bytes(&value).unwrap().into())
+            )
+            .is_some());
+    }
 }
 
 /// A mutable version of `WriteSet`.