diff --git a/.dockerignore b/.dockerignore index 14c2f5a8bf5..314dadf0a10 100644 --- a/.dockerignore +++ b/.dockerignore @@ -5,5 +5,5 @@ **/*.dSYM build -tests +tests/testdata cmd/prometheus diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9781d2e3441..5ea480e5168 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,6 +16,10 @@ on: - synchronize - ready_for_review +concurrency: + group: ${{ github.ref }} + cancel-in-progress: true + jobs: tests: if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} @@ -24,7 +28,7 @@ jobs: os: [ ubuntu-20.04, macos-11 ] # list of os: https://github.com/actions/virtual-environments runs-on: ${{ matrix.os }} - steps: + steps: - uses: actions/checkout@v3 - run: git submodule update --init --recursive --force - uses: actions/setup-go@v3 @@ -60,7 +64,7 @@ jobs: if: runner.os == 'Linux' uses: golangci/golangci-lint-action@v3 with: - version: v1.47 + version: v1.49 - name: Test run: make test @@ -72,7 +76,7 @@ jobs: os: [ windows-2022 ] runs-on: ${{ matrix.os }} - steps: + steps: - uses: actions/checkout@v3 - run: git submodule update --init --recursive --force - uses: actions/setup-go@v3 @@ -104,15 +108,17 @@ jobs: - name: Test run: .\wmake.ps1 test - docker: - if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} + docker-build-check: + # don't run this on devel - the PR must have run it to be merged and it misleads that this pushes the docker image + if: (${{ github.event_name == 'push' || !github.event.pull_request.draft }}) && ${{ github.ref != 'refs/heads/devel' }} runs-on: ubuntu-20.04 steps: + - uses: AutoModality/action-clean@v1 - uses: actions/checkout@v3 with: fetch-depth: 0 # fetch git tags for "git describe" - - name: make docker + - name: make docker (see dockerhub for image builds) run: DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker # check with root permissions, should be cached from previous 
build @@ -120,48 +126,95 @@ jobs: run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker hive: - needs: - - tests - - tests-windows - - docker - runs-on: ubuntu-20.04 - if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 # fetch git tags for "git describe" + if: ${{ github.ref == 'refs/heads/devel' }} + needs: + - tests + - tests-windows + runs-on: self-hosted + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/checkout@v3 + with: + fetch-depth: 0 # fetch git tags for "git describe" + + - name: build erigon image + run: DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + + # check with root permissions, should be cached from previous build + - name: build erigon image (root permissions) + run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + + - name: run hive and parse output + run: | + sudo mkdir -p /results-${{ github.run_id }} + docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work thorax/hive:latest --sim ethereum/engine --results-root=/work/results-${{ github.run_id }} --client erigon_ci-$GITHUB_SHA --exit.fail=false + docker run --rm --pull always -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work --entrypoint /app/hivecioutput thorax/hive:latest --resultsdir=/work/results-${{ github.run_id }} --outdir=/work/results-${{ github.run_id }} --exclusionsfile=/work/hive/exclusions.json + + - name: archive hive results + uses: actions/upload-artifact@v3 + if: always() + with: + name: test-results + path: results-${{ github.run_id }}/*.xml + + - name: clean up containers + if: always() + run: | + ids=$(docker ps -a -q) + for id in $ids + do + echo "stopping/removing container: $id" + docker stop $id && docker rm $id + done + + hive-results: + needs: hive + name: 
Hive results + runs-on: self-hosted + + permissions: + checks: write + pull-requests: write + actions: read - - name: build erigon image - run: DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker - - # check with root permissions, should be cached from previous build - - name: build erigon image (root permissions) - run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker - - - name: run hive - run: sudo mkdir /results && docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/engine --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 - - - name: parse hive output - run: docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work --entrypoint /app/hivecioutput gatewayfm/hive:latest --resultsdir=/work/results --outdir=/work/results - - - name: archive hive results - uses: actions/upload-artifact@v3 - if: always() - with: - name: hive-ci-output - path: results/*.xml - - event_file: - needs: - - tests - - tests-windows - - docker - name: archive event file - runs-on: ubuntu-latest - if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} steps: - - name: upload - uses: actions/upload-artifact@v2 - with: - name: event file - path: ${{ github.event_path }} + - name: parse hive results + uses: phoenix-actions/test-reporting@v8 + with: + artifact: test-results + name: Tests + path: '*.xml' + reporter: java-junit + + - name: set badge color + shell: bash + run: | + case ${{ fromJSON( steps.test-results.outputs.json ).conclusion }} in + success) + echo "BADGE_COLOR=31c653" >> $GITHUB_ENV + ;; + failure) + echo "BADGE_COLOR=800000" >> $GITHUB_ENV + ;; + neutral) + echo "BADGE_COLOR=696969" >> $GITHUB_ENV + ;; + esac + + - name: create badge + uses: emibcn/badge-action@d6f51ff11b5c3382b3b88689ae2d6db22d9737d1 + with: + label: Hive 
+ status: '${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.tests }} tests, ${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.runs }} runs: ${{ fromJSON( steps.test-results.outputs.json ).conclusion }}' + color: ${{ env.BADGE_COLOR }} + path: badge.svg + + - name: upload badge to gist + if: > + github.event_name == 'workflow_run' && github.event.workflow_run.head_branch == 'devel' || + github.event_name != 'workflow_run' && github.ref == 'refs/heads/devel' + uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d + with: + token: ${{ secrets.GIST_TOKEN }} + gistURL: https://gist.githubusercontent.com/revitteth/dc492845ba6eb694e6c7279224634b20 + file: badge.svg diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 72deaea1d53..b022cf5af81 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -51,5 +51,5 @@ jobs: uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d with: token: ${{ secrets.GIST_TOKEN }} - gistURL: https://gist.githubusercontent.com/revittm/ee38e9beb22353eef6b88f2ad6ed7aa9 + gistURL: https://gist.githubusercontent.com/revitteth/ee38e9beb22353eef6b88f2ad6ed7aa9 file: badge.svg \ No newline at end of file diff --git a/.github/workflows/hive-results.yml b/.github/workflows/hive-results.yml deleted file mode 100644 index d6191e38449..00000000000 --- a/.github/workflows/hive-results.yml +++ /dev/null @@ -1,75 +0,0 @@ -name: Hive results - -on: - workflow_run: - workflows: ["CI"] - types: - - completed - -jobs: - hive-results: - name: Hive results - runs-on: ubuntu-latest - if: github.event.workflow_run.conclusion != 'skipped' - - permissions: - checks: write - pull-requests: write - actions: read - - steps: - - name: download and extract artifacts - env: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} - run: | - mkdir -p artifacts && cd artifacts - - artifacts_url=${{ github.event.workflow_run.artifacts_url }} - - gh api 
"$artifacts_url" -q '.artifacts[] | [.name, .archive_download_url] | @tsv' | while read artifact - do - IFS=$'\t' read name url <<< "$artifact" - gh api $url > "$name.zip" - unzip -d "$name" "$name.zip" - done - - - name: publish hive test results - uses: EnricoMi/publish-unit-test-result-action@v1 - with: - commit: ${{ github.event.workflow_run.head_sha }} - event_file: artifacts/Event File/event.json - event_name: ${{ github.event.workflow_run.event }} - files: "artifacts/**/*.xml" - - - name: set badge color - shell: bash - run: | - case ${{ fromJSON( steps.test-results.outputs.json ).conclusion }} in - success) - echo "BADGE_COLOR=31c653" >> $GITHUB_ENV - ;; - failure) - echo "BADGE_COLOR=800000" >> $GITHUB_ENV - ;; - neutral) - echo "BADGE_COLOR=696969" >> $GITHUB_ENV - ;; - esac - - - name: create badge - uses: emibcn/badge-action@d6f51ff11b5c3382b3b88689ae2d6db22d9737d1 - with: - label: Hive - status: '${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.tests }} tests, ${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.runs }} runs: ${{ fromJSON( steps.test-results.outputs.json ).conclusion }}' - color: ${{ env.BADGE_COLOR }} - path: badge.svg - - - name: upload badge to gist - if: > - github.event_name == 'workflow_run' && github.event.workflow_run.head_branch == 'devel' || - github.event_name != 'workflow_run' && github.ref == 'refs/heads/devel' - uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d - with: - token: ${{ secrets.GIST_TOKEN }} - gistURL: https://gist.githubusercontent.com/revittm/dc492845ba6eb694e6c7279224634b20 - file: badge.svg \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 29531622448..6bf56eae778 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,5 +1,10 @@ name: Release +# Uncomment the following to let goreleaser automatically +# create a GitHub release when a tag is pushed. 
+# permissions: +# contents: write + on: push: branches-ignore: diff --git a/.github/workflows/stale-issues.yml b/.github/workflows/stale-issues.yml new file mode 100644 index 00000000000..0fbcbeb932c --- /dev/null +++ b/.github/workflows/stale-issues.yml @@ -0,0 +1,23 @@ +name: 'Close stale issues and PRs' +on: + schedule: + - cron: '30 1 * * *' + +permissions: + issues: write + pull-requests: write + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v6 + with: # core team are exempt + exempt-issue-assignees: 'AskAlexSharov,realLedgerwatch,AndreaLanfranchi,yperbasis,vorot93,b00ris,JekaMas,mandrigin,Giulio2002,tjayrush,revitteth,hexoscott' + exempt-pr-assignees: 'AskAlexSharov,realLedgerwatch,AndreaLanfranchi,yperbasis,vorot93,b00ris,JekaMas,mandrigin,Giulio2002,tjayrush,revitteth,hexoscott' + stale-issue-message: 'This issue is stale because it has been open for 40 days with no activity. Remove stale label or comment, or this will be closed in 7 days.' + stale-pr-message: 'This PR is stale because it has been open for 40 days with no activity.' + close-issue-message: 'This issue was closed because it has been stalled for 7 days with no activity.' 
+ days-before-stale: 40 + days-before-close: 7 + days-before-pr-close: -1 # don't close PRs diff --git a/.gitignore b/.gitignore index cb35431d04d..e4209cdbec5 100644 --- a/.gitignore +++ b/.gitignore @@ -81,3 +81,4 @@ dist .env coverage.out +dist diff --git a/.gitmodules b/.gitmodules index ae94b08f852..32bdb3b6e5a 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,3 @@ [submodule "tests"] path = tests/testdata url = https://github.com/ethereum/tests -[submodule "libmdbx"] - path = libmdbx - url = https://github.com/torquem-ch/libmdbx.git diff --git a/.golangci.yml b/.golangci.yml index 01253b4176f..75ace9670cd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -5,19 +5,19 @@ linters: disable-all: true enable: - gofmt - - deadcode - errcheck -# - gosimple # 1.18 + - gosimple - govet - ineffassign -# - staticcheck # 1.18 + - staticcheck # - structcheck # 1.18 # - unused # 1.18 - - varcheck -# - gocritic -# - bodyclose # 1.18 -# - gosec + - gocritic + - bodyclose + - gosec # - forcetypeassert + - prealloc + - unconvert linters-settings: gocritic: diff --git a/Dockerfile b/Dockerfile index 54704d5c1c8..9030eba5e1c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # syntax = docker/dockerfile:1.2 -FROM docker.io/library/golang:1.18-alpine3.15 AS builder +FROM docker.io/library/golang:1.19-alpine3.16 AS builder RUN apk --no-cache add build-base linux-headers git bash ca-certificates libstdc++ @@ -11,9 +11,9 @@ RUN --mount=type=cache,target=/root/.cache \ --mount=type=cache,target=/go/pkg/mod \ make all db-tools -FROM docker.io/library/alpine:3.15 +FROM docker.io/library/alpine:3.16 -RUN apk add --no-cache ca-certificates libstdc++ tzdata +RUN apk add --no-cache ca-certificates curl libstdc++ jq tzdata # copy compiled artifacts from builder COPY --from=builder /app/build/bin/* /usr/local/bin/ diff --git a/Makefile b/Makefile index c2047b27e99..f439b79e993 100644 --- a/Makefile +++ b/Makefile @@ -56,7 +56,7 @@ validate_docker_build_args: ## docker: validate, 
update submodules and build with docker docker: validate_docker_build_args git-submodules DOCKER_BUILDKIT=1 $(DOCKER) build -t ${DOCKER_TAG} \ - --build-arg "BUILD_DATE=$(shell date -Iseconds)" \ + --build-arg "BUILD_DATE=$(shell date +"%Y-%m-%dT%H:%M:%S:%z")" \ --build-arg VCS_REF=${GIT_COMMIT} \ --build-arg VERSION=${GIT_TAG} \ --build-arg UID=${DOCKER_UID} \ @@ -98,19 +98,20 @@ geth: erigon erigon: go-version erigon.cmd @rm -f $(GOBIN)/tg # Remove old binary to prevent confusion where users still use it because of the scripts -COMMANDS += cons -COMMANDS += devnettest +COMMANDS += devnet COMMANDS += downloader COMMANDS += hack COMMANDS += integration COMMANDS += observer COMMANDS += pics COMMANDS += rpcdaemon -COMMANDS += rpcdaemon22 COMMANDS += rpctest COMMANDS += sentry COMMANDS += state COMMANDS += txpool +COMMANDS += verkle +COMMANDS += evm +COMMANDS += lightclient # build each command using %.cmd rule $(COMMANDS): %: %.cmd @@ -119,30 +120,29 @@ $(COMMANDS): %: %.cmd all: erigon $(COMMANDS) ## db-tools: build db tools -db-tools: git-submodules +db-tools: @echo "Building db-tools" - # hub.docker.com setup incorrect gitpath for git modules. Just remove it and re-init submodule. - rm -rf libmdbx - git submodule update --init --recursive --force libmdbx - - cd libmdbx && MDBX_BUILD_TIMESTAMP=unknown make tools - cp libmdbx/mdbx_chk $(GOBIN) - cp libmdbx/mdbx_copy $(GOBIN) - cp libmdbx/mdbx_dump $(GOBIN) - cp libmdbx/mdbx_drop $(GOBIN) - cp libmdbx/mdbx_load $(GOBIN) - cp libmdbx/mdbx_stat $(GOBIN) + go mod vendor + cd vendor/github.com/torquem-ch/mdbx-go && MDBX_BUILD_TIMESTAMP=unknown make tools + cd vendor/github.com/torquem-ch/mdbx-go/mdbxdist && cp mdbx_chk $(GOBIN) && cp mdbx_copy $(GOBIN) && cp mdbx_dump $(GOBIN) && cp mdbx_drop $(GOBIN) && cp mdbx_load $(GOBIN) && cp mdbx_stat $(GOBIN) + rm -rf vendor @echo "Run \"$(GOBIN)/mdbx_stat -h\" to get info about mdbx db file." 
## test: run unit tests with a 50s timeout test: $(GOTEST) --timeout 50s +test3: + $(GOTEST) --timeout 50s -tags $(BUILD_TAGS),erigon3 + ## test-integration: run integration tests with a 30m timeout test-integration: $(GOTEST) --timeout 30m -tags $(BUILD_TAGS),integration +test3-integration: + $(GOTEST) --timeout 30m -tags $(BUILD_TAGS),integration,erigon3 + ## lint: run golangci-lint with .golangci.yml config file lint: @./build/bin/golangci-lint run --config ./.golangci.yml @@ -155,13 +155,12 @@ lintci: ## lintci-deps: (re)installs golangci-lint to build/bin/golangci-lint lintci-deps: rm -f ./build/bin/golangci-lint - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.47.2 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.49.0 ## clean: cleans the go cache, build dir, libmdbx db dir clean: go clean -cache rm -fr build/* - cd libmdbx/ && make clean # The devtools target installs tools required for 'go generate'. # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'. @@ -205,7 +204,7 @@ git-submodules: PACKAGE_NAME := github.com/maticnetwork/erigon -GOLANG_CROSS_VERSION ?= v1.18.1 +GOLANG_CROSS_VERSION ?= v1.18.5 .PHONY: release-dry-run release-dry-run: git-submodules @@ -268,10 +267,16 @@ user_macos: sudo -u $(ERIGON_USER) mkdir -p $(ERIGON_USER_XDG_DATA_HOME) ## coverage: run code coverage report and output total coverage % +.PHONY: coverage coverage: @go test -coverprofile=coverage.out ./... > /dev/null 2>&1 && go tool cover -func coverage.out | grep total | awk '{print substr($$3, 1, length($$3)-1)}' +## hive: run hive test suite locally using docker e.g. 
OUTPUT_DIR=~/results/hive SIM=ethereum/engine make hive +.PHONY: hive +hive: + DOCKER_TAG=thorax/erigon:ci-local make docker + docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v $(OUTPUT_DIR):/work thorax/hive:latest --sim $(SIM) --results-root=/work/results --client erigon_ci-local # run erigon + ## help: print commands help help : Makefile @sed -n 's/^##//p' $< - diff --git a/README.md b/README.md index 080c8bb6ce0..0eb1cd6a7ae 100644 --- a/README.md +++ b/README.md @@ -2,13 +2,13 @@ This repository is a fork of the Erigon repository (https://github.com/ledgerwatch/erigon). Our long term plan is to use and support upstream Erigon. We decided, in consultation with the Erigon team, to create a short term fork that we can test for stability and maintain. This is a short-to-medium-term mitigation strategy because Erigon is going through a period of rapid changes that will likely be breaking and not backward compatible. -Erigon is an implementation of Ethereum (aka "Ethereum client"), on the efficiency frontier, written in Go. +Erigon is an implementation of Ethereum (execution client), on the efficiency frontier, written in Go. 
![Build status](https://github.com/ledgerwatch/erigon/actions/workflows/ci.yml/badge.svg) -![Coverage](https://gist.githubusercontent.com/revittm/ee38e9beb22353eef6b88f2ad6ed7aa9/raw/badge.svg) +![Coverage](https://gist.githubusercontent.com/revitteth/ee38e9beb22353eef6b88f2ad6ed7aa9/raw/badge.svg) -![Hive](https://gist.githubusercontent.com/revittm/dc492845ba6eb694e6c7279224634b20/raw/badge.svg) +![Hive](https://gist.githubusercontent.com/revitteth/dc492845ba6eb694e6c7279224634b20/raw/badge.svg) @@ -19,7 +19,7 @@ Erigon is an implementation of Ethereum (aka "Ethereum client"), on the efficien + [Mining](#mining) + [Windows](#windows) + [GoDoc](https://godoc.org/github.com/ledgerwatch/erigon) - + [Beacon Chain](#beacon-chain) + + [Beacon Chain](#beacon-chain-consensus-layer) + [Dev Chain](#dev-chain) - [Key features](#key-features) @@ -28,6 +28,7 @@ Erigon is an implementation of Ethereum (aka "Ethereum client"), on the efficien + [JSON-RPC daemon](#json-rpc-daemon) + [Run all components by docker-compose](#run-all-components-by-docker-compose) + [Grafana dashboard](#grafana-dashboard) +- [Documentation](#documentation) - [FAQ](#faq) - [Getting in touch](#getting-in-touch) + [Erigon Discord Server](#erigon-discord-server) @@ -44,15 +45,16 @@ NB! In-depth links are marked by the microscope sign (🔬) **Disclaimer: this software is currently a tech preview. We will do our best to keep it stable and make no breaking changes but we don't guarantee anything. Things can and will break.** -🔬 Alpha/Beta versions difference: [here](https://erigon.substack.com/p/erigon-2-three-upgrades?s=r) +🔬 Alpha/Beta Designation has been discontinued. 
For release version numbering, please see [this blog post](https://erigon.substack.com/p/post-merge-release-of-erigon-dropping) System Requirements =================== * For an Archive node of Ethereum Mainnet we recommend >=3TB storage space: 1.8TB state (as of March 2022), -200GB temp files (can symlink or mount folder `/etl-tmp` to another disk). Ethereum Mainnet Full node (see `--prune*` flags): 400Gb (April 2022). + 200GB temp files (can symlink or mount folder `/etl-tmp` to another disk). Ethereum Mainnet Full node ( + see `--prune*` flags): 400Gb (April 2022). -* Goerli Full node (see `--prune*` flags): 189GB on Beta, 114GB on Alpha (April 2022). +* Goerli Full node (see `--prune*` flags): 189GB on Beta, 114GB on Alpha (April 2022).. * BSC Archive: 7TB. BSC Full: 1TB. @@ -63,21 +65,37 @@ Bear in mind that SSD performance deteriorates when close to capacity. RAM: >=16GB, 64-bit architecture, [Golang version >= 1.18](https://golang.org/doc/install), GCC 10+ -🔬 more details on disk storage [here](https://erigon.substack.com/p/disk-footprint-changes-in-new-erigon?s=r) and [here](https://ledgerwatch.github.io/turbo_geth_release.html#Disk-space). +🔬 more details on disk storage [here](https://erigon.substack.com/p/disk-footprint-changes-in-new-erigon?s=r) +and [here](https://ledgerwatch.github.io/turbo_geth_release.html#Disk-space). Usage ===== ### Getting Started +For building the latest stable release (this will be suitable for most users just wanting to run a node): + ```sh -git clone --recurse-submodules -j8 https://github.com/ledgerwatch/erigon.git +git clone --branch stable --single-branch https://github.com/ledgerwatch/erigon.git +cd erigon +make erigon +./build/bin/erigon +``` + +You can check [the list of releases](https://github.com/ledgerwatch/erigon/releases) for release notes. 
+ +For building the bleeding edge development branch: + +```sh +git clone --recurse-submodules https://github.com/ledgerwatch/erigon.git cd erigon +git checkout devel make erigon ./build/bin/erigon ``` -Default `--snapshots` for `mainnet`, `goerli`, `bsc`. Other networks now have default `--snapshots=false`. Increase download speed by flag `--torrent.download.rate=20mb`. 🔬 See [Downloader docs](./cmd/downloader/readme.md) +Default `--snapshots` for `mainnet`, `goerli`, `bsc`. Other networks now have default `--snapshots=false`. Increase +download speed by flag `--torrent.download.rate=20mb`. 🔬 See [Downloader docs](./cmd/downloader/readme.md) Use `--datadir` to choose where to store data. @@ -97,7 +115,8 @@ How to start Erigon's services as separated processes, see in [docker-compose.ym There is an optional stage that can be enabled through flags: -* `--watch-the-burn`, Enable WatchTheBurn stage which keeps track of ETH issuance and is required to use `erigon_watchTheBurn`. +* `--watch-the-burn`, Enable WatchTheBurn stage which keeps track of ETH issuance and is required to + use `erigon_watchTheBurn`. ### Testnets @@ -151,13 +170,15 @@ Windows users may run erigon in 3 possible ways: following point) * If you need to build MDBX tools (i.e. `.\wmake.ps1 db-tools`) then [Chocolatey package manager](https://chocolatey.org/) for Windows must be installed. By Chocolatey you need - to install the following components : `cmake`, `make`, `mingw` by `choco install cmake make mingw`. + to install the following components : `cmake`, `make`, `mingw` by `choco install cmake make mingw`. Make sure + Windows System "Path" variable has: + C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin **Important note about Anti-Viruses** During MinGW's compiler detection phase some temporary executables are generated to test compiler capabilities. 
It's been reported some anti-virus programs detect those files as possibly infected by `Win64/Kryptic.CIS` trojan horse (or a variant of it). Although those are false positives we have no control over 100+ vendors of security products for - Windows and their respective detection algorythms and we understand this might make your experience with Windows + Windows and their respective detection algorithms and we understand this might make your experience with Windows builds uncomfortable. To workaround the issue you might either set exclusions for your antivirus specifically for `build\bin\mdbx\CMakeFiles` sub-folder of the cloned repo or you can run erigon using the following other two options @@ -175,46 +196,88 @@ Windows users may run erigon in 3 possible ways: **Please also note the default WSL2 environment has its own IP address which does not match the one of the network interface of Windows host: take this into account when configuring NAT for port 30303 on your router.** -### Beacon Chain +### Using TOML or YAML Config Files -Erigon can be used as an execution-layer for beacon chain consensus clients (Eth2). Default configuration is ok. Eth2 -relies on availability of receipts - don't prune them: don't add character `r` to `--prune` flag. However, old receipts - are not needed for Eth2 and you can safely prune them with `--prune.r.before=11184524` in combination with `--prune htc`. +You can set Erigon flags through a YAML or TOML configuration file with the flag `--config`. The flags set in the +configuration +file can be overwritten by writing the flags directly on Erigon command line -You must enable JSON-RPC by `--http` and add `engine` to `--http.api` list. (Or run the [JSON-RPC daemon](#json-rpc-daemon) in addition to the Erigon) +### Example -If beacon chain client on a different device: add `--http.addr 0.0.0.0` (JSON-RPC listen on localhost by default) -. 
+`./build/bin/erigon --config ./config.yaml --chain=goerli -Once the JSON-RPC is running, all you need to do is point your beacon chain client to `:8545`, -where `` is either localhost or the IP address of the device running the JSON-RPC. +Assuming we have `chain : "mainnet" in our configuration file, by adding `--chain=goerli` allows the overwrite of the +flag inside +of the yaml configuration file and sets the chain to goerli -Erigon has been tested with Lighthouse however all other clients that support JSON-RPC should also work. +### TOML -### Authentication API +Example of setting up TOML config file + +``` +`datadir = 'your datadir' +port = 1111 +chain = "mainnet" +http = true +"private.api.addr"="localhost:9090" + +"http.api" = ["eth","debug","net"] +``` + +### YAML + +Example of setting up a YAML config file + +``` +datadir : 'your datadir' +port : 1111 +chain : "mainnet" +http : true +private.api.addr : "localhost:9090" + +http.api : ["eth","debug","net"] +``` -In order to establish a secure connection between the Consensus Layer and the Execution Layer, a JWT secret key is automatically generated. +### Beacon Chain (Consensus Layer) -The JWT secret key will be present in the datadir by default under the name of `jwt.hex` and its path can be specified with the flag `--authrpc.jwtsecret`. +Erigon can be used as an Execution Layer (EL) for Consensus Layer clients (CL). Default configuration is OK. -This piece of info needs to be specified in the Consensus Layer as well in order to establish connection successfully. More information can be found [here](https://github.com/ethereum/execution-apis/blob/main/src/engine/authentication.md) +If your CL client is on a different device, add `--authrpc.addr 0.0.0.0` ([Engine API] listens on localhost by default) +as well as `--authrpc.vhosts `. 
+ +[Engine API]: https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md + +In order to establish a secure connection between the Consensus Layer and the Execution Layer, a JWT secret key is +automatically generated. + +The JWT secret key will be present in the datadir by default under the name of `jwt.hex` and its path can be specified +with the flag `--authrpc.jwtsecret`. + +This piece of info needs to be specified in the Consensus Layer as well in order to establish connection successfully. +More information can be found [here](https://github.com/ethereum/execution-apis/blob/main/src/engine/authentication.md). + +Once Erigon is running, you need to point your CL client to `:8551`, +where `` is either `localhost` or the IP address of the device running Erigon, and also point to the JWT +secret path created by Erigon. ### Multiple Instances / One Machine -Define 5 flags to avoid conflicts: `--datadir --port --http.port --torrent.port --private.api.addr`. Example of multiple chains on the same machine: +Define 6 flags to avoid conflicts: `--datadir --port --http.port --authrpc.port --torrent.port --private.api.addr`. 
+Example of multiple chains on the same machine: ``` # mainnet -./build/bin/erigon --datadir="" --chain=mainnet --port=30303 --http.port=8545 --torrent.port=42069 --private.api.addr=127.0.0.1:9090 --http --ws --http.api=eth,debug,net,trace,web3,erigon +./build/bin/erigon --datadir="" --chain=mainnet --port=30303 --http.port=8545 --authrpc.port=8551 --torrent.port=42069 --private.api.addr=127.0.0.1:9090 --http --ws --http.api=eth,debug,net,trace,web3,erigon # rinkeby -./build/bin/erigon --datadir="" --chain=rinkeby --port=30304 --http.port=8546 --torrent.port=42068 --private.api.addr=127.0.0.1:9091 --http --ws --http.api=eth,debug,net,trace,web3,erigon +./build/bin/erigon --datadir="" --chain=rinkeby --port=30304 --http.port=8546 --authrpc.port=8552 --torrent.port=42068 --private.api.addr=127.0.0.1:9091 --http --ws --http.api=eth,debug,net,trace,web3,erigon ``` Quote your path if it has spaces. ### Dev Chain + 🔬 Detailed explanation is [DEV_CHAIN](/DEV_CHAIN.md). Key features @@ -269,11 +332,12 @@ Examples of stages are: ### JSON-RPC daemon -Most of Erigon's components (sentry, txpool, snapshots downloader, can work inside Erigon and as independent process. +Most of Erigon's components (sentry, txpool, snapshots downloader, can work inside Erigon and as independent process. To enable built-in RPC server: `--http` and `--ws` (sharing same port with http) -Run RPCDaemon as separated process: this daemon can use local DB (with running Erigon or on snapshot of a database) or remote DB (run on another server). 🔬 See [RPC-Daemon docs](./cmd/rpcdaemon/README.md) +Run RPCDaemon as separated process: this daemon can use local DB (with running Erigon or on snapshot of a database) or +remote DB (run on another server). 🔬 See [RPC-Daemon docs](./cmd/rpcdaemon/README.md) #### **For remote DB** @@ -298,12 +362,16 @@ For a details on the implementation status of each command, [see this table](./cmd/rpcdaemon/README.md#rpc-implementation-status). 
### Run all components by docker-compose -Docker allows for building and running Erigon via containers. This alleviates the need for installing build dependencies onto the host OS. + +Docker allows for building and running Erigon via containers. This alleviates the need for installing build dependencies +onto the host OS. #### Optional: Setup dedicated user + User UID/GID need to be synchronized between the host OS and container so files are written with correct permission. You may wish to setup a dedicated user/group on the host OS, in which case the following `make` targets are available. + ```sh # create "erigon" user make user_linux @@ -312,21 +380,28 @@ make user_macos ``` #### Environment Variables + There is a `.env.example` file in the root of the repo. + * `DOCKER_UID` - The UID of the docker user * `DOCKER_GID` - The GID of the docker user * `XDG_DATA_HOME` - The data directory which will be mounted to the docker containers If not specified, the UID/GID will use the current user. -A good choice for `XDG_DATA_HOME` is to use the `~erigon/.ethereum` directory created by helper targets `make user_linux` or `make user_macos`. +A good choice for `XDG_DATA_HOME` is to use the `~erigon/.ethereum` directory created by helper +targets `make user_linux` or `make user_macos`. #### Check: Permissions -In all cases, `XDG_DATA_HOME` (specified or default) must be writeable by the user UID/GID in docker, which will be determined by the `DOCKER_UID` and `DOCKER_GID` at build time. -If a build or service startup is failing due to permissions, check that all the directories, UID, and GID controlled by these environment variables are correct. +In all cases, `XDG_DATA_HOME` (specified or default) must be writeable by the user UID/GID in docker, which will be +determined by the `DOCKER_UID` and `DOCKER_GID` at build time. 
+ +If a build or service startup is failing due to permissions, check that all the directories, UID, and GID controlled by +these environment variables are correct. #### Run + Next command starts: Erigon on port 30303, rpcdaemon on port 8545, prometheus on port 9090, and grafana on port 3000. ```sh @@ -351,7 +426,7 @@ make docker-compose # if you followed above instructions # # Note: uid/gid syntax below will automatically use uid/gid of running user so this syntax -# is intended to be ran via the dedicated user setup earlier +# is intended to be run via the dedicated user setup earlier # DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) XDG_DATA_HOME=/preferred/data/folder DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 make docker-compose @@ -384,6 +459,16 @@ Windows support for docker-compose is not ready yet. Please help us with .ps1 po Disabled by default. To enable see `./build/bin/erigon --help` for flags `--prune` +Documentation +============== + +The `./docs` directory includes a lot of useful but outdated documentation. For code located +in the `./cmd` directory, their respective documentation can be found in `./cmd/*/README.md`. +A more recent collation of developments and happenings in Erigon can be found in the +[Erigon Blog](https://erigon.substack.com/). 
+ + + FAQ ================ @@ -399,42 +484,45 @@ Detailed explanation: [./docs/programmers_guide/db_faq.md](./docs/programmers_gu #### `erigon` ports -| Port | Protocol | Purpose | Expose | +| Port | Protocol | Purpose | Expose | |:-----:|:---------:|:----------------------:|:-------:| -| 30303 | TCP & UDP | eth/66 or 67 peering | Public | -| 9090 | TCP | gRPC Connections | Private | -| 42069 | TCP & UDP | Snap sync (Bittorrent) | Public | -| 6060 | TCP | Metrics or Pprof | Private | +| 30303 | TCP & UDP | eth/66 or 67 peering | Public | +| 9090 | TCP | gRPC Connections | Private | +| 42069 | TCP & UDP | Snap sync (Bittorrent) | Public | +| 6060 | TCP | Metrics or Pprof | Private | +| 8551 | TCP | Engine API (JWT auth) | Private | Typically, 30303 is exposed to the internet to allow incoming peering connections. 9090 is exposed only internally for rpcdaemon or other connections, (e.g. rpcdaemon -> erigon). +Port 8551 (JWT authenticated) is exposed only internally for [Engine API] JSON-RPC queries from the Consensus Layer +node. #### `RPC` ports -| Port | Protocol | Purpose | Expose | -|:-----:|:---------:|:------------------:|:-------:| -| 8545 | TCP | HTTP & WebSockets | Private | -| 8551 | TCP | HTTP with JWT auth | Private | +| Port | Protocol | Purpose | Expose | +|:----:|:--------:|:-----------------:|:-------:| +| 8545 | TCP | HTTP & WebSockets | Private | -Typically, 8545 is exposed only internally for JSON-RPC queries. Both HTTP and WebSocket connections are on the same port. -Typically, 8551 (JWT authenticated) is exposed only internally for the Engine API JSON-RPC queries. +Typically, 8545 is exposed only internally for JSON-RPC queries. Both HTTP and WebSocket connections are on the same +port. 
#### `sentry` ports -| Port | Protocol | Purpose | Expose | +| Port | Protocol | Purpose | Expose | |:-----:|:---------:|:----------------:|:-------:| -| 30303 | TCP & UDP | Peering | Public | -| 9091 | TCP | gRPC Connections | Private | +| 30303 | TCP & UDP | Peering | Public | +| 9091 | TCP | gRPC Connections | Private | -Typically, a sentry process will run one eth/xx protocol (e.g. eth/66) and will be exposed to the internet on 30303. Port +Typically, a sentry process will run one eth/xx protocol (e.g. eth/66) and will be exposed to the internet on 30303. +Port 9091 is for internal gRCP connections (e.g erigon -> sentry). #### Other ports -| Port | Protocol | Purpose | Expose | +| Port | Protocol | Purpose | Expose | |:----:|:--------:|:-------:|:-------:| -| 6060 | TCP | pprof | Private | -| 6060 | TCP | metrics | Private | +| 6060 | TCP | pprof | Private | +| 6060 | TCP | metrics | Private | Optional flags can be enabled that enable pprof or metrics (or both) - however, they both run on 6060 by default, so you'll have to change one if you want to run both at the same time. use `--help` with the binary for more info. @@ -450,13 +538,16 @@ Reserved for future use: **gRPC ports**: `9092` consensus engine, `9093` snapsho run `go tool pprof -inuse_space -png http://127.0.0.1:6060/debug/pprof/heap > mem.png` ### How to run local devnet? + 🔬 Detailed explanation is [here](/DEV_CHAIN.md). ### Docker permissions error Docker uses user erigon with UID/GID 1000 (for security reasons). You can see this user being created in the Dockerfile. -Can fix by giving a host's user ownership of the folder, where the host's user UID/GID is the same as the docker's user UID/GID (1000). -More details in [post](https://www.fullstaq.com/knowledge-hub/blogs/docker-and-the-host-filesystem-owner-matching-problem) +Can fix by giving a host's user ownership of the folder, where the host's user UID/GID is the same as the docker's user +UID/GID (1000). 
+More details +in [post](https://www.fullstaq.com/knowledge-hub/blogs/docker-and-the-host-filesystem-owner-matching-problem) ### Run RaspberyPI @@ -476,7 +567,7 @@ Send an email to `security [at] torquem.ch`. ### Team -Core contributors (in alpabetical order of first names): +Core contributors (in alphabetical order of first names): * Alex Sharov ([AskAlexSharov](https://twitter.com/AskAlexSharov)) @@ -504,7 +595,7 @@ Thanks to: * All contributors of Go-Ethereum -* Our special respect and graditude is to the core team of [Go-Ethereum](https://github.com/ethereum/go-ethereum). Keep +* Our special respect and gratitude is to the core team of [Go-Ethereum](https://github.com/ethereum/go-ethereum). Keep up the great job! Happy testing! 🥤 @@ -522,7 +613,7 @@ Application `htop` on column `res` shows memory of "App + OS used to hold page cache for given App", but it's not informative, because if `htop` says that app using 90% of memory you still can run 3 more instances of app on the same machine - because most of that `90%` is "OS pages cache". -OS automatically free this cache any time it needs memory. Smaller "page cache size" may not impact performance of +OS automatically frees this cache any time it needs memory. Smaller "page cache size" may not impact performance of Erigon at all. Next tools show correct memory usage of Erigon: @@ -539,8 +630,10 @@ memory. **Warning:** Multiple instances of Erigon on same machine will touch Disk concurrently, it impacts performance - one of main Erigon optimisations: "reduce Disk random access". -"Blocks Execution stage" still does much random reads - this is reason why it's slowest stage. We do not recommend run -multiple genesis syncs on same Disk. If genesis sync passed, then it's fine to run multiple Erigon on same Disk. +"Blocks Execution stage" still does many random reads - this is reason why it's slowest stage. We do not recommend +running +multiple genesis syncs on same Disk. 
If genesis sync passed, then it's fine to run multiple Erigon instances on same +Disk. ### Blocks Execution is slow on cloud-network-drives @@ -559,6 +652,7 @@ For example: btrfs's autodefrag option - may increase write IO 100x times ### the --mount option requires BuildKit error For anyone else that was getting the BuildKit error when trying to start Erigon the old way you can use the below... + ``` XDG_DATA_HOME=/preferred/data/folder DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 make docker-compose ``` diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go index ce1f35c6517..ca2ff1656cd 100644 --- a/accounts/abi/abi_test.go +++ b/accounts/abi/abi_test.go @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . +// //nolint:scopelint package abi @@ -165,8 +166,9 @@ func TestInvalidABI(t *testing.T) { // TestConstructor tests a constructor function. // The test is based on the following contract: -// contract TestConstructor { -// constructor(uint256 a, uint256 b) public{} +// +// contract TestConstructor { +// constructor(uint256 a, uint256 b) public{} // } func TestConstructor(t *testing.T) { json := `[{ "inputs": [{"internalType": "uint256","name": "a","type": "uint256" },{ "internalType": "uint256","name": "b","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]` @@ -710,16 +712,19 @@ func TestBareEvents(t *testing.T) { } // TestUnpackEvent is based on this contract: -// contract T { -// event received(address sender, uint amount, bytes memo); -// event receivedAddr(address sender); -// function receive(bytes memo) external payable { -// received(msg.sender, msg.value, memo); -// receivedAddr(msg.sender); -// } -// } +// +// contract T { +// event received(address sender, uint amount, bytes memo); +// event receivedAddr(address sender); +// function receive(bytes memo) external payable { +// received(msg.sender, msg.value, memo); +// 
receivedAddr(msg.sender); +// } +// } +// // When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt: -// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} +// +// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 
000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} func TestUnpackEvent(t *testing.T) { const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]` abi, err := JSON(strings.NewReader(abiJSON)) @@ -1064,8 +1069,9 @@ func TestDoubleDuplicateMethodNames(t *testing.T) { // TestDoubleDuplicateEventNames checks that if send0 already exists, there won't be a name // conflict and that the second send event will be renamed send1. // The test runs the abi of the following contract. -// contract DuplicateEvent { -// event send(uint256 a); +// +// contract DuplicateEvent { +// event send(uint256 a); // event send0(); // event send(); // } @@ -1092,7 +1098,8 @@ func TestDoubleDuplicateEventNames(t *testing.T) { // TestUnnamedEventParam checks that an event with unnamed parameters is // correctly handled. // The test runs the abi of the following contract. 
-// contract TestEvent { +// +// contract TestEvent { // event send(uint256, uint256); // } func TestUnnamedEventParam(t *testing.T) { diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 83a0c05330a..cd722523f47 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -28,6 +28,7 @@ import ( "github.com/holiman/uint256" ethereum "github.com/ledgerwatch/erigon" "github.com/ledgerwatch/erigon-lib/kv" + state2 "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/accounts/abi" "github.com/ledgerwatch/erigon/accounts/abi/bind" "github.com/ledgerwatch/erigon/common" @@ -36,13 +37,10 @@ import ( "github.com/ledgerwatch/erigon/common/u256" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/bloombits" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/eth/filters" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/olddb" "github.com/ledgerwatch/erigon/event" "github.com/ledgerwatch/erigon/params" @@ -66,9 +64,8 @@ var ( // ChainReader, ChainStateReader, ContractBackend, ContractCaller, ContractFilterer, ContractTransactor, // DeployBackend, GasEstimator, GasPricer, LogFilterer, PendingContractCaller, TransactionReader, and TransactionSender type SimulatedBackend struct { - m *stages.MockSentry - getHeader func(hash common.Hash, number uint64) *types.Header - contractHasTEVM func(common.Hash) (bool, error) + m *stages.MockSentry + getHeader func(hash common.Hash, number uint64) *types.Header mu sync.Mutex prependBlock *types.Block @@ -79,8 +76,6 @@ type SimulatedBackend struct { pendingReader *state.PlainStateReader pendingState *state.IntraBlockState // Currently pending state that will be the active on request - 
events *filters.EventSystem // Event system for filtering log events live - rmLogsFeed event.Feed chainFeed event.Feed logsFeed event.Feed @@ -105,8 +100,6 @@ func NewSimulatedBackendWithConfig(alloc core.GenesisAlloc, config *params.Chain return h }, } - backend.contractHasTEVM = ethdb.GetHasTEVM(olddb.NewObjectDatabase(m.DB)) - backend.events = filters.NewEventSystem(&filterBackend{m.DB, backend}) backend.emptyPendingBlock() return backend } @@ -120,9 +113,9 @@ func NewSimulatedBackend(t *testing.T, alloc core.GenesisAlloc, gasLimit uint64) return b } -func (b *SimulatedBackend) DB() kv.RwDB { - return b.m.DB -} +func (b *SimulatedBackend) DB() kv.RwDB { return b.m.DB } +func (b *SimulatedBackend) Agg() *state2.Aggregator22 { return b.m.HistoryV3Components() } +func (b *SimulatedBackend) HistoryV3() bool { return b.m.HistoryV3 } // Close terminates the underlying blockchain's update loop. func (b *SimulatedBackend) Close() { @@ -667,7 +660,7 @@ func (b *SimulatedBackend) callContract(_ context.Context, call ethereum.CallMsg txContext := core.NewEVMTxContext(msg) header := block.Header() - evmContext := core.NewEVMBlockContext(header, core.GetHashFn(header, b.getHeader), b.m.Engine, nil, b.contractHasTEVM) + evmContext := core.NewEVMBlockContext(header, core.GetHashFn(header, b.getHeader), b.m.Engine, nil) // Create a new environment which holds all relevant information // about the transaction and calling mechanisms. 
vmEnv := vm.NewEVM(evmContext, txContext, statedb, b.m.ChainConfig, vm.Config{}) @@ -700,7 +693,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx types.Transac &b.pendingHeader.Coinbase, b.gasPool, b.pendingState, state.NewNoopWriter(), b.pendingHeader, tx, - &b.pendingHeader.GasUsed, vm.Config{}, b.contractHasTEVM); err != nil { + &b.pendingHeader.GasUsed, vm.Config{}); err != nil { return err } //fmt.Printf("==== Start producing block %d\n", (b.prependBlock.NumberU64() + 1)) @@ -725,94 +718,18 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx types.Transac // // TODO(karalabe): Deprecate when the subscription one can return past data too. func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) { - var filter *filters.Filter - if query.BlockHash != nil { - // Block filter requested, construct a single-shot filter - filter = filters.NewBlockFilter(&filterBackend{b.m.DB, b}, *query.BlockHash, query.Addresses, query.Topics) - } else { - // Initialize unset filter boundaries to run from genesis to chain head - from := int64(0) - if query.FromBlock != nil { - from = query.FromBlock.Int64() - } - to := int64(-1) - if query.ToBlock != nil { - to = query.ToBlock.Int64() - } - // Construct the range filter - filter = filters.NewRangeFilter(&filterBackend{b.m.DB, b}, from, to, query.Addresses, query.Topics) - } - // Run the filter and return all the logs - logs, err := filter.Logs(ctx) - if err != nil { - return nil, err - } - res := make([]types.Log, len(logs)) - for i, nLog := range logs { - res[i] = *nLog - } - return res, nil + return nil, nil } // SubscribeFilterLogs creates a background log filtering operation, returning a // subscription immediately, which can be used to stream the found events. 
func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { - // Subscribe to contract events - sink := make(chan []*types.Log) - - sub, err := b.events.SubscribeLogs(query, sink) - if err != nil { - return nil, err - } - // Since we're getting logs in batches, we need to flatten them into a plain stream - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case logs := <-sink: - for _, nlog := range logs { - select { - case ch <- *nlog: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil + return nil, nil } // SubscribeNewHead returns an event subscription for a new header. func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { - // subscribe to a new head - sink := make(chan *types.Header) - sub := b.events.SubscribeNewHeads(sink) - - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case head := <-sink: - select { - case ch <- head: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil + return nil, nil } // AdjustTime adds a time shift to the simulated clock. 
@@ -947,10 +864,6 @@ func (fb *filterBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 } -func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) { - panic("not supported") -} - func nullSubscription() event.Subscription { return event.NewSubscription(func(quit <-chan struct{}) error { <-quit diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index 3e813c91168..643b92aa71d 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -94,17 +94,17 @@ func TestSimulatedBackend(t *testing.T) { var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") -// the following is based on this contract: -// contract T { -// event received(address sender, uint amount, bytes memo); -// event receivedAddr(address sender); +// the following is based on this contract: +// contract T { +// event received(address sender, uint amount, bytes memo); +// event receivedAddr(address sender); // -// function receive(bytes calldata memo) external payable returns (string memory res) { -// emit received(msg.sender, msg.value, memo); -// emit receivedAddr(msg.sender); -// return "hello world"; -// } -// } +// function receive(bytes calldata memo) external payable returns (string memory res) { +// emit received(msg.sender, msg.value, memo); +// emit receivedAddr(msg.sender); +// return "hello world"; +// } +// } const abiJSON = `[ { "constant": false, "inputs": [ { "name": "memo", "type": "bytes" } ], "name": "receive", "outputs": [ { "name": "res", "type": "string" } ], "payable": true, "stateMutability": "payable", "type": "function" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" }, { "indexed": false, "name": "amount", "type": "uint256" }, { "indexed": false, "name": "memo", 
"type": "bytes" } ], "name": "received", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" } ], "name": "receivedAddr", "type": "event" } ]` const abiBin = `0x608060405234801561001057600080fd5b506102a0806100206000396000f3fe60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029` const deployedCode = 
`60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029` @@ -857,6 +857,9 @@ func TestSimulatedBackend_TransactionReceipt(t *testing.T) { } sim.Commit() + if sim.m.HistoryV3 { + return + } receipt, err := sim.TransactionReceipt(bgCtx, signedTx.Hash()) if err != nil { t.Errorf("could not get transaction receipt: %v", err) @@ -954,7 +957,8 @@ func TestSimulatedBackend_CodeAt(t *testing.T) { } // When receive("X") is called with sender 0x00... 
and value 1, it produces this tx receipt: -// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} +// +// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 
000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} func TestSimulatedBackend_PendingAndCallContract(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(t, testAddr) diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go index 7c9fc83075c..bfc8ae14d7d 100644 --- a/accounts/abi/bind/util_test.go +++ b/accounts/abi/bind/util_test.go @@ -70,6 +70,9 @@ func TestWaitDeployed(t *testing.T) { }, 10000000, ) + if backend.HistoryV3() { + t.Skip("HistoryV3 doesn't store receipts") + } // Create the transaction. // Create the transaction. diff --git a/accounts/abi/pack_test.go b/accounts/abi/pack_test.go index 9ac81e57007..b1f5c404074 100644 --- a/accounts/abi/pack_test.go +++ b/accounts/abi/pack_test.go @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . +// //nolint:scopelint package abi diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go index be417a8f2f1..bbec540b327 100644 --- a/accounts/abi/reflect.go +++ b/accounts/abi/reflect.go @@ -28,11 +28,13 @@ import ( // given type // e.g. 
turn // var fields []reflect.StructField -// fields = append(fields, reflect.StructField{ -// Name: "X", -// Type: reflect.TypeOf(new(big.Int)), -// Tag: reflect.StructTag("json:\"" + "x" + "\""), -// } +// +// fields = append(fields, reflect.StructField{ +// Name: "X", +// Type: reflect.TypeOf(new(big.Int)), +// Tag: reflect.StructTag("json:\"" + "x" + "\""), +// } +// // into // type TupleT struct { X *big.Int } func ConvertType(in interface{}, proto interface{}) interface{} { @@ -178,10 +180,14 @@ func setStruct(dst, src reflect.Value) error { // mapArgNamesToStructFields maps a slice of argument names to struct fields. // first round: for each Exportable field that contains a `abi:""` tag -// and this field name exists in the given argument name list, pair them together. +// +// and this field name exists in the given argument name list, pair them together. +// // second round: for each argument name that has not been already linked, -// find what variable is expected to be mapped into, if it exists and has not been -// used, pair them. +// +// find what variable is expected to be mapped into, if it exists and has not been +// used, pair them. +// // Note this function assumes the given value is a struct value. func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) { typ := value.Type() diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go index 2d169d65f96..22858cddbf9 100644 --- a/accounts/abi/unpack_test.go +++ b/accounts/abi/unpack_test.go @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
+// //nolint:scopelint package abi diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go index 2eeac93eb06..1f749c98bd6 100644 --- a/cmd/abigen/main.go +++ b/cmd/abigen/main.go @@ -146,19 +146,19 @@ func abigen(c *cli.Context) error { if c.GlobalString(abiFlag.Name) != "" { // Load up the ABI, optional bytecode and type name from the parameters var ( - abi []byte - err error + abiBytes []byte + err error ) input := c.GlobalString(abiFlag.Name) if input == "-" { - abi, err = io.ReadAll(os.Stdin) + abiBytes, err = io.ReadAll(os.Stdin) } else { - abi, err = os.ReadFile(input) + abiBytes, err = os.ReadFile(input) } if err != nil { utils.Fatalf("Failed to read input ABI: %v", err) } - abis = append(abis, string(abi)) + abis = append(abis, string(abiBytes)) var bin []byte if binFile := c.GlobalString(binFlag.Name); binFile != "" { diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go index 7a36c249cc8..852ad295f72 100644 --- a/cmd/bootnode/main.go +++ b/cmd/bootnode/main.go @@ -21,10 +21,11 @@ import ( "crypto/ecdsa" "flag" "fmt" - "github.com/ledgerwatch/erigon-lib/common" "net" "os" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/p2p/discover" diff --git a/cmd/cons/commands/clique.go b/cmd/cons/commands/clique.go deleted file mode 100644 index 7db1fed7f28..00000000000 --- a/cmd/cons/commands/clique.go +++ /dev/null @@ -1,336 +0,0 @@ -package commands - -import ( - "context" - "embed" - "fmt" - "math/big" - "net" - "os" - "path/filepath" - "runtime" - "strings" - "syscall" - "time" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - "github.com/holiman/uint256" - common2 "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/gointerfaces" - proto_cons "github.com/ledgerwatch/erigon-lib/gointerfaces/consensus" - 
"github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/consensus/clique" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/log/v3" - "github.com/pelletier/go-toml" - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" - "google.golang.org/protobuf/types/known/emptypb" -) - -//go:embed configs -var configs embed.FS - -func init() { - withApiAddr(cliqueCmd) - withDataDir(cliqueCmd) - withConfig(cliqueCmd) - rootCmd.AddCommand(cliqueCmd) -} - -var cliqueCmd = &cobra.Command{ - Use: "clique", - Short: "Run clique consensus engine", - RunE: func(cmd *cobra.Command, args []string) error { - ctx, _ := common2.RootContext() - logger := log.New() - return cliqueEngine(ctx, logger) - }, -} - -func cliqueEngine(ctx context.Context, logger log.Logger) error { - var server *CliqueServerImpl - var err error - if config == "test" { - // Configuration will be received from the test driver - server, err = grpcCliqueServer(ctx, true /* testServer */) - if err != nil { - return err - } - } else { - var configuration []byte - if strings.HasPrefix(config, "embed:") { - filename := config[len("embed:"):] - if configuration, err = configs.ReadFile(filepath.Join("configs", filename)); err != nil { - return fmt.Errorf("reading embedded configuration for %s: %w", filename, err) - } - } else if strings.HasPrefix(config, "file:") { - filename := config[len("file:"):] - if configuration, err = os.ReadFile(filename); err != nil { - return fmt.Errorf("reading configuration from file %s: %w", filename, err) - } - } else { - return fmt.Errorf("unrecognized config option: [%s], `file:` to specify config file in file system, `embed:` to use embedded file, `test` to register test interface and receive config from test driver", config) - } - if server, err = grpcCliqueServer(ctx, false /* testServer */); err 
!= nil { - return err - } - if err = server.initAndConfig(configuration); err != nil { - return err - } - } - server.db = openDB(filepath.Join(datadirCli, "clique", "db"), logger) - server.c = clique.New(server.chainConfig, params.CliqueSnapshot, server.db) - <-ctx.Done() - return nil -} - -func grpcCliqueServer(ctx context.Context, testServer bool) (*CliqueServerImpl, error) { - // STARTING GRPC SERVER - log.Info("Starting Clique server", "on", consensusAddr) - listenConfig := net.ListenConfig{ - Control: func(network, address string, _ syscall.RawConn) error { - log.Info("Clique received connection", "via", network, "from", address) - return nil - }, - } - lis, err := listenConfig.Listen(ctx, "tcp", consensusAddr) - if err != nil { - return nil, fmt.Errorf("could not create Clique listener: %w, addr=%s", err, consensusAddr) - } - var ( - streamInterceptors []grpc.StreamServerInterceptor - unaryInterceptors []grpc.UnaryServerInterceptor - ) - //if metrics.Enabled { - //streamInterceptors = append(streamInterceptors, grpc_prometheus.StreamServerInterceptor) - //unaryInterceptors = append(unaryInterceptors, grpc_prometheus.UnaryServerInterceptor) - //} - streamInterceptors = append(streamInterceptors, grpc_recovery.StreamServerInterceptor()) - unaryInterceptors = append(unaryInterceptors, grpc_recovery.UnaryServerInterceptor()) - var grpcServer *grpc.Server - cpus := uint32(runtime.GOMAXPROCS(-1)) - opts := []grpc.ServerOption{ - grpc.NumStreamWorkers(cpus), // reduce amount of goroutines - grpc.WriteBufferSize(1024), // reduce buffers to save mem - grpc.ReadBufferSize(1024), - grpc.MaxConcurrentStreams(100), // to force clients reduce concurrency level - grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: 10 * time.Minute, - }), - // Don't drop the connection, settings accordign to this comment on GitHub - // https://github.com/grpc/grpc-go/issues/3171#issuecomment-552796779 - grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: 10 * 
time.Second, - PermitWithoutStream: true, - }), - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(streamInterceptors...)), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptors...)), - } - grpcServer = grpc.NewServer(opts...) - - cliqueServer := NewCliqueServer(ctx) - proto_cons.RegisterConsensusEngineServer(grpcServer, cliqueServer) - if testServer { - proto_cons.RegisterTestServer(grpcServer, cliqueServer) - } - //if metrics.Enabled { - // grpc_prometheus.Register(grpcServer) - //} - go func() { - if err1 := grpcServer.Serve(lis); err1 != nil { - log.Error("Clique server fail", "err", err1) - } - }() - return cliqueServer, nil -} - -type CliqueServerImpl struct { - proto_cons.UnimplementedConsensusEngineServer - proto_cons.UnimplementedTestServer - genesis *core.Genesis - chainConfig *params.ChainConfig - c *clique.Clique - db kv.RwDB -} - -func NewCliqueServer(_ context.Context) *CliqueServerImpl { - return &CliqueServerImpl{} -} - -// initAndConfig resets the Clique Engine and configures it according to given configuration -func (cs *CliqueServerImpl) initAndConfig(configuration []byte) error { - tree, err := toml.LoadBytes(configuration) - if err != nil { - return err - } - var ( - epoch int64 - period int64 - vanityStr string - signersStr []string - gaslimit int64 - timestamp int64 - balances *toml.Tree - chainIdStr string - forksTree *toml.Tree - difficultyStr string - ok bool - ) - if epoch, ok = tree.Get("engine.params.epoch").(int64); !ok { - return fmt.Errorf("engine.params.epoch absent or of wrong type") - } - if period, ok = tree.Get("engine.params.period").(int64); !ok { - return fmt.Errorf("engine.params.period absent or of wrong type") - } - if vanityStr, ok = tree.Get("genesis.vanity").(string); !ok { - return fmt.Errorf("genesis.vanity absent or of wrong type") - } - if signersStr, ok = tree.GetArray("genesis.signers").([]string); !ok { - return fmt.Errorf("signers absent or of wrong type") - } - if gaslimit, ok = 
tree.Get("genesis.gas_limit").(int64); !ok { - return fmt.Errorf("genesis.gaslimit absent or of wrong type") - } - if timestamp, ok = tree.Get("genesis.timestamp").(int64); !ok { - return fmt.Errorf("genesis.timestamp absent or of wrong type") - } - if difficultyStr, ok = tree.Get("genesis.difficulty").(string); !ok { - return fmt.Errorf("genesis.difficulty absent or of wrong type") - } - if balances, ok = tree.Get("genesis.balances").(*toml.Tree); !ok { - return fmt.Errorf("genesis.balances absent or of wrong type") - } - // construct chain config - var chainConfig params.ChainConfig - if chainIdStr, ok = tree.Get("genesis.chain_id").(string); !ok { - return fmt.Errorf("genesis.chain_id absent or of wrong type") - } - var chainId big.Int - chainId.SetBytes(common.Hex2Bytes(chainIdStr)) - chainConfig.ChainID = &chainId - if forksTree, ok = tree.Get("forks").(*toml.Tree); !ok { - return fmt.Errorf("forks absent or of wrong type") - } - for forkName, forkNumber := range forksTree.ToMap() { - var number int64 - if number, ok = forkNumber.(int64); !ok { - return fmt.Errorf("forks.%s is of a wrong type: %T", forkName, forkNumber) - } - bigNumber := big.NewInt(number) - switch forkName { - case "homestead": - chainConfig.HomesteadBlock = bigNumber - case "tangerine": - chainConfig.TangerineWhistleBlock = bigNumber - case "spurious": - chainConfig.SpuriousDragonBlock = bigNumber - case "byzantium": - chainConfig.ByzantiumBlock = bigNumber - case "constantinople": - chainConfig.ConstantinopleBlock = bigNumber - case "petersburg": - chainConfig.PetersburgBlock = bigNumber - case "istanbul": - chainConfig.IstanbulBlock = bigNumber - case "berlin": - chainConfig.BerlinBlock = bigNumber - case "london": - chainConfig.LondonBlock = bigNumber - default: - return fmt.Errorf("unknown fork name [%s]", forkName) - } - } - chainConfig.Clique = ¶ms.CliqueConfig{ - Epoch: uint64(epoch), - Period: uint64(period), - } - // construct genesis - var genesis core.Genesis - genesis.Config = 
&chainConfig - genesis.Timestamp = uint64(timestamp) - genesis.ExtraData = common.FromHex(vanityStr) - for _, signer := range signersStr { - genesis.ExtraData = append(genesis.ExtraData, common.HexToAddress(signer).Bytes()...) - } - genesis.ExtraData = append(genesis.ExtraData, make([]byte, clique.ExtraSeal)...) - genesis.GasLimit = uint64(gaslimit) - genesis.Difficulty = new(big.Int).SetBytes(common.FromHex(difficultyStr)) - genesis.Alloc = make(core.GenesisAlloc) - for account, balance := range balances.ToMap() { - genesis.Alloc[common.HexToAddress(account)] = core.GenesisAccount{ - Balance: new(big.Int).SetBytes(common.FromHex(balance.(string))), - } - } - var genesisBlock *types.Block - if genesisBlock, _, err = genesis.ToBlock(); err != nil { - return fmt.Errorf("creating genesis block: %w", err) - } - log.Info("Created genesis block", "hash", genesisBlock.Hash()) - return nil -} - -// StartTestCase implements Test interface from consensus.proto -// When called, it signals to the consensus engine to reset its state and re-initialise using configuration -// received from the test driver -func (cs *CliqueServerImpl) StartTestCase(_ context.Context, testCase *proto_cons.StartTestCaseMessage) (*emptypb.Empty, error) { - if testCase.Mechanism != "clique" { - return &emptypb.Empty{}, fmt.Errorf("expected mechanism [clique], got [%s]", testCase.Mechanism) - } - if err := cs.initAndConfig(testCase.Config); err != nil { - return &emptypb.Empty{}, err - } - return &emptypb.Empty{}, nil -} - -func (cs *CliqueServerImpl) ChainSpec(context.Context, *emptypb.Empty) (*proto_cons.ChainSpecMessage, error) { - var cliqueConfig []byte - var forks []*proto_cons.Fork - var chainId uint256.Int - if cs.chainConfig.ChainID != nil { - chainId.SetFromBig(cs.chainConfig.ChainID) - } - var genesisDiff uint256.Int - if cs.genesis.Difficulty != nil { - genesisDiff.SetFromBig(cs.genesis.Difficulty) - } - var extraWithoutSigners []byte - extraWithoutSigners = append(extraWithoutSigners, 
cs.genesis.ExtraData[:clique.ExtraVanity]...) - extraWithoutSigners = append(extraWithoutSigners, cs.genesis.ExtraData[len(cs.genesis.ExtraData)-clique.ExtraSeal:]...) - tomlTree, err := toml.TreeFromMap(make(map[string]interface{})) - if err != nil { - return nil, err - } - genesisSigners := make([]string, (len(cs.genesis.ExtraData)-clique.ExtraVanity-clique.ExtraSeal)/common.AddressLength) - for i := 0; i < len(genesisSigners); i++ { - genesisSigners[i] = fmt.Sprintf("%x", cs.genesis.ExtraData[clique.ExtraVanity+i*common.AddressLength:]) - } - tomlTree.Set("genesis.signers", genesisSigners) - cliqueConfig, err = tomlTree.Marshal() - if err != nil { - return nil, err - } - return &proto_cons.ChainSpecMessage{ - Mechanism: "clique", - MechanismConfig: cliqueConfig, - Genesis: &proto_cons.Genesis{ - ChainId: gointerfaces.ConvertUint256IntToH256(&chainId), - Template: &proto_cons.Template{ - ParentHash: gointerfaces.ConvertHashToH256(cs.genesis.ParentHash), - Coinbase: gointerfaces.ConvertAddressToH160(cs.genesis.Coinbase), - Difficulty: gointerfaces.ConvertUint256IntToH256(&genesisDiff), - Number: cs.genesis.Number, - GasLimit: cs.genesis.GasLimit, - Time: cs.genesis.Timestamp, - Extra: extraWithoutSigners, // Initial signers are passed in the clique-specific configuration - Nonce: cs.genesis.Nonce, - }, - }, - Forks: forks, - }, nil -} diff --git a/cmd/cons/commands/configs/goerli.toml b/cmd/cons/commands/configs/goerli.toml deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/cmd/cons/commands/configs/rinkeby.toml b/cmd/cons/commands/configs/rinkeby.toml deleted file mode 100644 index 90abbe005f2..00000000000 --- a/cmd/cons/commands/configs/rinkeby.toml +++ /dev/null @@ -1,353 +0,0 @@ -name = "Rinkeby" -data_dir = "rinkeby" - -bootnodes = [ - "enode://a24ac7c5484ef4ed0c5eb2d36620ba4e4aa13b8c84684e1b4aab0cebea2ae45cb4d375b77eab56516d34bfbd3c1a833fc51296ff084b770b94fb9028c4d25ccf@52.169.42.101:30303", - 
"enode://343149e4feefa15d882d9fe4ac7d88f885bd05ebb735e547f12e12080a9fa07c8014ca6fd7f373123488102fe5e34111f8509cf0b7de3f5b44339c9f25e87cb8@52.3.158.184:30303", - "enode://b6b28890b006743680c52e64e0d16db57f28124885595fa03a562be1d2bf0f3a1da297d56b13da25fb992888fd556d4c1a27b1f39d531bde7de1921c90061cc6@159.89.28.211:30303", -] - -[engine] -name = "clique" -[engine.params] -period = 15 -epoch = 30000 - -[genesis] -gas_limit = 4700000 -timestamp = 1492009146 -difficulty = "0x1" -chain_id = "0x4" -vanity = "0x52657370656374206d7920617574686f7269746168207e452e436172746d616e" -signers = [ - "0x42eb768f2244c8811c63729a21a3569731535f06", - "0x7ffc57839b00206d1ad20c69a1981b489f772031", - "0xb279182d99e65703f0076e4812653aab85fca0f0", -] - -[genesis.balances] -"0x0000000000000000000000000000000000000000" = "0x1" -"0x0000000000000000000000000000000000000001" = "0x1" -"0x0000000000000000000000000000000000000002" = "0x1" -"0x0000000000000000000000000000000000000003" = "0x1" -"0x0000000000000000000000000000000000000004" = "0x1" -"0x0000000000000000000000000000000000000005" = "0x1" -"0x0000000000000000000000000000000000000006" = "0x1" -"0x0000000000000000000000000000000000000007" = "0x1" -"0x0000000000000000000000000000000000000008" = "0x1" -"0x0000000000000000000000000000000000000009" = "0x1" -"0x000000000000000000000000000000000000000a" = "0x1" -"0x000000000000000000000000000000000000000b" = "0x1" -"0x000000000000000000000000000000000000000c" = "0x1" -"0x000000000000000000000000000000000000000d" = "0x1" -"0x000000000000000000000000000000000000000e" = "0x1" -"0x000000000000000000000000000000000000000f" = "0x1" -"0x0000000000000000000000000000000000000010" = "0x1" -"0x0000000000000000000000000000000000000011" = "0x1" -"0x0000000000000000000000000000000000000012" = "0x1" -"0x0000000000000000000000000000000000000013" = "0x1" -"0x0000000000000000000000000000000000000014" = "0x1" -"0x0000000000000000000000000000000000000015" = "0x1" -"0x0000000000000000000000000000000000000016" = "0x1" 
-"0x0000000000000000000000000000000000000017" = "0x1" -"0x0000000000000000000000000000000000000018" = "0x1" -"0x0000000000000000000000000000000000000019" = "0x1" -"0x000000000000000000000000000000000000001a" = "0x1" -"0x000000000000000000000000000000000000001b" = "0x1" -"0x000000000000000000000000000000000000001c" = "0x1" -"0x000000000000000000000000000000000000001d" = "0x1" -"0x000000000000000000000000000000000000001e" = "0x1" -"0x000000000000000000000000000000000000001f" = "0x1" -"0x0000000000000000000000000000000000000020" = "0x1" -"0x0000000000000000000000000000000000000021" = "0x1" -"0x0000000000000000000000000000000000000022" = "0x1" -"0x0000000000000000000000000000000000000023" = "0x1" -"0x0000000000000000000000000000000000000024" = "0x1" -"0x0000000000000000000000000000000000000025" = "0x1" -"0x0000000000000000000000000000000000000026" = "0x1" -"0x0000000000000000000000000000000000000027" = "0x1" -"0x0000000000000000000000000000000000000028" = "0x1" -"0x0000000000000000000000000000000000000029" = "0x1" -"0x000000000000000000000000000000000000002a" = "0x1" -"0x000000000000000000000000000000000000002b" = "0x1" -"0x000000000000000000000000000000000000002c" = "0x1" -"0x000000000000000000000000000000000000002d" = "0x1" -"0x000000000000000000000000000000000000002e" = "0x1" -"0x000000000000000000000000000000000000002f" = "0x1" -"0x0000000000000000000000000000000000000030" = "0x1" -"0x0000000000000000000000000000000000000031" = "0x1" -"0x0000000000000000000000000000000000000032" = "0x1" -"0x0000000000000000000000000000000000000033" = "0x1" -"0x0000000000000000000000000000000000000034" = "0x1" -"0x0000000000000000000000000000000000000035" = "0x1" -"0x0000000000000000000000000000000000000036" = "0x1" -"0x0000000000000000000000000000000000000037" = "0x1" -"0x0000000000000000000000000000000000000038" = "0x1" -"0x0000000000000000000000000000000000000039" = "0x1" -"0x000000000000000000000000000000000000003a" = "0x1" -"0x000000000000000000000000000000000000003b" = "0x1" 
-"0x000000000000000000000000000000000000003c" = "0x1" -"0x000000000000000000000000000000000000003d" = "0x1" -"0x000000000000000000000000000000000000003e" = "0x1" -"0x000000000000000000000000000000000000003f" = "0x1" -"0x0000000000000000000000000000000000000040" = "0x1" -"0x0000000000000000000000000000000000000041" = "0x1" -"0x0000000000000000000000000000000000000042" = "0x1" -"0x0000000000000000000000000000000000000043" = "0x1" -"0x0000000000000000000000000000000000000044" = "0x1" -"0x0000000000000000000000000000000000000045" = "0x1" -"0x0000000000000000000000000000000000000046" = "0x1" -"0x0000000000000000000000000000000000000047" = "0x1" -"0x0000000000000000000000000000000000000048" = "0x1" -"0x0000000000000000000000000000000000000049" = "0x1" -"0x000000000000000000000000000000000000004a" = "0x1" -"0x000000000000000000000000000000000000004b" = "0x1" -"0x000000000000000000000000000000000000004c" = "0x1" -"0x000000000000000000000000000000000000004d" = "0x1" -"0x000000000000000000000000000000000000004e" = "0x1" -"0x000000000000000000000000000000000000004f" = "0x1" -"0x0000000000000000000000000000000000000050" = "0x1" -"0x0000000000000000000000000000000000000051" = "0x1" -"0x0000000000000000000000000000000000000052" = "0x1" -"0x0000000000000000000000000000000000000053" = "0x1" -"0x0000000000000000000000000000000000000054" = "0x1" -"0x0000000000000000000000000000000000000055" = "0x1" -"0x0000000000000000000000000000000000000056" = "0x1" -"0x0000000000000000000000000000000000000057" = "0x1" -"0x0000000000000000000000000000000000000058" = "0x1" -"0x0000000000000000000000000000000000000059" = "0x1" -"0x000000000000000000000000000000000000005a" = "0x1" -"0x000000000000000000000000000000000000005b" = "0x1" -"0x000000000000000000000000000000000000005c" = "0x1" -"0x000000000000000000000000000000000000005d" = "0x1" -"0x000000000000000000000000000000000000005e" = "0x1" -"0x000000000000000000000000000000000000005f" = "0x1" -"0x0000000000000000000000000000000000000060" = "0x1" 
-"0x0000000000000000000000000000000000000061" = "0x1" -"0x0000000000000000000000000000000000000062" = "0x1" -"0x0000000000000000000000000000000000000063" = "0x1" -"0x0000000000000000000000000000000000000064" = "0x1" -"0x0000000000000000000000000000000000000065" = "0x1" -"0x0000000000000000000000000000000000000066" = "0x1" -"0x0000000000000000000000000000000000000067" = "0x1" -"0x0000000000000000000000000000000000000068" = "0x1" -"0x0000000000000000000000000000000000000069" = "0x1" -"0x000000000000000000000000000000000000006a" = "0x1" -"0x000000000000000000000000000000000000006b" = "0x1" -"0x000000000000000000000000000000000000006c" = "0x1" -"0x000000000000000000000000000000000000006d" = "0x1" -"0x000000000000000000000000000000000000006e" = "0x1" -"0x000000000000000000000000000000000000006f" = "0x1" -"0x0000000000000000000000000000000000000070" = "0x1" -"0x0000000000000000000000000000000000000071" = "0x1" -"0x0000000000000000000000000000000000000072" = "0x1" -"0x0000000000000000000000000000000000000073" = "0x1" -"0x0000000000000000000000000000000000000074" = "0x1" -"0x0000000000000000000000000000000000000075" = "0x1" -"0x0000000000000000000000000000000000000076" = "0x1" -"0x0000000000000000000000000000000000000077" = "0x1" -"0x0000000000000000000000000000000000000078" = "0x1" -"0x0000000000000000000000000000000000000079" = "0x1" -"0x000000000000000000000000000000000000007a" = "0x1" -"0x000000000000000000000000000000000000007b" = "0x1" -"0x000000000000000000000000000000000000007c" = "0x1" -"0x000000000000000000000000000000000000007d" = "0x1" -"0x000000000000000000000000000000000000007e" = "0x1" -"0x000000000000000000000000000000000000007f" = "0x1" -"0x0000000000000000000000000000000000000080" = "0x1" -"0x0000000000000000000000000000000000000081" = "0x1" -"0x0000000000000000000000000000000000000082" = "0x1" -"0x0000000000000000000000000000000000000083" = "0x1" -"0x0000000000000000000000000000000000000084" = "0x1" -"0x0000000000000000000000000000000000000085" = "0x1" 
-"0x0000000000000000000000000000000000000086" = "0x1" -"0x0000000000000000000000000000000000000087" = "0x1" -"0x0000000000000000000000000000000000000088" = "0x1" -"0x0000000000000000000000000000000000000089" = "0x1" -"0x000000000000000000000000000000000000008a" = "0x1" -"0x000000000000000000000000000000000000008b" = "0x1" -"0x000000000000000000000000000000000000008c" = "0x1" -"0x000000000000000000000000000000000000008d" = "0x1" -"0x000000000000000000000000000000000000008e" = "0x1" -"0x000000000000000000000000000000000000008f" = "0x1" -"0x0000000000000000000000000000000000000090" = "0x1" -"0x0000000000000000000000000000000000000091" = "0x1" -"0x0000000000000000000000000000000000000092" = "0x1" -"0x0000000000000000000000000000000000000093" = "0x1" -"0x0000000000000000000000000000000000000094" = "0x1" -"0x0000000000000000000000000000000000000095" = "0x1" -"0x0000000000000000000000000000000000000096" = "0x1" -"0x0000000000000000000000000000000000000097" = "0x1" -"0x0000000000000000000000000000000000000098" = "0x1" -"0x0000000000000000000000000000000000000099" = "0x1" -"0x000000000000000000000000000000000000009a" = "0x1" -"0x000000000000000000000000000000000000009b" = "0x1" -"0x000000000000000000000000000000000000009c" = "0x1" -"0x000000000000000000000000000000000000009d" = "0x1" -"0x000000000000000000000000000000000000009e" = "0x1" -"0x000000000000000000000000000000000000009f" = "0x1" -"0x00000000000000000000000000000000000000a0" = "0x1" -"0x00000000000000000000000000000000000000a1" = "0x1" -"0x00000000000000000000000000000000000000a2" = "0x1" -"0x00000000000000000000000000000000000000a3" = "0x1" -"0x00000000000000000000000000000000000000a4" = "0x1" -"0x00000000000000000000000000000000000000a5" = "0x1" -"0x00000000000000000000000000000000000000a6" = "0x1" -"0x00000000000000000000000000000000000000a7" = "0x1" -"0x00000000000000000000000000000000000000a8" = "0x1" -"0x00000000000000000000000000000000000000a9" = "0x1" -"0x00000000000000000000000000000000000000aa" = "0x1" 
-"0x00000000000000000000000000000000000000ab" = "0x1" -"0x00000000000000000000000000000000000000ac" = "0x1" -"0x00000000000000000000000000000000000000ad" = "0x1" -"0x00000000000000000000000000000000000000ae" = "0x1" -"0x00000000000000000000000000000000000000af" = "0x1" -"0x00000000000000000000000000000000000000b0" = "0x1" -"0x00000000000000000000000000000000000000b1" = "0x1" -"0x00000000000000000000000000000000000000b2" = "0x1" -"0x00000000000000000000000000000000000000b3" = "0x1" -"0x00000000000000000000000000000000000000b4" = "0x1" -"0x00000000000000000000000000000000000000b5" = "0x1" -"0x00000000000000000000000000000000000000b6" = "0x1" -"0x00000000000000000000000000000000000000b7" = "0x1" -"0x00000000000000000000000000000000000000b8" = "0x1" -"0x00000000000000000000000000000000000000b9" = "0x1" -"0x00000000000000000000000000000000000000ba" = "0x1" -"0x00000000000000000000000000000000000000bb" = "0x1" -"0x00000000000000000000000000000000000000bc" = "0x1" -"0x00000000000000000000000000000000000000bd" = "0x1" -"0x00000000000000000000000000000000000000be" = "0x1" -"0x00000000000000000000000000000000000000bf" = "0x1" -"0x00000000000000000000000000000000000000c0" = "0x1" -"0x00000000000000000000000000000000000000c1" = "0x1" -"0x00000000000000000000000000000000000000c2" = "0x1" -"0x00000000000000000000000000000000000000c3" = "0x1" -"0x00000000000000000000000000000000000000c4" = "0x1" -"0x00000000000000000000000000000000000000c5" = "0x1" -"0x00000000000000000000000000000000000000c6" = "0x1" -"0x00000000000000000000000000000000000000c7" = "0x1" -"0x00000000000000000000000000000000000000c8" = "0x1" -"0x00000000000000000000000000000000000000c9" = "0x1" -"0x00000000000000000000000000000000000000ca" = "0x1" -"0x00000000000000000000000000000000000000cb" = "0x1" -"0x00000000000000000000000000000000000000cc" = "0x1" -"0x00000000000000000000000000000000000000cd" = "0x1" -"0x00000000000000000000000000000000000000ce" = "0x1" -"0x00000000000000000000000000000000000000cf" = "0x1" 
-"0x00000000000000000000000000000000000000d0" = "0x1" -"0x00000000000000000000000000000000000000d1" = "0x1" -"0x00000000000000000000000000000000000000d2" = "0x1" -"0x00000000000000000000000000000000000000d3" = "0x1" -"0x00000000000000000000000000000000000000d4" = "0x1" -"0x00000000000000000000000000000000000000d5" = "0x1" -"0x00000000000000000000000000000000000000d6" = "0x1" -"0x00000000000000000000000000000000000000d7" = "0x1" -"0x00000000000000000000000000000000000000d8" = "0x1" -"0x00000000000000000000000000000000000000d9" = "0x1" -"0x00000000000000000000000000000000000000da" = "0x1" -"0x00000000000000000000000000000000000000db" = "0x1" -"0x00000000000000000000000000000000000000dc" = "0x1" -"0x00000000000000000000000000000000000000dd" = "0x1" -"0x00000000000000000000000000000000000000de" = "0x1" -"0x00000000000000000000000000000000000000df" = "0x1" -"0x00000000000000000000000000000000000000e0" = "0x1" -"0x00000000000000000000000000000000000000e1" = "0x1" -"0x00000000000000000000000000000000000000e2" = "0x1" -"0x00000000000000000000000000000000000000e3" = "0x1" -"0x00000000000000000000000000000000000000e4" = "0x1" -"0x00000000000000000000000000000000000000e5" = "0x1" -"0x00000000000000000000000000000000000000e6" = "0x1" -"0x00000000000000000000000000000000000000e7" = "0x1" -"0x00000000000000000000000000000000000000e8" = "0x1" -"0x00000000000000000000000000000000000000e9" = "0x1" -"0x00000000000000000000000000000000000000ea" = "0x1" -"0x00000000000000000000000000000000000000eb" = "0x1" -"0x00000000000000000000000000000000000000ec" = "0x1" -"0x00000000000000000000000000000000000000ed" = "0x1" -"0x00000000000000000000000000000000000000ee" = "0x1" -"0x00000000000000000000000000000000000000ef" = "0x1" -"0x00000000000000000000000000000000000000f0" = "0x1" -"0x00000000000000000000000000000000000000f1" = "0x1" -"0x00000000000000000000000000000000000000f2" = "0x1" -"0x00000000000000000000000000000000000000f3" = "0x1" -"0x00000000000000000000000000000000000000f4" = "0x1" 
-"0x00000000000000000000000000000000000000f5" = "0x1" -"0x00000000000000000000000000000000000000f6" = "0x1" -"0x00000000000000000000000000000000000000f7" = "0x1" -"0x00000000000000000000000000000000000000f8" = "0x1" -"0x00000000000000000000000000000000000000f9" = "0x1" -"0x00000000000000000000000000000000000000fa" = "0x1" -"0x00000000000000000000000000000000000000fb" = "0x1" -"0x00000000000000000000000000000000000000fc" = "0x1" -"0x00000000000000000000000000000000000000fd" = "0x1" -"0x00000000000000000000000000000000000000fe" = "0x1" -"0x00000000000000000000000000000000000000ff" = "0x1" -"0x31b98d14007bdee637298086988a0bbd31184523" = "0x200000000000000000000000000000000000000000000000000000000000000" - -[forks] -homestead = 1 -tangerine = 2 -spurious = 3 -byzantium = 1_035_301 -constantinople = 3_660_663 -petersburg = 4_321_234 -istanbul = 5_435_345 -berlin = 8_290_928 - -[params] -account_start_nonce = 0 -gas_limit_bound_divisor = 1024 -max_code_size = 24576 -maximum_extra_data_size = 65535 -min_gas_limit = 5000 -network_id = "0x4" - -[precompiles.0.0x0000000000000000000000000000000000000001] -name = "ecrecover" -pricing = { formula = "linear", params = { base = 3000.0, word = 0 } } - -[precompiles.0.0x0000000000000000000000000000000000000002] -name = "sha256" -pricing = { formula = "linear", params = { base = 60, word = 12 } } - -[precompiles.0.0x0000000000000000000000000000000000000003] -name = "ripemd160" -pricing = { formula = "linear", params = { base = 600, word = 120 } } - -[precompiles.0.0x0000000000000000000000000000000000000004] -name = "identity" -pricing = { formula = "linear", params = { base = 15, word = 3 } } - -# EIP 1108 transition at block 5_435_345 (0x52efd1) -[precompiles.1035301.0x0000000000000000000000000000000000000005] -name = "modexp" -pricing = { formula = "modexp", params = { divisor = 20 } } - -[precompiles.1035301.0x0000000000000000000000000000000000000006] -name = "alt_bn128_add" -pricing = { formula = "alt_bn128_const_operations", 
params = { price = 500 } } - -[precompiles.1035301.0x0000000000000000000000000000000000000007] -name = "alt_bn128_mul" -pricing = { formula = "alt_bn128_const_operations", params = { price = 40000 } } - -[precompiles.1035301.0x0000000000000000000000000000000000000008] -name = "alt_bn128_pairing" -pricing = { formula = "alt_bn128_pairing", params = { base = 100000, pair = 80000 } } - -# EIP 1108 transition at block 5_435_345 (0x52efd1) -[precompiles.5435345.0x0000000000000000000000000000000000000006] -name = "alt_bn128_add" -pricing = { formula = "alt_bn128_const_operations", params = { price = 150 } } - -[precompiles.5435345.0x0000000000000000000000000000000000000007] -name = "alt_bn128_mul" -pricing = { formula = "alt_bn128_const_operations", params = { price = 40000 } } - -[precompiles.5435345.0x0000000000000000000000000000000000000008] -name = "alt_bn128_pairing" -pricing = { formula = "alt_bn128_pairing", params = { base = 45000, pair = 34000 } } - -[precompiles.5435345.0x0000000000000000000000000000000000000009] -name = "blake2_f" -pricing = { formula = "blake2_f", params = { gas_per_round = 1 } } \ No newline at end of file diff --git a/cmd/cons/commands/root.go b/cmd/cons/commands/root.go deleted file mode 100644 index 023ff094d52..00000000000 --- a/cmd/cons/commands/root.go +++ /dev/null @@ -1,69 +0,0 @@ -package commands - -import ( - "fmt" - "os" - - "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/erigon/cmd/utils" - "github.com/ledgerwatch/erigon/common/paths" - "github.com/ledgerwatch/erigon/internal/debug" - "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" -) - -var ( - consensusAddr string // Address of the consensus engine : - datadirCli string // Path to the working dir - config string // `file:`` to specify config file in file system, `embed:`` to use embedded file, `test` to register test interface and receive config from test 
driver -) - -func init() { - utils.CobraFlags(rootCmd, append(debug.Flags, utils.MetricFlags...)) -} - -var rootCmd = &cobra.Command{ - Use: "consensus", - Short: "consensus is Proof Of Concept for separare consensus engine", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - if err := debug.SetupCobra(cmd); err != nil { - panic(err) - } - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - debug.Exit() - }, -} - -func Execute() { - ctx, _ := common.RootContext() - if err := rootCmd.ExecuteContext(ctx); err != nil { - fmt.Println(err) - os.Exit(1) - } -} - -func must(err error) { - if err != nil { - panic(err) - } -} - -func withDataDir(cmd *cobra.Command) { - cmd.Flags().StringVar(&datadirCli, "datadir", paths.DefaultDataDir(), "directory where databases and temporary files are kept") - must(cmd.MarkFlagDirname("datadir")) -} - -func withApiAddr(cmd *cobra.Command) { - cmd.Flags().StringVar(&consensusAddr, "consensus.api.addr", "localhost:9093", "address to listen to for consensus engine api :") -} - -func withConfig(cmd *cobra.Command) { - cmd.Flags().StringVar(&config, "config", "", "`file:` to specify config file in file system, `embed:` to use embedded file, `test` to register test interface and receive config from test driver") -} - -func openDB(path string, logger log.Logger) kv.RwDB { - return mdbx.NewMDBX(logger).Path(path).MustOpen() -} diff --git a/cmd/cons/main.go b/cmd/cons/main.go deleted file mode 100644 index fba4633bbca..00000000000 --- a/cmd/cons/main.go +++ /dev/null @@ -1,9 +0,0 @@ -package main - -import ( - "github.com/ledgerwatch/erigon/cmd/cons/commands" -) - -func main() { - commands.Execute() -} diff --git a/cmd/devnet/README.md b/cmd/devnet/README.md new file mode 100644 index 00000000000..fe97104cc27 --- /dev/null +++ b/cmd/devnet/README.md @@ -0,0 +1,4 @@ +# Devnet + +This is an automated tool run on the devnet that simulates p2p connection between nodes and ultimately tests operations on them. 
+See [DEV_CHAIN](https://github.com/ledgerwatch/erigon/blob/devel/DEV_CHAIN.md) for a manual version. \ No newline at end of file diff --git a/cmd/devnet/devnetutils/utils.go b/cmd/devnet/devnetutils/utils.go new file mode 100644 index 00000000000..54656dd6390 --- /dev/null +++ b/cmd/devnet/devnetutils/utils.go @@ -0,0 +1,66 @@ +package devnetutils + +import ( + "fmt" + "os/exec" +) + +// ClearDevDB cleans up the dev folder used for the operations +func ClearDevDB() { + fmt.Printf("\nDeleting ./dev folders\n") + + cmd := exec.Command("rm", "-rf", "./dev0") + err := cmd.Run() + if err != nil { + fmt.Println("Error occurred clearing Dev DB") + panic("could not clear dev DB") + } + + cmd2 := exec.Command("rm", "-rf", "./dev2") + err2 := cmd2.Run() + if err2 != nil { + fmt.Println("Error occurred clearing Dev DB") + panic("could not clear dev2 DB") + } + + fmt.Printf("SUCCESS => Deleted ./dev0 and ./dev2\n") +} + +func DeleteLogs() { + fmt.Printf("\nRemoving old logs to create new ones...\nBefore re-running the devnet tool, make sure to copy out old logs if you need them!!!\n\n") + + cmd := exec.Command("rm", "-rf", "./erigon_node_1") + err := cmd.Run() + if err != nil { + fmt.Println("Error occurred removing log node_1") + panic("could not remove old logs") + } + + cmd2 := exec.Command("rm", "-rf", "./erigon_node_2") + err2 := cmd2.Run() + if err2 != nil { + fmt.Println("Error occurred removing log node_2") + panic("could not remove old logs") + } +} + +// UniqueIDFromEnode returns the unique ID from a node's enode, removing the `?discport=0` part +func UniqueIDFromEnode(enode string) (string, error) { + if len(enode) == 0 { + return "", fmt.Errorf("invalid enode string") + } + + // iterate through characters in the string until we reach '?' + // using index iteration because enode characters have single codepoints + var i int + for i < len(enode) && enode[i] != byte('?') { + i++ + } + + // if '?' 
is not found in the enode, return an error + if i == len(enode) { + return "", fmt.Errorf("invalid enode string") + } + + return enode[:i], nil +} diff --git a/cmd/devnet/devnetutils/utils_test.go b/cmd/devnet/devnetutils/utils_test.go new file mode 100644 index 00000000000..2c447005b70 --- /dev/null +++ b/cmd/devnet/devnetutils/utils_test.go @@ -0,0 +1,40 @@ +package devnetutils + +import "testing" + +func TestUniqueIDFromEnode(t *testing.T) { + testCases := []struct { + input string + expectedRes string + shouldError bool + }{ + { + input: "", + expectedRes: "", + shouldError: true, + }, + { + input: "enode://11c368e7a2775951d66ff155a982844ccd5219d10b53e310001e1e40c6a4e76c2f6e42f39acc1e4015cd3b7428765125214d89b07ca5fa2c19ac94746fc360b0@127.0.0.1:63380?discport=0", + expectedRes: "enode://11c368e7a2775951d66ff155a982844ccd5219d10b53e310001e1e40c6a4e76c2f6e42f39acc1e4015cd3b7428765125214d89b07ca5fa2c19ac94746fc360b0@127.0.0.1:63380", + shouldError: false, + }, + { + input: "enode://11c368e7a2775951d66ff155a982844ccd5219d10b53e310001e1e40c6a4e76c2f6e42f39acc1e4015cd3b7428765125214d89b07ca5fa2c19ac94746fc360b0@127.0.0.1:63380discport=0", + expectedRes: "", + shouldError: true, + }, + } + + for _, testCase := range testCases { + got, err := UniqueIDFromEnode(testCase.input) + if testCase.shouldError && err == nil { + t.Errorf("expected error to happen, got no error") + } + if !testCase.shouldError && err != nil { + t.Errorf("expected no error, got %s", err) + } + if got != testCase.expectedRes { + t.Errorf("expected %s, got %s", testCase.expectedRes, got) + } + } +} diff --git a/cmd/devnet/main.go b/cmd/devnet/main.go new file mode 100644 index 00000000000..5172a2cfbc4 --- /dev/null +++ b/cmd/devnet/main.go @@ -0,0 +1,24 @@ +package main + +import ( + "sync" + + "github.com/ledgerwatch/erigon/cmd/devnet/devnetutils" + "github.com/ledgerwatch/erigon/cmd/devnet/node" +) + +func main() { + // wait group variable to prevent main function from terminating until routines 
are finished + var wg sync.WaitGroup + + // remove the old logs from previous runs + devnetutils.DeleteLogs() + + defer devnetutils.ClearDevDB() + + // start the first erigon node in a go routine + node.Start(&wg) + + // wait for all goroutines to complete before exiting + wg.Wait() +} diff --git a/cmd/devnet/models/errors.go b/cmd/devnet/models/errors.go new file mode 100644 index 00000000000..dbff4482475 --- /dev/null +++ b/cmd/devnet/models/errors.go @@ -0,0 +1,8 @@ +package models + +import "errors" + +var ( + // ErrInvalidArgument for invalid arguments + ErrInvalidArgument = errors.New("invalid argument") +) diff --git a/cmd/devnet/models/model.go b/cmd/devnet/models/model.go new file mode 100644 index 00000000000..5438b91e449 --- /dev/null +++ b/cmd/devnet/models/model.go @@ -0,0 +1,61 @@ +package models + +import ( + "fmt" + "github.com/ledgerwatch/erigon/cmd/rpctest/rpctest" + "github.com/ledgerwatch/erigon/p2p" +) + +const ( + // BuildDirArg is the build directory for the devnet executable + BuildDirArg = "./build/bin/devnet" + // DataDirArg is the datadir flag + DataDirArg = "--datadir" + // ChainArg is the chain flag + ChainArg = "--chain" + // DevPeriodArg is the dev.period flag + DevPeriodArg = "--dev.period" + // VerbosityArg is the verbosity flag + VerbosityArg = "--verbosity" + // Mine is the mine flag + Mine = "--mine" + // NoDiscover is the nodiscover flag + NoDiscover = "--nodiscover" + // PrivateApiAddrArg is the private.api.addr flag + PrivateApiAddrArg = "--private.api.addr" + // StaticPeersArg is the staticpeers flag + StaticPeersArg = "--staticpeers" + // HttpApiArg is the http.api flag + HttpApiArg = "--http.api" + + // DataDirParam is the datadir parameter + DataDirParam = "./dev" + // ChainParam is the chain parameter + ChainParam = "dev" + // DevPeriodParam is the dev.period parameter + DevPeriodParam = "30" + // VerbosityParam is the verbosity parameter + VerbosityParam = "0" + // PrivateApiParamMine is the private.api.addr parameter 
for the mining node + PrivateApiParamMine = "localhost:9090" + // PrivateApiParamNoMine is the private.api.addr parameter for the non-mining node + PrivateApiParamNoMine = "localhost:9091" + + // ErigonUrl is the default url for rpc connections + ErigonUrl = "http://localhost:8545" + // ErigonLogFilePrefix is the default file prefix for logging erigon node info and errors + ErigonLogFilePrefix = "erigon_node_" +) + +type AdminNodeInfoResponse struct { + rpctest.CommonResponse + Result p2p.NodeInfo `json:"result"` +} + +// ParameterFromArgument merges the argument and parameter and returns a flag input string +func ParameterFromArgument(arg, param string) (string, error) { + if arg == "" { + return "", ErrInvalidArgument + } + return fmt.Sprintf("%s=%s", arg, param), nil +} diff --git a/cmd/devnet/models/model_test.go b/cmd/devnet/models/model_test.go new file mode 100644 index 00000000000..80e9e47b08e --- /dev/null +++ b/cmd/devnet/models/model_test.go @@ -0,0 +1,32 @@ +package models + +import ( + "fmt" + "testing" +) + +func TestParameterFromArgument(t *testing.T) { + enode := fmt.Sprintf("%q", "1234567") + testCases := []struct { + argInput string + paramInput string + expectedRes string + expectedErr error + }{ + {"--datadir", "./dev", "--datadir=./dev", nil}, + {"--chain", "dev", "--chain=dev", nil}, + {"--dev.period", "30", "--dev.period=30", nil}, + {"--staticpeers", enode, "--staticpeers=" + enode, nil}, + {"", "30", "", ErrInvalidArgument}, + } + + for _, testCase := range testCases { + got, err := ParameterFromArgument(testCase.argInput, testCase.paramInput) + if got != testCase.expectedRes { + t.Errorf("expected %s, got %s", testCase.expectedRes, got) + } + if err != testCase.expectedErr { + t.Errorf("expected error: %s, got error: %s", testCase.expectedErr, err) + } + } +} diff --git a/cmd/devnet/node/node.go b/cmd/devnet/node/node.go new file mode 100644 index 00000000000..80285474ada --- /dev/null +++ b/cmd/devnet/node/node.go @@ -0,0 +1,146 @@ 
+package node + +import ( + "fmt" + "os" + "sync" + "time" + + "github.com/urfave/cli" + + "github.com/ledgerwatch/erigon-lib/common/dbg" + "github.com/ledgerwatch/erigon/cmd/devnet/devnetutils" + "github.com/ledgerwatch/erigon/cmd/devnet/models" + "github.com/ledgerwatch/erigon/cmd/devnet/requests" + "github.com/ledgerwatch/erigon/params" + erigonapp "github.com/ledgerwatch/erigon/turbo/app" + erigoncli "github.com/ledgerwatch/erigon/turbo/cli" + "github.com/ledgerwatch/erigon/turbo/node" + "github.com/ledgerwatch/log/v3" +) + +// Holds the number id of each node on the network, the first node is node 0 +var nodeNumber int + +// Start starts the process for two erigon nodes running on the dev chain +func Start(wg *sync.WaitGroup) { + // add one goroutine to the wait-list + wg.Add(1) + + // start the first node + go StartNode(wg, miningNodeArgs()) + + // sleep for a while to allow first node to start + time.Sleep(time.Second * 10) + + // get the enode of the first node + enode, err := getEnode() + if err != nil { + // TODO: Log the error, it means node did not start well + fmt.Printf("error starting the node: %s\n", err) + } + + // add one goroutine to the wait-list + wg.Add(1) + + // start the second node, connect it to the mining node with the enode + go StartNode(wg, nonMiningNodeArgs(2, enode)) +} + +// StartNode starts an erigon node on the dev chain +func StartNode(wg *sync.WaitGroup, args []string) { + fmt.Printf("Arguments for node %d are: %v\n", nodeNumber, args) + + // catch any errors and avoid panics if an error occurs + defer func() { + panicResult := recover() + if panicResult == nil { + wg.Done() + return + } + + log.Error("catch panic", "err", panicResult, "stack", dbg.Stack()) + wg.Done() + os.Exit(1) + }() + + app := erigonapp.MakeApp(runNode, erigoncli.DefaultFlags) + nodeNumber++ // increment the number of nodes on the network + if err := app.Run(args); err != nil { + _, printErr := fmt.Fprintln(os.Stderr, err) + if printErr != nil { + 
log.Warn("Error writing app run error to stderr", "err", printErr) + } + wg.Done() + os.Exit(1) + } +} + +// runNode configures, creates and serves an erigon node +func runNode(ctx *cli.Context) { + logger := log.New() + + handler, err := log.FileHandler(models.ErigonLogFilePrefix+fmt.Sprintf("%d", nodeNumber), log.LogfmtFormat(), 1<<27) // 128Mb + if err != nil { + log.Error("Issue setting up log file handler", "err", err) + return + } + + logger.SetHandler(handler) + log.SetRootHandler(handler) + + // Initializing the node and providing the current git commit there + logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit) + + nodeCfg := node.NewNodConfigUrfave(ctx) + ethCfg := node.NewEthConfigUrfave(ctx, nodeCfg) + + ethNode, err := node.New(nodeCfg, ethCfg, logger) + if err != nil { + log.Error("Devnet startup", "err", err) + return + } + + err = ethNode.Serve() + if err != nil { + log.Error("error while serving Devnet node", "err", err) + } +} + +// miningNodeArgs returns custom args for starting a mining node +func miningNodeArgs() []string { + dataDir, _ := models.ParameterFromArgument(models.DataDirArg, models.DataDirParam+fmt.Sprintf("%d", nodeNumber)) + chainType, _ := models.ParameterFromArgument(models.ChainArg, models.ChainParam) + devPeriod, _ := models.ParameterFromArgument(models.DevPeriodArg, models.DevPeriodParam) + verbosity, _ := models.ParameterFromArgument(models.VerbosityArg, models.VerbosityParam) + privateApiAddr, _ := models.ParameterFromArgument(models.PrivateApiAddrArg, models.PrivateApiParamMine) + httpApi, _ := models.ParameterFromArgument(models.HttpApiArg, "admin,eth,erigon,web3,net,debug,trace,txpool,parity") + + return []string{models.BuildDirArg, dataDir, chainType, privateApiAddr, models.Mine, httpApi, devPeriod, verbosity} +} + +// nonMiningNodeArgs returns custom args for starting a non-mining node +func nonMiningNodeArgs(nodeNumber int, enode string) []string { + 
dataDir, _ := models.ParameterFromArgument(models.DataDirArg, models.DataDirParam+fmt.Sprintf("%d", nodeNumber)) + chainType, _ := models.ParameterFromArgument(models.ChainArg, models.ChainParam) + verbosity, _ := models.ParameterFromArgument(models.VerbosityArg, models.VerbosityParam) + privateApiAddr, _ := models.ParameterFromArgument(models.PrivateApiAddrArg, models.PrivateApiParamNoMine) + staticPeers, _ := models.ParameterFromArgument(models.StaticPeersArg, enode) + + return []string{models.BuildDirArg, dataDir, chainType, privateApiAddr, staticPeers, models.NoDiscover, verbosity} +} + +// getEnode returns the enode of the mining node +func getEnode() (string, error) { + nodeInfo, err := requests.AdminNodeInfo(0) + if err != nil { + return "", err + } + + enode, err := devnetutils.UniqueIDFromEnode(nodeInfo.Enode) + if err != nil { + return "", err + } + + return enode, nil +} diff --git a/cmd/devnet/requests/request_generator.go b/cmd/devnet/requests/request_generator.go new file mode 100644 index 00000000000..6e2f12ec449 --- /dev/null +++ b/cmd/devnet/requests/request_generator.go @@ -0,0 +1,88 @@ +package requests + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/ledgerwatch/erigon/cmd/devnet/models" + "github.com/ledgerwatch/erigon/cmd/rpctest/rpctest" + "github.com/ledgerwatch/log/v3" +) + +func post(client *http.Client, url, request string, response interface{}) error { + start := time.Now() + r, err := client.Post(url, "application/json", strings.NewReader(request)) // nolint:bodyclose + if err != nil { + return fmt.Errorf("client failed to make post request: %v", err) + } + defer func(Body io.ReadCloser) { + closeErr := Body.Close() + if closeErr != nil { + log.Warn("body close", "err", closeErr) + } + }(r.Body) + + if r.StatusCode != 200 { + return fmt.Errorf("status %s", r.Status) + } + + b, err := io.ReadAll(r.Body) + if err != nil { + return fmt.Errorf("failed to readAll from body: %s", err) + } + + err 
= json.Unmarshal(b, &response) + if err != nil { + return fmt.Errorf("failed to unmarshal response: %s", err) + } + + log.Info("Got in", "time", time.Since(start).Seconds()) + return nil +} + +// RequestGenerator issues JSON-RPC requests with an incrementing request id +type RequestGenerator struct { + reqID int + client *http.Client +} + +// call posts the request body to the caller-supplied target URL and wraps the outcome in a CallResult +func (req *RequestGenerator) call(target string, method, body string, response interface{}) rpctest.CallResult { + start := time.Now() + // use the target parameter rather than a hard-coded URL so call works for any endpoint + err := post(req.client, target, body, response) + return rpctest.CallResult{ + RequestBody: body, + Target: target, + Took: time.Since(start), + RequestID: req.reqID, + Method: method, + Err: err, + } +} + +// Erigon sends the request to the local erigon node RPC endpoint +func (req *RequestGenerator) Erigon(method, body string, response interface{}) rpctest.CallResult { + return req.call(models.ErigonUrl, method, body, response) +} + +// getAdminNodeInfo builds the admin_nodeInfo JSON-RPC request body +func (req *RequestGenerator) getAdminNodeInfo() string { + const template = `{"jsonrpc":"2.0","method":"admin_nodeInfo","id":%d}` + return fmt.Sprintf(template, req.reqID) +} + +// initialiseRequestGenerator returns a RequestGenerator with a non-zero request id +func initialiseRequestGenerator(reqId int) *RequestGenerator { + var client = &http.Client{ + Timeout: time.Second * 600, + } + + reqGen := RequestGenerator{ + client: client, + reqID: reqId, + } + if reqGen.reqID == 0 { + reqGen.reqID++ + } + + return &reqGen +} diff --git a/cmd/devnet/requests/requests.go b/cmd/devnet/requests/requests.go new file mode 100644 index 00000000000..b5097d1de21 --- /dev/null +++ b/cmd/devnet/requests/requests.go @@ -0,0 +1,18 @@ +package requests + +import ( + "fmt" + "github.com/ledgerwatch/erigon/cmd/devnet/models" + "github.com/ledgerwatch/erigon/p2p" +) + +func AdminNodeInfo(reqId int) (p2p.NodeInfo, error) { + reqGen := initialiseRequestGenerator(reqId) + var b models.AdminNodeInfoResponse + + if res := reqGen.Erigon("admin_nodeInfo", reqGen.getAdminNodeInfo(), &b); res.Err != nil { + return p2p.NodeInfo{}, fmt.Errorf("failed to get admin node info: %v", res.Err) + } + + return b.Result, nil +} diff --git a/cmd/devnettest/commands/account.go b/cmd/devnettest/commands/account.go deleted
file mode 100644 index 80ee5b375de..00000000000 --- a/cmd/devnettest/commands/account.go +++ /dev/null @@ -1,69 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/ledgerwatch/erigon/cmd/devnettest/requests" - "github.com/ledgerwatch/erigon/common" - "github.com/spf13/cobra" -) - -const ( - devAddress = "0x67b1d87101671b127f5f8714789C7192f7ad340e" - blockNum = "latest" -) - -func init() { - rootCmd.AddCommand(getBalanceCmd) - rootCmd.AddCommand(getTransactionCountCmd) -} - -var getBalanceCmd = &cobra.Command{ - Use: "get-balance", - Short: fmt.Sprintf("Checks balance for the address: %q", devAddress), - Run: func(cmd *cobra.Command, args []string) { - callGetBalance(devAddress, blockNum, 0) - }, -} - -var getTransactionCountCmd = &cobra.Command{ - Use: "get-transaction-count", - Short: fmt.Sprintf("Gets nonce for the address: %q", devAddress), - Run: func(cmd *cobra.Command, args []string) { - callGetTransactionCount(devAddress, blockNum, 0) - }, -} - -func callGetBalance(addr, blockNum string, checkBal uint64) { - fmt.Printf("Getting balance for address: %q...\n", addr) - address := common.HexToAddress(addr) - bal, err := requests.GetBalance(reqId, address, blockNum) - if err != nil { - fmt.Printf("FAILURE => %v\n", err) - return - } - - if checkBal > 0 && checkBal != bal { - fmt.Printf("FAILURE => Balance should be %d, got %d\n", checkBal, bal) - return - } - - fmt.Printf("SUCCESS => Balance: %d\n", bal) -} - -func callGetTransactionCount(addr, blockNum string, checkNonce uint64) { - fmt.Printf("Getting nonce for address: %q...\n", addr) - address := common.HexToAddress(addr) - nonce, err := requests.GetTransactionCountCmd(reqId, address, blockNum) - if err != nil { - fmt.Printf("FAILURE => %v\n", err) - return - } - - if checkNonce > 0 && checkNonce != nonce { - fmt.Printf("FAILURE => Nonce should be %d, got %d\n", checkNonce, nonce) - return - } - - fmt.Printf("SUCCESS => Nonce: %d\n", nonce) -} diff --git a/cmd/devnettest/commands/all.go 
b/cmd/devnettest/commands/all.go deleted file mode 100644 index 4da64a1643e..00000000000 --- a/cmd/devnettest/commands/all.go +++ /dev/null @@ -1,50 +0,0 @@ -package commands - -import ( - "fmt" - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(allCmd) -} - -var allCmd = &cobra.Command{ - Use: "all", - Short: "Runs all the simulation tests for erigon devnet", - Run: func(cmd *cobra.Command, args []string) { - // Test connection to JSON RPC - fmt.Println("Mocking get requests to JSON RPC...") - callMockGetRequest() - fmt.Println() - - // First get balance of the receiver's account - callGetBalance(recvAddr, blockNum, 0) - fmt.Println() - - // Send a token from the dev address to the receiver's address - callSendRegularTxAndSearchBlock(sendValue, devAddress, recvAddr, true) - fmt.Println() - - // Check the balance to make sure the receiver received such token - callGetBalance(recvAddr, blockNum, sendValue) - fmt.Println() - - // Get the nonce of the devAddress, it should be 1 - callGetTransactionCount(devAddress, blockNum, 1) - fmt.Println() - - // Create a contract transaction signed by the dev address and emit a log for it - // callContractTx() - // time.Sleep(erigon.DevPeriod * 2 * time.Second) - // fmt.Println() - - // Get the nonce of the devAddress, check that it is 3 - // callGetTransactionCount(devAddress, blockNum, 3) - // fmt.Println() - - // Confirm that the txpool is empty (meaning all txs have been mined) - fmt.Println("Confirming the tx pool is empty...") - showTxPoolContent() - }, -} diff --git a/cmd/devnettest/commands/block.go b/cmd/devnettest/commands/block.go deleted file mode 100644 index a688ea5c170..00000000000 --- a/cmd/devnettest/commands/block.go +++ /dev/null @@ -1,97 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "time" - - "github.com/ledgerwatch/erigon/cmd/devnettest/erigon" - "github.com/ledgerwatch/erigon/cmd/devnettest/requests" - "github.com/ledgerwatch/erigon/cmd/devnettest/services" - 
"github.com/spf13/cobra" -) - -const ( - recvAddr = "0x71562b71999873DB5b286dF957af199Ec94617F7" - sendValue uint64 = 10000 -) - -func init() { - rootCmd.AddCommand(sendTxCmd) -} - -var sendTxCmd = &cobra.Command{ - Use: "send-tx", - Short: "Sends a transaction", - Run: func(cmd *cobra.Command, args []string) { - callSendRegularTxAndSearchBlock(sendValue, recvAddr, devAddress, true) - }, -} - -func callSendRegularTxAndSearchBlock(value uint64, fromAddr, toAddr string, search bool) { - fmt.Printf("Sending %d ETH from %q to %q...\n", value, fromAddr, toAddr) - - nonce, err := services.GetNonce(reqId) - if err != nil { - fmt.Printf("failed to get latest nonce: %v\n", err) - return - } - - // subscriptionContract is the handler to the contract for further operations - signedTx, _, _, _, err := services.CreateTransaction("regular", toAddr, value, nonce) - if err != nil { - fmt.Printf("failed to create transaction: %v\n", err) - return - } - - hash, err := requests.SendTx(reqId, signedTx) - if err != nil { - fmt.Printf("failed to send transaction: %v\n", err) - return - } - - fmt.Printf("SUCCESS => Tx submitted, adding tx with hash %q to txpool\n", hash) - - if search { - if _, err := services.SearchBlockForTx(*hash); err != nil { - fmt.Printf("error searching block for tx: %v\n", err) - return - } - } - - err = services.ApplyTransaction(context.Background(), *signedTx) - if err != nil { - fmt.Printf("failed to apply transaction: %v\n", err) - return - } -} - -func callContractTx() { - nonce, err := services.GetNonce(reqId) - if err != nil { - fmt.Printf("failed to get latest nonce: %v\n", err) - return - } - - // subscriptionContract is the handler to the contract for further operations - signedTx, address, subscriptionContract, transactOpts, err := services.CreateTransaction("contract", "", 0, nonce) - if err != nil { - fmt.Printf("failed to create transaction: %v\n", err) - return - } - - fmt.Println("Creating contract tx...") - hash, err := requests.SendTx(reqId, 
signedTx) - if err != nil { - fmt.Printf("failed to send transaction: %v\n", err) - return - } - fmt.Printf("SUCCESS => Tx submitted, adding tx with hash %q to txpool\n", hash) - - time.Sleep(erigon.DevPeriod * time.Second) - - if err := services.EmitEventAndGetLogs(reqId, subscriptionContract, transactOpts, address); err != nil { - fmt.Printf("failed to emit events: %v\n", err) - return - } -} diff --git a/cmd/devnettest/commands/event.go b/cmd/devnettest/commands/event.go deleted file mode 100644 index bd59b052c6e..00000000000 --- a/cmd/devnettest/commands/event.go +++ /dev/null @@ -1,40 +0,0 @@ -package commands - -import ( - "fmt" - "github.com/ledgerwatch/erigon/cmd/devnettest/services" - "github.com/spf13/cobra" - "sync" -) - -var ( - //eventAddrs = []string{devAddress, recvAddr} - eventTopics []string -) - -func init() { - rootCmd.AddCommand(LogsCmd) -} - -var LogsCmd = &cobra.Command{ - Use: "logs", - Short: "Subscribes to log event sends a notification each time a new log appears", - Run: func(cmd *cobra.Command, args []string) { - callLogs() - }, -} - -var wg sync.WaitGroup - -func callLogs() { - wg.Add(1) - go func() { - if err := services.Logs([]string{}, eventTopics); err != nil { - fmt.Printf("could not subscribe to log events: %v\n", err) - } - defer wg.Done() - }() - wg.Wait() - - callContractTx() -} diff --git a/cmd/devnettest/commands/parity.go b/cmd/devnettest/commands/parity.go deleted file mode 100644 index e07d4169cba..00000000000 --- a/cmd/devnettest/commands/parity.go +++ /dev/null @@ -1,42 +0,0 @@ -package commands - -import ( - "fmt" - "github.com/ledgerwatch/erigon/cmd/devnettest/services" - "strings" - - "github.com/ledgerwatch/erigon/cmd/devnettest/requests" - "github.com/ledgerwatch/erigon/common" - "github.com/spf13/cobra" -) - -var ( - offsetAddr string - quantity int -) - -func init() { - //listStorageKeysCmd.Flags().StringVar(&services.DevAddress, "addr", "", "String address to list keys") - 
//listStorageKeysCmd.MarkFlagRequired("addr") - //listStorageKeysCmd.Flags().StringVar(&offsetAddr, "offset", "", "Offset storage key from which the batch should start") - //listStorageKeysCmd.Flags().IntVar(&quantity, "quantity", 10, "Integer number of addresses to display in a batch") - //listStorageKeysCmd.Flags().StringVar(&blockNum, "block", "latest", "Integer block number, or the string 'latest', 'earliest' or 'pending'; now only 'latest' is available") - - rootCmd.AddCommand(listStorageKeysCmd) -} - -var listStorageKeysCmd = &cobra.Command{ - Use: "parity-list", - Short: "Returns all storage keys of the given address", - RunE: func(cmd *cobra.Command, args []string) error { - if !common.IsHexAddress(services.DevAddress) { - return fmt.Errorf("address: %v, is not a valid hex address\n", services.DevAddress) - } - toAddress := common.HexToAddress(services.DevAddress) - offset := common.Hex2Bytes(strings.TrimSuffix(offsetAddr, "0x")) - if err := requests.ParityList(reqId, toAddress, quantity, offset, blockNum); err != nil { - fmt.Printf("error getting parity list: %v\n", err) - } - return nil - }, -} diff --git a/cmd/devnettest/commands/requests.go b/cmd/devnettest/commands/requests.go deleted file mode 100644 index d34bc363037..00000000000 --- a/cmd/devnettest/commands/requests.go +++ /dev/null @@ -1,25 +0,0 @@ -package commands - -import ( - "fmt" - "github.com/ledgerwatch/erigon/cmd/devnettest/requests" - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(mockRequestCmd) -} - -var mockRequestCmd = &cobra.Command{ - Use: "mock", - Short: "Mocks a request on the devnet", - Run: func(cmd *cobra.Command, args []string) { - callMockGetRequest() - }, -} - -func callMockGetRequest() { - if err := requests.MockGetRequest(reqId); err != nil { - fmt.Printf("error mocking get request: %v\n", err) - } -} diff --git a/cmd/devnettest/commands/root.go b/cmd/devnettest/commands/root.go deleted file mode 100644 index 8cecb0f03ed..00000000000 --- 
a/cmd/devnettest/commands/root.go +++ /dev/null @@ -1,23 +0,0 @@ -package commands - -import ( - "github.com/spf13/cobra" -) - -var ( - reqId int -) - -func init() { - rootCmd.PersistentFlags().IntVar(&reqId, "req-id", 0, "Defines number of request id") -} - -var rootCmd = &cobra.Command{ - Use: "devnettest", - Short: "Devnettest root command", -} - -// Execute executes the root command. -func Execute() error { - return rootCmd.Execute() -} diff --git a/cmd/devnettest/commands/tx.go b/cmd/devnettest/commands/tx.go deleted file mode 100644 index 2710002889c..00000000000 --- a/cmd/devnettest/commands/tx.go +++ /dev/null @@ -1,26 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/ledgerwatch/erigon/cmd/devnettest/requests" - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(txPoolCmd) -} - -var txPoolCmd = &cobra.Command{ - Use: "txpool-content", - Short: "Gets content of txpool", - Run: func(cmd *cobra.Command, args []string) { - showTxPoolContent() - }, -} - -func showTxPoolContent() { - if err := requests.TxpoolContent(reqId); err != nil { - fmt.Printf("error getting txpool content: %v\n", err) - } -} diff --git a/cmd/devnettest/contracts/build/Subscription.abi b/cmd/devnettest/contracts/build/Subscription.abi deleted file mode 100644 index ccb0c274f26..00000000000 --- a/cmd/devnettest/contracts/build/Subscription.abi +++ /dev/null @@ -1 +0,0 @@ -[{"anonymous":false,"inputs":[],"name":"SubscriptionEvent","type":"event"},{"stateMutability":"nonpayable","type":"fallback"}] \ No newline at end of file diff --git a/cmd/devnettest/contracts/build/Subscription.bin b/cmd/devnettest/contracts/build/Subscription.bin deleted file mode 100644 index 2c423f4bd32..00000000000 --- a/cmd/devnettest/contracts/build/Subscription.bin +++ /dev/null @@ -1 +0,0 @@ 
-6080604052348015600f57600080fd5b50607180601d6000396000f3fe6080604052348015600f57600080fd5b506040517f67abc7edb0ab50964ef0e90541d39366b9c69f6f714520f2ff4570059ee8ad8090600090a100fea264697066735822122045a70478ef4f6a283c0e153ad72ec6731dc9ee2e1c191c7334b74dea21a92eaf64736f6c634300080c0033 \ No newline at end of file diff --git a/cmd/devnettest/contracts/gen_subscription.go b/cmd/devnettest/contracts/gen_subscription.go deleted file mode 100644 index f308506f4b9..00000000000 --- a/cmd/devnettest/contracts/gen_subscription.go +++ /dev/null @@ -1,343 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package contracts - -import ( - "math/big" - "strings" - - ethereum "github.com/ledgerwatch/erigon" - "github.com/ledgerwatch/erigon/accounts/abi" - "github.com/ledgerwatch/erigon/accounts/abi/bind" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription -) - -// SubscriptionABI is the input ABI used to generate the binding from. -const SubscriptionABI = "[{\"anonymous\":false,\"inputs\":[],\"name\":\"SubscriptionEvent\",\"type\":\"event\"},{\"stateMutability\":\"nonpayable\",\"type\":\"fallback\"}]" - -// SubscriptionBin is the compiled bytecode used for deploying new contracts. -var SubscriptionBin = "0x6080604052348015600f57600080fd5b50607180601d6000396000f3fe6080604052348015600f57600080fd5b506040517f67abc7edb0ab50964ef0e90541d39366b9c69f6f714520f2ff4570059ee8ad8090600090a100fea264697066735822122045a70478ef4f6a283c0e153ad72ec6731dc9ee2e1c191c7334b74dea21a92eaf64736f6c634300080c0033" - -// DeploySubscription deploys a new Ethereum contract, binding an instance of Subscription to it. 
-func DeploySubscription(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, types.Transaction, *Subscription, error) { - parsed, err := abi.JSON(strings.NewReader(SubscriptionABI)) - if err != nil { - return common.Address{}, nil, nil, err - } - - address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(SubscriptionBin), backend) - if err != nil { - return common.Address{}, nil, nil, err - } - return address, tx, &Subscription{SubscriptionCaller: SubscriptionCaller{contract: contract}, SubscriptionTransactor: SubscriptionTransactor{contract: contract}, SubscriptionFilterer: SubscriptionFilterer{contract: contract}}, nil -} - -// Subscription is an auto generated Go binding around an Ethereum contract. -type Subscription struct { - SubscriptionCaller // Read-only binding to the contract - SubscriptionTransactor // Write-only binding to the contract - SubscriptionFilterer // Log filterer for contract events -} - -// SubscriptionCaller is an auto generated read-only Go binding around an Ethereum contract. -type SubscriptionCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// SubscriptionTransactor is an auto generated write-only Go binding around an Ethereum contract. -type SubscriptionTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// SubscriptionFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type SubscriptionFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// SubscriptionSession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. 
-type SubscriptionSession struct { - Contract *Subscription // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// SubscriptionCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type SubscriptionCallerSession struct { - Contract *SubscriptionCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// SubscriptionTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type SubscriptionTransactorSession struct { - Contract *SubscriptionTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// SubscriptionRaw is an auto generated low-level Go binding around an Ethereum contract. -type SubscriptionRaw struct { - Contract *Subscription // Generic contract binding to access the raw methods on -} - -// SubscriptionCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type SubscriptionCallerRaw struct { - Contract *SubscriptionCaller // Generic read-only contract binding to access the raw methods on -} - -// SubscriptionTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. -type SubscriptionTransactorRaw struct { - Contract *SubscriptionTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewSubscription creates a new instance of Subscription, bound to a specific deployed contract. 
-func NewSubscription(address common.Address, backend bind.ContractBackend) (*Subscription, error) { - contract, err := bindSubscription(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &Subscription{SubscriptionCaller: SubscriptionCaller{contract: contract}, SubscriptionTransactor: SubscriptionTransactor{contract: contract}, SubscriptionFilterer: SubscriptionFilterer{contract: contract}}, nil -} - -// NewSubscriptionCaller creates a new read-only instance of Subscription, bound to a specific deployed contract. -func NewSubscriptionCaller(address common.Address, caller bind.ContractCaller) (*SubscriptionCaller, error) { - contract, err := bindSubscription(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &SubscriptionCaller{contract: contract}, nil -} - -// NewSubscriptionTransactor creates a new write-only instance of Subscription, bound to a specific deployed contract. -func NewSubscriptionTransactor(address common.Address, transactor bind.ContractTransactor) (*SubscriptionTransactor, error) { - contract, err := bindSubscription(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &SubscriptionTransactor{contract: contract}, nil -} - -// NewSubscriptionFilterer creates a new log filterer instance of Subscription, bound to a specific deployed contract. -func NewSubscriptionFilterer(address common.Address, filterer bind.ContractFilterer) (*SubscriptionFilterer, error) { - contract, err := bindSubscription(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &SubscriptionFilterer{contract: contract}, nil -} - -// bindSubscription binds a generic wrapper to an already deployed contract. 
-func bindSubscription(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(SubscriptionABI)) - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_Subscription *SubscriptionRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _Subscription.Contract.SubscriptionCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_Subscription *SubscriptionRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) { - return _Subscription.Contract.SubscriptionTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_Subscription *SubscriptionRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) { - return _Subscription.Contract.SubscriptionTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_Subscription *SubscriptionCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _Subscription.Contract.contract.Call(opts, result, method, params...) 
-} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_Subscription *SubscriptionTransactorRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) { - return _Subscription.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_Subscription *SubscriptionTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) { - return _Subscription.Contract.contract.Transact(opts, method, params...) -} - -// Fallback is a paid mutator transaction binding the contract fallback function. -// -// Solidity: fallback() returns() -func (_Subscription *SubscriptionTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (types.Transaction, error) { - return _Subscription.contract.RawTransact(opts, calldata) -} - -// Fallback is a paid mutator transaction binding the contract fallback function. -// -// Solidity: fallback() returns() -func (_Subscription *SubscriptionSession) Fallback(calldata []byte) (types.Transaction, error) { - return _Subscription.Contract.Fallback(&_Subscription.TransactOpts, calldata) -} - -// Fallback is a paid mutator transaction binding the contract fallback function. -// -// Solidity: fallback() returns() -func (_Subscription *SubscriptionTransactorSession) Fallback(calldata []byte) (types.Transaction, error) { - return _Subscription.Contract.Fallback(&_Subscription.TransactOpts, calldata) -} - -// SubscriptionSubscriptionEventIterator is returned from FilterSubscriptionEvent and is used to iterate over the raw logs and unpacked data for SubscriptionEvent events raised by the Subscription contract. 
-type SubscriptionSubscriptionEventIterator struct { - Event *SubscriptionSubscriptionEvent // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *SubscriptionSubscriptionEventIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(SubscriptionSubscriptionEvent) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(SubscriptionSubscriptionEvent) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *SubscriptionSubscriptionEventIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. 
-func (it *SubscriptionSubscriptionEventIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// SubscriptionSubscriptionEvent represents a SubscriptionEvent event raised by the Subscription contract. -type SubscriptionSubscriptionEvent struct { - Raw types.Log // Blockchain specific contextual infos -} - -// FilterSubscriptionEvent is a free log retrieval operation binding the contract event 0x67abc7edb0ab50964ef0e90541d39366b9c69f6f714520f2ff4570059ee8ad80. -// -// Solidity: event SubscriptionEvent() -func (_Subscription *SubscriptionFilterer) FilterSubscriptionEvent(opts *bind.FilterOpts) (*SubscriptionSubscriptionEventIterator, error) { - - logs, sub, err := _Subscription.contract.FilterLogs(opts, "SubscriptionEvent") - if err != nil { - return nil, err - } - return &SubscriptionSubscriptionEventIterator{contract: _Subscription.contract, event: "SubscriptionEvent", logs: logs, sub: sub}, nil -} - -// WatchSubscriptionEvent is a free log subscription operation binding the contract event 0x67abc7edb0ab50964ef0e90541d39366b9c69f6f714520f2ff4570059ee8ad80. 
-// -// Solidity: event SubscriptionEvent() -func (_Subscription *SubscriptionFilterer) WatchSubscriptionEvent(opts *bind.WatchOpts, sink chan<- *SubscriptionSubscriptionEvent) (event.Subscription, error) { - - logs, sub, err := _Subscription.contract.WatchLogs(opts, "SubscriptionEvent") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(SubscriptionSubscriptionEvent) - if err := _Subscription.contract.UnpackLog(event, "SubscriptionEvent", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseSubscriptionEvent is a log parse operation binding the contract event 0x67abc7edb0ab50964ef0e90541d39366b9c69f6f714520f2ff4570059ee8ad80. 
-// -// Solidity: event SubscriptionEvent() -func (_Subscription *SubscriptionFilterer) ParseSubscriptionEvent(log types.Log) (*SubscriptionSubscriptionEvent, error) { - event := new(SubscriptionSubscriptionEvent) - if err := _Subscription.contract.UnpackLog(event, "SubscriptionEvent", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} diff --git a/cmd/devnettest/contracts/subscription.sol b/cmd/devnettest/contracts/subscription.sol deleted file mode 100644 index 092d4470feb..00000000000 --- a/cmd/devnettest/contracts/subscription.sol +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0 - -pragma solidity ^0.8.0; - -contract Subscription { - event SubscriptionEvent(); - fallback() external { - emit SubscriptionEvent(); - } -} diff --git a/cmd/devnettest/erigon/node.go b/cmd/devnettest/erigon/node.go deleted file mode 100644 index 440c4688498..00000000000 --- a/cmd/devnettest/erigon/node.go +++ /dev/null @@ -1,61 +0,0 @@ -package erigon - -import ( - "fmt" - "os" - - "github.com/ledgerwatch/erigon-lib/common/dbg" - "github.com/ledgerwatch/erigon/cmd/devnettest/rpcdaemon" - "github.com/ledgerwatch/erigon/params" - erigonapp "github.com/ledgerwatch/erigon/turbo/app" - erigoncli "github.com/ledgerwatch/erigon/turbo/cli" - "github.com/ledgerwatch/erigon/turbo/node" - "github.com/ledgerwatch/log/v3" - "github.com/urfave/cli" -) - -const DevPeriod = 5 - -func RunNode() { - defer func() { - panicResult := recover() - if panicResult == nil { - return - } - - log.Error("catch panic", "err", panicResult, "stack", dbg.Stack()) - os.Exit(1) - }() - - app := erigonapp.MakeApp(runDevnet, erigoncli.DefaultFlags) // change to erigoncli.DefaultFlags later on - customArgs := []string{"./build/bin/devnettest", "--datadir=./dev", "--chain=dev", "--mine", fmt.Sprintf("--dev.period=%d", DevPeriod), "--verbosity=0"} - if err := app.Run(customArgs); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func runDevnet(cliCtx 
*cli.Context) { - logger := log.New() - // initializing the node and providing the current git commit there - logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit) - - nodeCfg := node.NewNodConfigUrfave(cliCtx) - ethCfg := node.NewEthConfigUrfave(cliCtx, nodeCfg) - - ethNode, err := node.New(nodeCfg, ethCfg, logger) - if err != nil { - log.Error("Devnet startup", "err", err) - return - } - err = ethNode.Serve() - if err != nil { - log.Error("error while serving a Devnet node", "err", err) - } -} - -func StartProcess() { - fmt.Println("Starting erigon node and rpc daemon...") - go RunNode() - go rpcdaemon.RunDaemon() -} diff --git a/cmd/devnettest/main.go b/cmd/devnettest/main.go deleted file mode 100644 index 66e419bfc7c..00000000000 --- a/cmd/devnettest/main.go +++ /dev/null @@ -1,24 +0,0 @@ -package main - -import ( - "fmt" - "time" - - "github.com/ledgerwatch/erigon/cmd/devnettest/commands" - "github.com/ledgerwatch/erigon/cmd/devnettest/erigon" - "github.com/ledgerwatch/erigon/cmd/devnettest/services" -) - -func main() { - erigon.StartProcess() - - time.Sleep(10 * time.Second) - - fmt.Printf("SUCCESS => Started!\n\n") - err := commands.Execute() - if err != nil { - panic(err) - } - - defer services.ClearDevDB() -} diff --git a/cmd/devnettest/requests/mock_requests.go b/cmd/devnettest/requests/mock_requests.go deleted file mode 100644 index 1e5c950d36d..00000000000 --- a/cmd/devnettest/requests/mock_requests.go +++ /dev/null @@ -1,13 +0,0 @@ -package requests - -import "fmt" - -func MockGetRequest(reqId int) error { - reqGen := initialiseRequestGenerator(reqId) - res := reqGen.Get() - if res.Err != nil { - return fmt.Errorf("failed to make get request: %v", res.Err) - } - fmt.Printf("SUCCESS => OK\n") - return nil -} diff --git a/cmd/devnettest/requests/request_generator.go b/cmd/devnettest/requests/request_generator.go deleted file mode 100644 index 0be4ca87b58..00000000000 --- 
a/cmd/devnettest/requests/request_generator.go +++ /dev/null @@ -1,124 +0,0 @@ -package requests - -import ( - "errors" - "fmt" - "io" - "net/http" - "time" - - "github.com/ledgerwatch/erigon/cmd/rpctest/rpctest" - "github.com/ledgerwatch/erigon/common" -) - -var ( - erigonUrl = "http://localhost:8545" -) - -type RequestGenerator struct { - reqID int - client *http.Client -} - -func initialiseRequestGenerator(reqId int) *RequestGenerator { - var client = &http.Client{ - Timeout: time.Second * 600, - } - - reqGen := RequestGenerator{ - client: client, - reqID: reqId, - } - if reqGen.reqID == 0 { - reqGen.reqID++ - } - - return &reqGen -} - -func (req *RequestGenerator) Get() rpctest.CallResult { - start := time.Now() - res := rpctest.CallResult{ - RequestID: req.reqID, - } - - resp, err := http.Get(erigonUrl) - if err != nil { - res.Took = time.Since(start) - res.Err = err - return res - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - res.Took = time.Since(start) - res.Err = errors.New("bad request") - return res - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - res.Took = time.Since(start) - res.Err = err - return res - } - - res.Response = body - res.Took = time.Since(start) - res.Err = err - return res -} - -func (req *RequestGenerator) Erigon(method, body string, response interface{}) rpctest.CallResult { - return req.call(erigonUrl, method, body, response) -} - -func (req *RequestGenerator) call(target string, method, body string, response interface{}) rpctest.CallResult { - start := time.Now() - err := post(req.client, erigonUrl, body, response) - return rpctest.CallResult{ - RequestBody: body, - Target: target, - Took: time.Since(start), - RequestID: req.reqID, - Method: method, - Err: err, - } -} - -func (req *RequestGenerator) getBalance(address common.Address, blockNum string) string { - const template = `{"jsonrpc":"2.0","method":"eth_getBalance","params":["0x%x","%v"],"id":%d}` - return fmt.Sprintf(template, address, blockNum, 
req.reqID) -} - -func (req *RequestGenerator) sendRawTransaction(signedTx []byte) string { - const template = `{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x%x"],"id":%d}` - return fmt.Sprintf(template, signedTx, req.reqID) -} - -func (req *RequestGenerator) txpoolContent() string { - const template = `{"jsonrpc":"2.0","method":"txpool_content","params":[],"id":%d}` - return fmt.Sprintf(template, req.reqID) -} - -func (req *RequestGenerator) parityStorageKeyListContent(address common.Address, quantity int, offset []byte, blockNum string) string { - const template = `{"jsonrpc":"2.0","method":"parity_listStorageKeys","params":["0x%x", %d, %v, "%s"],"id":%d}` - var offsetString string - if len(offset) != 0 { - offsetString = fmt.Sprintf(`"0x%x"`, offset) - } else { - offsetString = "null" - } - - return fmt.Sprintf(template, address, quantity, offsetString, blockNum, req.reqID) -} - -func (req *RequestGenerator) getLogs(fromBlock, toBlock uint64, address common.Address) string { - const template = `{"jsonrpc":"2.0","method":"eth_getLogs","params":[{"fromBlock": "0x%x", "toBlock": "0x%x", "address": "0x%x"}],"id":%d}` - return fmt.Sprintf(template, fromBlock, toBlock, address, req.reqID) -} - -func (req *RequestGenerator) getTransactionCount(address common.Address, blockNum string) string { - const template = `{"jsonrpc":"2.0","method":"eth_getTransactionCount","params":["0x%x","%v"],"id":%d}` - return fmt.Sprintf(template, address, blockNum, req.reqID) -} diff --git a/cmd/devnettest/requests/requests.go b/cmd/devnettest/requests/requests.go deleted file mode 100644 index a678a6b74af..00000000000 --- a/cmd/devnettest/requests/requests.go +++ /dev/null @@ -1,176 +0,0 @@ -package requests - -import ( - "bytes" - "encoding/json" - "fmt" - "github.com/ledgerwatch/erigon/cmd/devnettest/utils" - "net/http" - "strconv" - "strings" - "time" - - "github.com/ledgerwatch/erigon/cmd/rpctest/rpctest" - "github.com/ledgerwatch/erigon/common" - 
"github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/log/v3" -) - -func post(client *http.Client, url, request string, response interface{}) error { - start := time.Now() - r, err := client.Post(url, "application/json", strings.NewReader(request)) - if err != nil { - return fmt.Errorf("client failed to make post request: %v", err) - } - defer r.Body.Close() - - if r.StatusCode != 200 { - return fmt.Errorf("status %s", r.Status) - } - - decoder := json.NewDecoder(r.Body) - err = decoder.Decode(response) - if err != nil { - return fmt.Errorf("failed to decode response: %v", err) - } - - log.Info("Got in", "time", time.Since(start).Seconds()) - return nil -} - -func GetBalance(reqId int, address common.Address, blockNum string) (uint64, error) { - reqGen := initialiseRequestGenerator(reqId) - var b rpctest.EthBalance - - if res := reqGen.Erigon("eth_getBalance", reqGen.getBalance(address, blockNum), &b); res.Err != nil { - return 0, fmt.Errorf("failed to get balance: %v", res.Err) - } - - bal, err := json.Marshal(b.Balance) - if err != nil { - fmt.Println(err) - } - - balStr := string(bal)[3 : len(bal)-1] - balance, err := strconv.ParseInt(balStr, 16, 64) - if err != nil { - return 0, fmt.Errorf("cannot convert balance to decimal: %v", err) - } - - return uint64(balance), nil -} - -func SendTx(reqId int, signedTx *types.Transaction) (*common.Hash, error) { - reqGen := initialiseRequestGenerator(reqId) - var b rpctest.EthSendRawTransaction - - var buf bytes.Buffer - if err := (*signedTx).MarshalBinary(&buf); err != nil { - return nil, fmt.Errorf("failed to marshal binary: %v", err) - } - - if res := reqGen.Erigon("eth_sendRawTransaction", reqGen.sendRawTransaction(buf.Bytes()), &b); res.Err != nil { - return nil, fmt.Errorf("could not make request to eth_sendRawTransaction: %v", res.Err) - } - - return &b.TxnHash, nil -} - -func TxpoolContent(reqId int) error { - reqGen := initialiseRequestGenerator(reqId) - var b rpctest.EthTxPool - - if res := 
reqGen.Erigon("txpool_content", reqGen.txpoolContent(), &b); res.Err != nil { - return fmt.Errorf("failed to fetch txpool content: %v", res.Err) - } - - //fmt.Printf("Pending: %+v\n", b.Result.(map[string]interface{})["pending"].(map[string]interface{})["hash"]) - //fmt.Printf("Type: %T\n", b.Result.(map[string]interface{})["pending"]) - - s, err := utils.ParseResponse(b) - if err != nil { - return fmt.Errorf("error parsing resonse: %v", err) - } - - fmt.Printf("Txpool content: %v\n", s) - return nil -} - -func ParityList(reqId int, account common.Address, quantity int, offset []byte, blockNum string) error { - reqGen := initialiseRequestGenerator(reqId) - var b rpctest.ParityListStorageKeysResult - - if res := reqGen.Erigon("parity_listStorageKeys", reqGen.parityStorageKeyListContent(account, quantity, offset, blockNum), &b); res.Err != nil { - return fmt.Errorf("failed to fetch storage keys: %v", res.Err) - } - - s, err := utils.ParseResponse(b) - if err != nil { - return fmt.Errorf("error parsing resonse: %v", err) - } - - fmt.Printf("Storage keys: %v\n", s) - return nil -} - -func GetLogs(reqId int, fromBlock, toBlock uint64, address common.Address, show bool) error { - reqGen := initialiseRequestGenerator(reqId) - var b rpctest.EthGetLogs - - if res := reqGen.Erigon("eth_getLogs", reqGen.getLogs(fromBlock, toBlock, address), &b); res.Err != nil { - return fmt.Errorf("error fetching logs: %v\n", res.Err) - } - - s, err := utils.ParseResponse(b) - if err != nil { - return fmt.Errorf("error parsing resonse: %v", err) - } - - if show { - fmt.Printf("Logs: %v\n", s) - } - - return nil -} - -func GetTransactionCountCmd(reqId int, address common.Address, blockNum string) (uint64, error) { - reqGen := initialiseRequestGenerator(reqId) - var b rpctest.EthGetTransactionCount - - if res := reqGen.Erigon("eth_getTransactionCount", reqGen.getTransactionCount(address, blockNum), &b); res.Err != nil { - return 0, fmt.Errorf("error getting transaction count: %v\n", res.Err) - 
} - - if b.Error != nil { - return 0, fmt.Errorf("error populating response object: %v", b.Error) - } - - n, err := json.Marshal(b.Result) - if err != nil { - fmt.Println(err) - } - - nonceStr := string(n)[3 : len(n)-1] - - nonce, err := strconv.ParseInt(nonceStr, 16, 64) - if err != nil { - return 0, fmt.Errorf("cannot convert nonce to decimal: %v", err) - } - - return uint64(nonce), nil -} - -func GetTransactionCount(reqId int, address common.Address, blockNum string) (rpctest.EthGetTransactionCount, error) { - reqGen := initialiseRequestGenerator(reqId) - var b rpctest.EthGetTransactionCount - - if res := reqGen.Erigon("eth_getTransactionCount", reqGen.getTransactionCount(address, blockNum), &b); res.Err != nil { - return b, fmt.Errorf("error getting transaction count: %v\n", res.Err) - } - - if b.Error != nil { - return b, fmt.Errorf("error populating response object: %v", b.Error) - } - - return b, nil -} diff --git a/cmd/devnettest/rpcdaemon/daemon.go b/cmd/devnettest/rpcdaemon/daemon.go deleted file mode 100644 index 226a2fd9a30..00000000000 --- a/cmd/devnettest/rpcdaemon/daemon.go +++ /dev/null @@ -1,51 +0,0 @@ -package rpcdaemon - -import ( - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli/httpcfg" - "os" - "time" - - "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands" - "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" -) - -func RunDaemon() { - cmd, cfg := cli.RootCommand() - setupCfg(cfg) - rootCtx, rootCancel := common.RootContext() - cmd.RunE = func(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() - logger := log.New() - time.Sleep(100 * time.Millisecond) - db, borDb, backend, txPool, mining, starknet, stateCache, blockReader, ff, err := cli.RemoteServices(ctx, *cfg, logger, rootCancel) - if err != nil { - log.Error("Could not connect to DB", "err", err) - return nil - } - defer db.Close() - if borDb != nil { - defer 
borDb.Close() - } - - apiList := commands.APIList(db, borDb, backend, txPool, mining, starknet, ff, stateCache, blockReader, *cfg) - if err := cli.StartRpcServer(ctx, *cfg, apiList); err != nil { - log.Error(err.Error()) - return nil - } - - return nil - } - - if err := cmd.ExecuteContext(rootCtx); err != nil { - log.Error(err.Error()) - os.Exit(1) - } -} - -func setupCfg(cfg *httpcfg.HttpCfg) { - cfg.WebsocketEnabled = true - cfg.API = []string{"eth", "erigon", "web3", "net", "debug", "trace", "txpool"} -} diff --git a/cmd/devnettest/services/account.go b/cmd/devnettest/services/account.go deleted file mode 100644 index da067ed6861..00000000000 --- a/cmd/devnettest/services/account.go +++ /dev/null @@ -1,20 +0,0 @@ -package services - -import ( - "fmt" - "github.com/ledgerwatch/erigon/cmd/devnettest/requests" - "github.com/ledgerwatch/erigon/common" -) - -// GetNonce fetches the latest nonce of the developer account by making an JSONRPC request -func GetNonce(reqId int) (uint64, error) { - blockNum := "latest" - address := common.HexToAddress(DevAddress) - - res, err := requests.GetTransactionCount(reqId, address, blockNum) - if err != nil { - return 0, fmt.Errorf("failed to get transaction count for address 0x%x: %v", address, err) - } - - return uint64(res.Result), nil -} diff --git a/cmd/devnettest/services/block.go b/cmd/devnettest/services/block.go deleted file mode 100644 index 777e2a8dc23..00000000000 --- a/cmd/devnettest/services/block.go +++ /dev/null @@ -1,216 +0,0 @@ -package services - -import ( - "context" - "fmt" - "github.com/ledgerwatch/erigon/cmd/devnettest/utils" - "math/big" - "os/exec" - "time" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/accounts/abi/bind" - "github.com/ledgerwatch/erigon/accounts/abi/bind/backends" - "github.com/ledgerwatch/erigon/cmd/devnettest/contracts" - "github.com/ledgerwatch/erigon/cmd/devnettest/requests" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - 
"github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rpc" -) - -const ( - gasPrice = 912345678 -) - -var ( - devnetSignPrivateKey, _ = crypto.HexToECDSA("26e86e45f6fc45ec6e2ecd128cec80fa1d1505e5507dcd2ae58c3130a7a97b48") - signer = types.LatestSigner(params.AllCliqueProtocolChanges) - DevAddress = "67b1d87101671b127f5f8714789C7192f7ad340e" - gspec = core.DeveloperGenesisBlock(uint64(0), common.HexToAddress(DevAddress)) - contractBackend = backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, 1_000_000) -) - -type Block struct { - Number *hexutil.Big - Transactions []common.Hash -} - -// CreateTransaction returns transaction details depending on what transaction type is given -func CreateTransaction(transactionType, addr string, value, nonce uint64) (*types.Transaction, common.Address, *contracts.Subscription, *bind.TransactOpts, error) { - if transactionType == "regular" { - tx, address, err := createNonContractTx(addr, value, nonce) - if err != nil { - return nil, common.Address{}, nil, nil, fmt.Errorf("failed to create non-contract transaction: %v", err) - } - return tx, address, nil, nil, nil - } - return createContractTx(nonce) -} - -// createNonContractTx takes in a signer and returns the signed transaction and the address receiving the sent value -func createNonContractTx(addr string, value, nonce uint64) (*types.Transaction, common.Address, error) { - toAddress := common.HexToAddress(addr) - signedTx, err := types.SignTx(types.NewTransaction(nonce, toAddress, uint256.NewInt(value), - params.TxGas, uint256.NewInt(gasPrice), nil), *signer, devnetSignPrivateKey) - if err != nil { - return nil, toAddress, fmt.Errorf("failed to sign transaction: %v", err) - } - return &signedTx, toAddress, nil -} - -// createContractTx creates and signs a transaction using the developer address, returns the contract and the 
signed transaction -func createContractTx(nonce uint64) (*types.Transaction, common.Address, *contracts.Subscription, *bind.TransactOpts, error) { - // initialize transactOpts - transactOpts, err := initializeTransactOps(nonce) - if err != nil { - return nil, common.Address{}, nil, nil, fmt.Errorf("failed to initialize transactOpts: %v", err) - } - - // deploy the contract and get the contract handler - address, txToSign, subscriptionContract, err := contracts.DeploySubscription(transactOpts, contractBackend) - if err != nil { - return nil, common.Address{}, nil, nil, fmt.Errorf("failed to deploy subscription: %v", err) - } - - // sign the transaction with the private key - signedTx, err := types.SignTx(txToSign, *signer, devnetSignPrivateKey) - if err != nil { - return nil, common.Address{}, nil, nil, fmt.Errorf("failed to sign tx: %v", err) - } - - return &signedTx, address, subscriptionContract, transactOpts, nil -} - -func initializeTransactOps(nonce uint64) (*bind.TransactOpts, error) { - const txGas uint64 = 200_000 - var chainID = big.NewInt(1337) - - transactOpts, err := bind.NewKeyedTransactorWithChainID(devnetSignPrivateKey, chainID) - if err != nil { - return nil, fmt.Errorf("cannot create transactor with chainID %s, error: %v", chainID, err) - } - - transactOpts.GasLimit = txGas - transactOpts.GasPrice = big.NewInt(880_000_000) - // TODO: Get Nonce from account automatically - transactOpts.Nonce = big.NewInt(int64(nonce)) - - return transactOpts, nil -} - -// SearchBlockForTx connects the client to a websocket and listens for new heads to search the blocks for a tx hash -func SearchBlockForTx(txnHash common.Hash) (uint64, error) { - client, clientErr := rpc.DialWebsocket(context.Background(), "ws://127.0.0.1:8545", "") - if clientErr != nil { - return 0, fmt.Errorf("failed to dial websocket: %v", clientErr) - } - - fmt.Printf("\nSearching for tx %q in new blocks...\n", txnHash) - blockN, err := subscribeToNewHeadsAndSearch(client, "eth_newHeads", 
txnHash) - if err != nil { - return 0, fmt.Errorf("failed to subscribe to ws: %v", err) - } - - // var count int - - // ForLoop: - // for { - // select { - // case v := <-ch: - // count++ - // _map := v.(map[string]interface{}) - // for k, val := range _map { - // fmt.Printf("%s: %+v, ", k, val) - // } - // fmt.Println() - // fmt.Println() - // if count == numberOfIterations { - // break ForLoop - // } - // } - // } - - return blockN, nil -} - -// blockHasHash checks if the current block has the transaction hash in its list of transactions -func blockHasHash(client *rpc.Client, hash common.Hash, blockNumber string) (uint64, bool, error) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - var currentBlock Block - err := client.CallContext(ctx, ¤tBlock, "eth_getBlockByNumber", blockNumber, false) - if err != nil { - return uint64(0), false, fmt.Errorf("failed to get block by number: %v", err) - } - - for _, txnHash := range currentBlock.Transactions { - if txnHash == hash { - fmt.Printf("SUCCESS => Tx with hash %q is in mined block with number %q\n", hash, blockNumber) - return utils.HexToInt(blockNumber), true, nil - } - } - - return uint64(0), false, nil -} - -// EmitEventAndGetLogs emits an event from the contract using the fallback method -func EmitEventAndGetLogs(reqId int, subContract *contracts.Subscription, opts *bind.TransactOpts, address common.Address) error { - opts.Nonce.Add(opts.Nonce, big.NewInt(1)) - - tx, err := subContract.Fallback(opts, []byte{}) - if err != nil { - return fmt.Errorf("failed to emit event from fallback: %v", err) - } - - signedTx, err := types.SignTx(tx, *signer, devnetSignPrivateKey) - if err != nil { - return fmt.Errorf("failed to sign transaction: %v", err) - } - - fmt.Printf("\nSending Fallback tx from contract...\n") - hash, err := requests.SendTx(reqId, &signedTx) - if err != nil { - return fmt.Errorf("failed to send transaction: %v", err) - } - fmt.Printf("SUCCESS => Tx 
submitted, adding tx with hash %q to txpool\n", hash) - - // TODO: Mining does not happen because node is stuck in StageSync - blockN, err := SearchBlockForTx(*hash) - if err != nil { - return fmt.Errorf("error searching block for tx: %v", err) - } - - if err = requests.GetLogs(reqId, blockN, blockN, address, false); err != nil { - return fmt.Errorf("failed to get logs: %v", err) - } - - return nil -} - -func ApplyTransaction(ctx context.Context, tx types.Transaction) error { - err := contractBackend.SendTransaction(ctx, tx) - if err != nil { - return fmt.Errorf("failed to send transaction: %v", err) - } - contractBackend.Commit() - return nil -} - -// ClearDevDB cleans up the dev folder used for the operations -func ClearDevDB() { - fmt.Printf("\nDeleting ./dev folder\n") - - cmd := exec.Command("rm", "-rf", "./dev") - err := cmd.Run() - if err != nil { - fmt.Println("Error occurred clearing Dev DB") - panic("could not clear dev DB") - } - - fmt.Printf("SUCCESS => Deleted ./dev\n") -} diff --git a/cmd/devnettest/services/event.go b/cmd/devnettest/services/event.go deleted file mode 100644 index a524898060f..00000000000 --- a/cmd/devnettest/services/event.go +++ /dev/null @@ -1,129 +0,0 @@ -package services - -import ( - "context" - "fmt" - "github.com/ledgerwatch/erigon/cmd/devnettest/utils" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/rpc" -) - -const numberOfIterations = 128 - -var ch = make(chan interface{}) - -// subscribe connects to a websocket and returns the subscription handler and a channel buffer -func subscribe(client *rpc.Client, method string, args ...interface{}) (*rpc.ClientSubscription, error) { - var ( - namespace string - subMethod string - splitErr error - ) - - namespace, subMethod, splitErr = utils.NamespaceAndSubMethodFromMethod(method) - if splitErr != nil { - return nil, fmt.Errorf("cannot get namespace and submethod from method: %v", splitErr) - } - - var arr = []interface{}{subMethod} - arr = append(arr, 
args...) - - sub, err := client.Subscribe(context.Background(), namespace, ch, arr...) - if err != nil { - return nil, fmt.Errorf("client failed to subscribe: %v", err) - } - - return sub, nil -} - -// subscribeToNewHeadsAndSearch makes a ws subscription for eth_newHeads and searches each new header for the tx hash -func subscribeToNewHeadsAndSearch(client *rpc.Client, method string, hash common.Hash) (uint64, error) { - sub, err := subscribe(client, method) - if err != nil { - return uint64(0), fmt.Errorf("error subscribing to newHeads: %v", err) - } - defer unSubscribe(sub) - - var ( - blockCount int - blockN uint64 - ) -mark: - for { - select { - case v := <-ch: - blockCount++ - blockNumber := v.(map[string]interface{})["number"] - num, foundTx, err := blockHasHash(client, hash, blockNumber.(string)) - if err != nil { - return uint64(0), fmt.Errorf("could not verify if current block contains the tx hash: %v", err) - } - if foundTx || blockCount == numberOfIterations { - blockN = num - break mark - } - case err := <-sub.Err(): - return uint64(0), fmt.Errorf("subscription error from client: %v", err) - } - } - - return blockN, nil -} - -// Logs dials a websocket connection and listens for log events by calling subscribeToLogs -func Logs(addresses, topics []string) error { - client, clientErr := rpc.DialWebsocket(context.Background(), "ws://127.0.0.1:8545", "") - if clientErr != nil { - return fmt.Errorf("failed to dial websocket: %v", clientErr) - } - - if err := subscribeToLogs(client, "eth_logs", addresses, topics); err != nil { - return fmt.Errorf("failed to subscribe to logs: %v", err) - } - - return nil -} - -// subscribeToLogs makes a ws subscription for eth_subscribeLogs -func subscribeToLogs(client *rpc.Client, method string, addresses []string, topics []string) error { - params := map[string][]string{ - "address": addresses, - "topics": topics, - } - - _, err := subscribe(client, method, params) - if err != nil { - return fmt.Errorf("error subscribing to 
logs: %v", err) - } - //defer unSubscribe(sub) - - //var count int - - //ForLoop: - // for { - // select { - // case v := <-ch: - // count++ - // _map := v.(map[string]interface{}) - // for k, val := range _map { - // fmt.Printf("%s: %+v, ", k, val) - // } - // fmt.Println() - // fmt.Println() - // if count == numberOfIterations { - // break ForLoop - // } - // case err := <-sub.Err(): - // return fmt.Errorf("subscription error from client: %v", err) - // } - // } - - return nil -} - -func unSubscribe(sub *rpc.ClientSubscription) { - sub.Unsubscribe() - for len(ch) > 0 { - <-ch - } -} diff --git a/cmd/devnettest/utils/tools.go b/cmd/devnettest/utils/tools.go deleted file mode 100644 index 820fed468d1..00000000000 --- a/cmd/devnettest/utils/tools.go +++ /dev/null @@ -1,36 +0,0 @@ -package utils - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -// HexToInt converts a hex string to a type uint64 -func HexToInt(hexStr string) uint64 { - // Remove the 0x prefix - cleaned := strings.ReplaceAll(hexStr, "0x", "") - - result, _ := strconv.ParseUint(cleaned, 16, 64) - return result -} - -// ParseResponse converts any of the rpctest interfaces to a string for readability -func ParseResponse(resp interface{}) (string, error) { - result, err := json.Marshal(resp) - if err != nil { - return "", fmt.Errorf("error trying to marshal response: %v", err) - } - - return string(result), nil -} - -// NamespaceAndSubMethodFromMethod splits a parent method into namespace and the actual method -func NamespaceAndSubMethodFromMethod(method string) (string, string, error) { - parts := strings.SplitN(method, "_", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("invalid string to split") - } - return parts[0], parts[1], nil -} diff --git a/cmd/downloader/downloader/downloader.go b/cmd/downloader/downloader/downloader.go index a6c341b6db6..3c49a7adeb9 100644 --- a/cmd/downloader/downloader/downloader.go +++ b/cmd/downloader/downloader/downloader.go @@ -64,13 +64,10 @@ 
func New(cfg *downloadercfg.Cfg) (*Downloader, error) { if common.FileExist(cfg.DataDir + "_tmp") { // migration from prev versions _ = os.Rename(cfg.DataDir+"_tmp", filepath.Join(cfg.DataDir, "tmp")) // ignore error, because maybe they are on different drive, or target folder already created manually, all is fine } - if !common.FileExist(filepath.Join(cfg.DataDir, "db")) && !HasSegFile(cfg.DataDir) { // it's ok to remove "datadir/snapshots/db" dir or add .seg files manually - cfg.DataDir = filepath.Join(cfg.DataDir, "tmp") - } else { - if err := copyFromTmp(cfg.DataDir); err != nil { - return nil, err - } + if err := moveFromTmp(cfg.DataDir); err != nil { + return nil, err } + db, c, m, torrentClient, err := openClient(cfg.ClientConfig) if err != nil { return nil, fmt.Errorf("openClient: %w", err) @@ -154,14 +151,10 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { stats.PeersUnique = int32(len(peers)) stats.FilesTotal = int32(len(torrents)) - if !prevStats.Completed && stats.Completed { - d.onComplete() - } - d.stats = stats } -func copyFromTmp(snapDir string) error { +func moveFromTmp(snapDir string) error { tmpDir := filepath.Join(snapDir, "tmp") if !common.FileExist(tmpDir) { return nil @@ -189,40 +182,6 @@ func copyFromTmp(snapDir string) error { return nil } -// onComplete - only once - after download of all files fully done: -// - closing torrent client, closing downloader db -// - removing _tmp suffix from snapDir -// - open new torrentClient and db -func (d *Downloader) onComplete() { - snapDir, lastPart := filepath.Split(d.cfg.DataDir) - if lastPart != "tmp" { - return - } - - d.clientLock.Lock() - defer d.clientLock.Unlock() - - d.torrentClient.Close() - d.folder.Close() - d.pieceCompletionDB.Close() - d.db.Close() - - if err := copyFromTmp(snapDir); err != nil { - panic(err) - } - d.cfg.DataDir = snapDir - - db, c, m, torrentClient, err := openClient(d.cfg.ClientConfig) - if err != nil { - panic(err) - } - d.db = db - d.pieceCompletionDB = 
c - d.folder = m - d.torrentClient = torrentClient - _ = d.addSegments() -} - func (d *Downloader) verify() error { total := 0 for _, t := range d.torrentClient.Torrents() { @@ -265,13 +224,19 @@ func (d *Downloader) verify() error { func (d *Downloader) addSegments() error { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() - if err := BuildTorrentFilesIfNeed(context.Background(), d.cfg.DataDir); err != nil { + _, err := BuildTorrentFilesIfNeed(context.Background(), d.SnapDir()) + if err != nil { return err } - files, err := seedableSegmentFiles(d.cfg.DataDir) + files, err := seedableSegmentFiles(d.SnapDir()) if err != nil { return fmt.Errorf("seedableSegmentFiles: %w", err) } + files2, err := seedableHistorySnapshots(d.SnapDir()) + if err != nil { + return fmt.Errorf("seedableHistorySnapshots: %w", err) + } + files = append(files, files2...) wg := &sync.WaitGroup{} i := atomic.NewInt64(0) for _, f := range files { @@ -340,7 +305,7 @@ func openClient(cfg *torrent.ClientConfig) (db kv.RwDB, c storage.PieceCompletio db, err = mdbx.NewMDBX(log.New()). Flags(func(f uint) uint { return f | mdbx2.SafeNoSync }). Label(kv.DownloaderDB). - WithTablessCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.DownloaderTablesCfg }). + WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.DownloaderTablesCfg }). SyncPeriod(15 * time.Second). Path(filepath.Join(snapDir, "db")). 
Open() @@ -447,19 +412,3 @@ func MainLoop(ctx context.Context, d *Downloader, silent bool) { } } } - -func HasSegFile(dir string) bool { - files, err := os.ReadDir(dir) - if err != nil { - return false - } - for _, f := range files { - if f.IsDir() { - continue - } - if filepath.Ext(f.Name()) == ".seg" { - return true - } - } - return false -} diff --git a/cmd/downloader/downloader/downloader_grpc_server.go b/cmd/downloader/downloader/downloader_grpc_server.go index 062d086c219..79d9e11858f 100644 --- a/cmd/downloader/downloader/downloader_grpc_server.go +++ b/cmd/downloader/downloader/downloader_grpc_server.go @@ -101,7 +101,7 @@ func Proto2InfoHash(in *prototypes.H160) metainfo.Hash { // have .seg no .torrent => get .torrent from .seg func seedNewSnapshot(it *proto_downloader.DownloadItem, torrentClient *torrent.Client, snapDir string) (bool, error) { // if we dont have the torrent file we build it if we have the .seg file - if err := BuildTorrentFileIfNeed(it.Path, snapDir); err != nil { + if err := buildTorrentIfNeed(it.Path, snapDir); err != nil { return false, err } @@ -127,10 +127,10 @@ func createMagnetLinkWithInfoHash(hash *prototypes.H160, torrentClient *torrent. return false, nil } infoHash := Proto2InfoHash(hash) - log.Debug("[downloader] downloading torrent and seg file", "hash", infoHash) + //log.Debug("[downloader] downloading torrent and seg file", "hash", infoHash) if _, ok := torrentClient.Torrent(infoHash); ok { - log.Debug("[downloader] torrent client related to hash found", "hash", infoHash) + //log.Debug("[downloader] torrent client related to hash found", "hash", infoHash) return true, nil } @@ -151,6 +151,6 @@ func createMagnetLinkWithInfoHash(hash *prototypes.H160, torrentClient *torrent. 
return } }(magnet.String()) - log.Debug("[downloader] downloaded both seg and torrent files", "hash", infoHash) + //log.Debug("[downloader] downloaded both seg and torrent files", "hash", infoHash) return false, nil } diff --git a/cmd/downloader/downloader/downloadercfg/downloadercfg.go b/cmd/downloader/downloader/downloadercfg/downloadercfg.go index 761c421f322..c3c62bb3e21 100644 --- a/cmd/downloader/downloader/downloadercfg/downloadercfg.go +++ b/cmd/downloader/downloader/downloadercfg/downloadercfg.go @@ -20,8 +20,7 @@ const DefaultPieceSize = 2 * 1024 * 1024 // DefaultNetworkChunkSize - how much data request per 1 network call to peer. // default: 16Kb -// TODO: can we increase this value together with --torrent.upload.rate ? -const DefaultNetworkChunkSize = DefaultPieceSize +const DefaultNetworkChunkSize = 1 * 1024 * 1024 type Cfg struct { *torrent.ClientConfig @@ -104,19 +103,17 @@ func New(snapDir string, verbosity lg.Level, dbg bool, natif nat.Interface, down } } // rates are divided by 2 - I don't know why it works, maybe bug inside torrent lib accounting - torrentConfig.UploadRateLimiter = rate.NewLimiter(rate.Limit(uploadRate.Bytes()), 2*DefaultPieceSize) // default: unlimited + torrentConfig.UploadRateLimiter = rate.NewLimiter(rate.Limit(uploadRate.Bytes()), 2*DefaultNetworkChunkSize) // default: unlimited if downloadRate.Bytes() < 500_000_000 { - b := int(2 * DefaultPieceSize) - if downloadRate.Bytes() > DefaultPieceSize { + b := 2 * DefaultNetworkChunkSize + if downloadRate.Bytes() > DefaultNetworkChunkSize { b = int(2 * downloadRate.Bytes()) } torrentConfig.DownloadRateLimiter = rate.NewLimiter(rate.Limit(downloadRate.Bytes()), b) // default: unlimited } // debug - if dbg { - torrentConfig.Debug = true - } + // torrentConfig.Debug = false torrentConfig.Logger = lg.Default.FilterLevel(verbosity) torrentConfig.Logger.Handlers = []lg.Handler{adapterHandler{}} diff --git a/cmd/downloader/downloader/downloadercfg/logger.go 
b/cmd/downloader/downloader/downloadercfg/logger.go index 27989595f8e..87442eda39a 100644 --- a/cmd/downloader/downloader/downloadercfg/logger.go +++ b/cmd/downloader/downloader/downloadercfg/logger.go @@ -47,7 +47,7 @@ func (b adapterHandler) Handle(r lg.Record) { switch lvl { case lg.Debug: - log.Debug(r.String()) + log.Info("[downloader] " + r.String()) case lg.Info: str := r.String() if strings.Contains(str, "EOF") || @@ -100,6 +100,6 @@ func (b adapterHandler) Handle(r lg.Record) { log.Error(str) default: - log.Debug(r.String(), "torrent_log_type", "unknown") + log.Info("[downloader] "+r.String(), "torrent_log_type", "unknown", "or", lvl.LogString()) } } diff --git a/cmd/downloader/downloader/util.go b/cmd/downloader/downloader/util.go index d057250434e..cc2a086b144 100644 --- a/cmd/downloader/downloader/util.go +++ b/cmd/downloader/downloader/util.go @@ -10,7 +10,9 @@ import ( "net" "os" "path/filepath" + "regexp" "runtime" + "strconv" "sync" "time" @@ -21,12 +23,14 @@ import ( "github.com/edsrzf/mmap-go" common2 "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" + dir2 "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cmd/downloader/downloader/downloadercfg" "github.com/ledgerwatch/erigon/cmd/downloader/trackers" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" "github.com/ledgerwatch/log/v3" + atomic2 "go.uber.org/atomic" "golang.org/x/sync/semaphore" ) @@ -43,11 +47,20 @@ func AllTorrentPaths(dir string) ([]string, error) { if err != nil { return nil, err } - var res []string + histDir := filepath.Join(dir, "history") + files2, err := AllTorrentFiles(histDir) + if err != nil { + return nil, err + } + res := make([]string, 0, len(files)+len(files2)) for _, f := range files { torrentFilePath := filepath.Join(dir, f) res = append(res, torrentFilePath) } + for _, f := range files2 { + torrentFilePath := 
filepath.Join(histDir, f) + res = append(res, torrentFilePath) + } return res, nil } @@ -56,9 +69,9 @@ func AllTorrentFiles(dir string) ([]string, error) { if err != nil { return nil, err } - var res []string + res := make([]string, 0, len(files)) for _, f := range files { - if !snap.IsCorrectFileName(f.Name()) { + if filepath.Ext(f.Name()) != ".torrent" { // filter out only compressed files continue } fileInfo, err := f.Info() @@ -68,20 +81,24 @@ func AllTorrentFiles(dir string) ([]string, error) { if fileInfo.Size() == 0 { continue } - if filepath.Ext(f.Name()) != ".torrent" { // filter out only compressed files - continue - } res = append(res, f.Name()) } return res, nil } + func seedableSegmentFiles(dir string) ([]string, error) { files, err := os.ReadDir(dir) if err != nil { return nil, err } - var res []string + res := make([]string, 0, len(files)) for _, f := range files { + if f.IsDir() { + continue + } + if !f.Type().IsRegular() { + continue + } if !snap.IsCorrectFileName(f.Name()) { continue } @@ -107,41 +124,77 @@ func seedableSegmentFiles(dir string) ([]string, error) { return res, nil } -// BuildTorrentFileIfNeed - create .torrent files from .seg files (big IO) - if .seg files were added manually -func BuildTorrentFileIfNeed(originalFileName, root string) (err error) { - f, err := snap.ParseFileName(root, originalFileName) +var historyFileRegex = regexp.MustCompile("^([[:lower:]]+).([0-9]+)-([0-9]+).(v|ef)$") + +func seedableHistorySnapshots(dir string) ([]string, error) { + historyDir := filepath.Join(dir, "history") + dir2.MustExist(historyDir) + files, err := os.ReadDir(historyDir) if err != nil { - return fmt.Errorf("ParseFileName: %w", err) - } - if !f.NeedTorrentFile() { - return nil + return nil, err } - if err := createTorrentFileFromSegment(f, nil); err != nil { - return fmt.Errorf("createTorrentFileFromInfo: %w", err) + res := make([]string, 0, len(files)) + for _, f := range files { + if f.IsDir() { + continue + } + if !f.Type().IsRegular() 
{ + continue + } + fileInfo, err := f.Info() + if err != nil { + return nil, err + } + if fileInfo.Size() == 0 { + continue + } + ext := filepath.Ext(f.Name()) + if ext != ".v" && ext != ".ef" { // filter out only compressed files + continue + } + + subs := historyFileRegex.FindStringSubmatch(f.Name()) + if len(subs) != 5 { + continue + } + + from, err := strconv.ParseUint(subs[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("ParseFileName: %w", err) + } + to, err := strconv.ParseUint(subs[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("ParseFileName: %w", err) + } + if to-from != snap.Erigon3SeedableSteps { + continue + } + res = append(res, filepath.Join("history", f.Name())) } - return nil + return res, nil } -func createTorrentFileFromSegment(f snap.FileInfo, mi *metainfo.MetaInfo) error { - info := &metainfo.Info{PieceLength: downloadercfg.DefaultPieceSize} - if err := info.BuildFromFilePath(f.Path); err != nil { +func buildTorrentIfNeed(fName, root string) (err error) { + fPath := filepath.Join(root, fName) + if common.FileExist(fPath + ".torrent") { + return + } + info := &metainfo.Info{PieceLength: downloadercfg.DefaultPieceSize, Name: fName} + if err := info.BuildFromFilePath(fPath); err != nil { return fmt.Errorf("createTorrentFileFromSegment: %w", err) } + info.Name = fName - dir, _ := filepath.Split(f.Path) - return createTorrentFileFromInfo(dir, info, mi) + return createTorrentFileFromInfo(root, info, nil) } // AddSegment - add existing .seg file, create corresponding .torrent if need func AddSegment(originalFileName, snapDir string, client *torrent.Client) (bool, error) { - f, err := snap.ParseFileName(snapDir, originalFileName) - if err != nil { - return false, fmt.Errorf("ParseFileName: %w", err) - } - if !f.TorrentFileExists() { + fPath := filepath.Join(snapDir, originalFileName) + if !common.FileExist(fPath + ".torrent") { return false, nil } - _, err = AddTorrentFile(f.Path+".torrent", client) + _, err := 
AddTorrentFile(fPath+".torrent", client) if err != nil { return false, fmt.Errorf("AddTorrentFile: %w", err) } @@ -149,28 +202,35 @@ func AddSegment(originalFileName, snapDir string, client *torrent.Client) (bool, } // BuildTorrentFilesIfNeed - create .torrent files from .seg files (big IO) - if .seg files were added manually -func BuildTorrentFilesIfNeed(ctx context.Context, snapDir string) error { +func BuildTorrentFilesIfNeed(ctx context.Context, snapDir string) ([]string, error) { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() files, err := seedableSegmentFiles(snapDir) if err != nil { - return err + return nil, err + } + files2, err := seedableHistorySnapshots(snapDir) + if err != nil { + return nil, err } + files = append(files, files2...) + errs := make(chan error, len(files)*2) wg := &sync.WaitGroup{} workers := cmp.Max(1, runtime.GOMAXPROCS(-1)-1) * 2 var sem = semaphore.NewWeighted(int64(workers)) - for i, f := range files { + i := atomic2.NewInt32(0) + for _, f := range files { wg.Add(1) if err := sem.Acquire(ctx, 1); err != nil { - return err + return nil, err } - go func(f string, i int) { + go func(f string) { + defer i.Inc() defer sem.Release(1) defer wg.Done() - err = BuildTorrentFileIfNeed(f, snapDir) - if err != nil { + if err := buildTorrentIfNeed(f, snapDir); err != nil { errs <- err } @@ -179,9 +239,9 @@ func BuildTorrentFilesIfNeed(ctx context.Context, snapDir string) error { case <-ctx.Done(): errs <- ctx.Err() case <-logEvery.C: - log.Info("[Snapshots] Creating .torrent files", "Progress", fmt.Sprintf("%d/%d", i, len(files))) + log.Info("[Snapshots] Creating .torrent files", "Progress", fmt.Sprintf("%d/%d", i.Load(), len(files))) } - }(f, i) + }(f) } go func() { wg.Wait() @@ -189,18 +249,15 @@ func BuildTorrentFilesIfNeed(ctx context.Context, snapDir string) error { }() for err := range errs { if err != nil { - return err + return nil, err } } - return nil + return files, nil } func CreateTorrentFileIfNotExists(root 
string, info *metainfo.Info, mi *metainfo.MetaInfo) error { - f, err := snap.ParseFileName(root, info.Name) - if err != nil { - return fmt.Errorf("ParseFileName: %w", err) - } - if !f.NeedTorrentFile() { + fPath := filepath.Join(root, info.Name) + if common.FileExist(fPath + ".torrent") { return nil } if err := createTorrentFileFromInfo(root, info, mi); err != nil { @@ -315,6 +372,7 @@ func AddTorrentFile(torrentFilePath string, torrentClient *torrent.Client) (*tor if err != nil { return nil, err } + t.DisallowDataDownload() t.AllowDataUpload() return t, nil @@ -326,10 +384,6 @@ func VerifyDtaFiles(ctx context.Context, snapDir string) error { logEvery := time.NewTicker(5 * time.Second) defer logEvery.Stop() - tmpSnapDir := filepath.Join(snapDir, "tmp") // snapshots are in sub-dir "tmp", if not fully downloaded - if common.FileExist(tmpSnapDir) { - snapDir = tmpSnapDir - } files, err := AllTorrentPaths(snapDir) if err != nil { return err diff --git a/cmd/downloader/downloadergrpc/client.go b/cmd/downloader/downloadergrpc/client.go index 47ae9492f21..4271589f616 100644 --- a/cmd/downloader/downloadergrpc/client.go +++ b/cmd/downloader/downloadergrpc/client.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon/common" "google.golang.org/grpc" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" ) @@ -29,7 +30,7 @@ func NewClient(ctx context.Context, downloaderAddr string) (proto_downloader.Dow grpc.WithKeepaliveParams(keepalive.ClientParameters{}), } - dialOpts = append(dialOpts, grpc.WithInsecure()) + dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) conn, err := grpc.DialContext(ctx, downloaderAddr, dialOpts...) 
if err != nil { return nil, fmt.Errorf("creating client connection to sentry P2P: %w", err) diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go index 731fd5ab3bf..4911664e9a8 100644 --- a/cmd/downloader/main.go +++ b/cmd/downloader/main.go @@ -183,7 +183,7 @@ var printTorrentHashes = &cobra.Command{ return err } } - if err := downloader.BuildTorrentFilesIfNeed(ctx, dirs.Snap); err != nil { + if _, err := downloader.BuildTorrentFilesIfNeed(ctx, dirs.Snap); err != nil { return err } } @@ -226,14 +226,14 @@ var printTorrentHashes = &cobra.Command{ log.Info("amount of lines in target file is equal or greater than amount of lines in snapshot dir", "old", len(oldLines), "new", len(res)) return nil } - if err := os.WriteFile(targetFile, serialized, 0644); err != nil { + if err := os.WriteFile(targetFile, serialized, 0644); err != nil { // nolint return err } return nil }, } -//nolint +// nolint func removePieceCompletionStorage(snapDir string) { _ = os.RemoveAll(filepath.Join(snapDir, "db")) _ = os.RemoveAll(filepath.Join(snapDir, ".torrent.db")) diff --git a/cmd/downloader/readme.md b/cmd/downloader/readme.md index 0b898e90bb7..4d5ddcf899a 100644 --- a/cmd/downloader/readme.md +++ b/cmd/downloader/readme.md @@ -57,7 +57,7 @@ Additional info: ```shell # Snapshots creation does not require fully-synced Erigon - few first stages enough. 
For example: -STOP_BEFORE_STAGE=Execution ./build/bin/erigon --snapshots=false --datadir= +STOP_AFTER_STAGE=Senders ./build/bin/erigon --snapshots=false --datadir= # But for security - better have fully-synced Erigon diff --git a/cmd/erigon/main.go b/cmd/erigon/main.go index 525aca262f3..a9618584f38 100644 --- a/cmd/erigon/main.go +++ b/cmd/erigon/main.go @@ -1,16 +1,24 @@ package main import ( + "errors" "fmt" "os" + "path/filepath" + "reflect" + "strings" + + "github.com/ledgerwatch/log/v3" + "github.com/pelletier/go-toml" + "github.com/urfave/cli" + "gopkg.in/yaml.v2" "github.com/ledgerwatch/erigon-lib/common/dbg" + "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/params" erigonapp "github.com/ledgerwatch/erigon/turbo/app" erigoncli "github.com/ledgerwatch/erigon/turbo/cli" "github.com/ledgerwatch/erigon/turbo/node" - "github.com/ledgerwatch/log/v3" - "github.com/urfave/cli" ) func main() { @@ -26,7 +34,10 @@ func main() { app := erigonapp.MakeApp(runErigon, erigoncli.DefaultFlags) if err := app.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) + _, printErr := fmt.Fprintln(os.Stderr, err) + if printErr != nil { + log.Warn("Fprintln error", "err", printErr) + } os.Exit(1) } } @@ -35,6 +46,14 @@ func runErigon(cliCtx *cli.Context) { logger := log.New() // initializing the node and providing the current git commit there logger.Info("Build info", "git_branch", params.GitBranch, "git_tag", params.GitTag, "git_commit", params.GitCommit) + + configFilePath := cliCtx.GlobalString(utils.ConfigFlag.Name) + if configFilePath != "" { + if err := setFlagsFromConfigFile(cliCtx, configFilePath); err != nil { + log.Warn("failed setting config flags from yaml/toml file", "err", err) + } + } + nodeCfg := node.NewNodConfigUrfave(cliCtx) ethCfg := node.NewEthConfigUrfave(cliCtx, nodeCfg) @@ -48,3 +67,55 @@ func runErigon(cliCtx *cli.Context) { log.Error("error while serving an Erigon node", "err", err) } } + +func setFlagsFromConfigFile(ctx 
*cli.Context, filePath string) error { + fileExtension := filepath.Ext(filePath) + + fileConfig := make(map[string]interface{}) + + if fileExtension == ".yaml" { + yamlFile, err := os.ReadFile(filePath) + if err != nil { + return err + } + err = yaml.Unmarshal(yamlFile, fileConfig) + if err != nil { + return err + } + } else if fileExtension == ".toml" { + tomlFile, err := os.ReadFile(filePath) + if err != nil { + return err + } + err = toml.Unmarshal(tomlFile, &fileConfig) + if err != nil { + return err + } + } else { + return errors.New("config files only accepted are .yaml and .toml") + } + // sets global flags to value in yaml/toml file + for key, value := range fileConfig { + if !ctx.GlobalIsSet(key) { + if reflect.ValueOf(value).Kind() == reflect.Slice { + sliceInterface := value.([]interface{}) + s := make([]string, len(sliceInterface)) + for i, v := range sliceInterface { + s[i] = fmt.Sprintf("%v", v) + } + err := ctx.GlobalSet(key, strings.Join(s, ",")) + if err != nil { + return fmt.Errorf("failed setting %s flag with values=%s error=%s", key, s, err) + } + } else { + err := ctx.GlobalSet(key, fmt.Sprintf("%v", value)) + if err != nil { + return fmt.Errorf("failed setting %s flag with value=%v error=%s", key, value, err) + + } + } + } + } + + return nil +} diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 230efb89be4..2f7a183653e 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -277,11 +277,12 @@ func Main(ctx *cli.Context) error { if err != nil { return err } + defer tx.Rollback() reader, writer := MakePreState(chainConfig.Rules(0), tx, prestate.Pre) engine := ethash.NewFaker() - result, err := core.ExecuteBlockEphemerally(chainConfig, &vmConfig, getHash, engine, block, reader, writer, nil, nil, nil, true, getTracer) + result, err := core.ExecuteBlockEphemerally(chainConfig, &vmConfig, getHash, engine, block, reader, writer, nil, nil, true, getTracer) if 
hashError != nil { return NewError(ErrorMissingBlockhash, fmt.Errorf("blockhash error: %v", err)) @@ -375,7 +376,7 @@ func getTransaction(txJson commands.RPCTransaction) (types.Transaction, error) { switch txJson.Type { case types.LegacyTxType, types.AccessListTxType: - var toAddr common.Address = common.Address{} + var toAddr = common.Address{} if txJson.To != nil { toAddr = *txJson.To } @@ -441,8 +442,9 @@ func getTransaction(txJson commands.RPCTransaction) (types.Transaction, error) { // signUnsignedTransactions converts the input txs to canonical transactions. // // The transactions can have two forms, either -// 1. unsigned or -// 2. signed +// 1. unsigned or +// 2. signed +// // For (1), r, s, v, need so be zero, and the `secretKey` needs to be set. // If so, we sign it here and now, with the given `secretKey` // If the condition above is not met, then it's considered a signed transaction. diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 8d6bb1ef5f6..3c95823ac3f 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -22,6 +22,8 @@ import ( "math/big" "os" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/cmd/evm/internal/t8ntool" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/internal/flags" @@ -193,7 +195,10 @@ func main() { if ec, ok := err.(*t8ntool.NumberedError); ok { code = ec.ExitCode() } - fmt.Fprintln(os.Stderr, err) + _, printErr := fmt.Fprintln(os.Stderr, err) + if printErr != nil { + log.Warn("print error", "err", printErr) + } os.Exit(code) } } diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index ff96209d604..77370bec1c9 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -65,7 +65,12 @@ func readGenesis(genesisPath string) *core.Genesis { if err != nil { utils.Fatalf("Failed to read genesis file: %v", err) } - defer file.Close() + defer func(file *os.File) { + closeErr := file.Close() + if closeErr != nil { + log.Warn("Failed to close file", "err", closeErr) + } + }(file) genesis := 
new(core.Genesis) if err := json.NewDecoder(file).Decode(genesis); err != nil { @@ -299,24 +304,36 @@ func runCmd(ctx *cli.Context) error { fmt.Println("could not write memory profile: ", err) os.Exit(1) } - f.Close() + closeErr := f.Close() + if closeErr != nil { + log.Warn("Failed to close file", "err", closeErr) + } } if ctx.GlobalBool(DebugFlag.Name) { if debugLogger != nil { - fmt.Fprintln(os.Stderr, "#### TRACE ####") + _, printErr := fmt.Fprintln(os.Stderr, "#### TRACE ####") + if printErr != nil { + log.Warn("Failed to print to stderr", "err", printErr) + } vm.WriteTrace(os.Stderr, debugLogger.StructLogs()) } - fmt.Fprintln(os.Stderr, "#### LOGS ####") + _, printErr := fmt.Fprintln(os.Stderr, "#### LOGS ####") + if printErr != nil { + log.Warn("Failed to print to stderr", "err", printErr) + } vm.WriteLogs(os.Stderr, statedb.Logs()) } if bench || ctx.GlobalBool(StatDumpFlag.Name) { - fmt.Fprintf(os.Stderr, `EVM gas used: %d + _, printErr := fmt.Fprintf(os.Stderr, `EVM gas used: %d execution time: %v allocations: %d allocated bytes: %d `, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated) + if printErr != nil { + log.Warn("Failed to print to stderr", "err", printErr) + } } if tracer == nil { fmt.Printf("0x%x\n", output) diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index 7d4cddb4e67..815355a7f3b 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -56,9 +56,7 @@ func stateTestCmd(ctx *cli.Context) error { return errors.New("path-to-test argument required") } // Configure the go-ethereum logger - log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler)) - //glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) - //glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name))) + log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StderrHandler)) // Configure the EVM logger config := &vm.LogConfig{ @@ -87,26 +85,44 @@ func stateTestCmd(ctx *cli.Context) 
error { if err != nil { return err } - var tests map[string]tests.StateTest - if err = json.Unmarshal(src, &tests); err != nil { + var stateTests map[string]tests.StateTest + if err = json.Unmarshal(src, &stateTests); err != nil { return err } - // Iterate over all the tests, run them and aggregate the results + + // Iterate over all the stateTests, run them and aggregate the results + results, err := aggregateResultsFromStateTests(ctx, stateTests, tracer, debugger) + if err != nil { + return err + } + + out, _ := json.MarshalIndent(results, "", " ") + fmt.Println(string(out)) + return nil +} + +func aggregateResultsFromStateTests( + ctx *cli.Context, + stateTests map[string]tests.StateTest, + tracer vm.Tracer, + debugger *vm.StructLogger, +) ([]StatetestResult, error) { + // Iterate over all the stateTests, run them and aggregate the results cfg := vm.Config{ Tracer: tracer, Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name), } - results := make([]StatetestResult, 0, len(tests)) db := memdb.New() defer db.Close() tx, txErr := db.BeginRw(context.Background()) if txErr != nil { - return txErr + return nil, txErr } defer tx.Rollback() + results := make([]StatetestResult, 0, len(stateTests)) - for key, test := range tests { + for key, test := range stateTests { for _, st := range test.Subtests() { // Run the test and aggregate the result result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} @@ -141,7 +157,10 @@ func stateTestCmd(ctx *cli.Context) error { // print state root for evmlab tracing if ctx.GlobalBool(MachineFlag.Name) && statedb != nil { - fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", root.Bytes()) + _, printErr := fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", root.Bytes()) + if printErr != nil { + log.Warn("Failed to write to stderr", "err", printErr) + } } results = append(results, *result) @@ -149,13 +168,14 @@ func stateTestCmd(ctx *cli.Context) error { // Print any structured logs collected if 
ctx.GlobalBool(DebugFlag.Name) { if debugger != nil { - fmt.Fprintln(os.Stderr, "#### TRACE ####") + _, printErr := fmt.Fprintln(os.Stderr, "#### TRACE ####") + if printErr != nil { + log.Warn("Failed to write to stderr", "err", printErr) + } vm.WriteTrace(os.Stderr, debugger.StructLogs()) } } } } - out, _ := json.MarshalIndent(results, "", " ") - fmt.Println(string(out)) - return nil + return results, nil } diff --git a/cmd/hack/db/lmdb.go b/cmd/hack/db/lmdb.go index 640bfb44690..288bcf10208 100644 --- a/cmd/hack/db/lmdb.go +++ b/cmd/hack/db/lmdb.go @@ -5,8 +5,6 @@ import ( "bytes" "context" "encoding/binary" - - // "errors" "fmt" "io" "math" @@ -212,7 +210,7 @@ type mdbx_db struct { txnID uint64 /* txnid of last committed modification */ } -//nolint // database size-related parameters, used as placeholder, doesn't have any meaning in this code +// nolint // database size-related parameters, used as placeholder, doesn't have any meaning in this code type mdbx_geo struct { grow_pv uint16 //nolint shrink_pv uint16 //nolint @@ -779,7 +777,7 @@ func defragSteps(filename string, bucketsCfg kv.TableCfg, generateFs ...func(kv. 
} defer os.RemoveAll(dir) var db kv.RwDB - db, err = kv2.NewMDBX(logger).Path(dir).WithTablessCfg(func(kv.TableCfg) kv.TableCfg { + db, err = kv2.NewMDBX(logger).Path(dir).WithTableCfg(func(kv.TableCfg) kv.TableCfg { return bucketsCfg }).Open() if err != nil { diff --git a/cmd/hack/flow/flow.go b/cmd/hack/flow/flow.go index e619c48e9ae..212e6f94f8a 100644 --- a/cmd/hack/flow/flow.go +++ b/cmd/hack/flow/flow.go @@ -344,7 +344,7 @@ func absIntAndJumpImprecision() { runCfgAnly("AndJumpImprecision00", s) } -//17891 transactions, 588 bytecode len +// 17891 transactions, 588 bytecode len func absIntTestSmallImprecision2() { const s = "6080604052600436106100405763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166393e84cd98114610045575b600080fd5b34801561005157600080fd5b5061005a61005c565b005b60008060008060008060008060008060008060008073a62142888aba8370742be823c1782d17a0389da173ffffffffffffffffffffffffffffffffffffffff1663747dff426040518163ffffffff167c01000000000000000000000000000000000000000000000000000000000281526004016101c060405180830381600087803b1580156100ea57600080fd5b505af11580156100fe573d6000803e3d6000fd5b505050506040513d6101c081101561011557600080fd5b8101908080519060200190929190805190602001909291908051906020019092919080519060200190929190805190602001909291908051906020019092919080519060200190929190805190602001909291908051906020019092919080519060200190929190805190602001909291908051906020019092919080519060200190929190805190602001909291905050509d509d509d509d509d509d509d509d509d509d509d509d509d509d508673ffffffffffffffffffffffffffffffffffffffff167318a0451ea56fd4ff58f59837e9ec30f346ffdca573ffffffffffffffffffffffffffffffffffffffff161415151561021057fe5b50505050505050505050505050505600a165627a7a72305820ec5e1703d3b74688c3350622a2bcfc097615733fa5f8df7adf51d66ebf42d0260029" runCfgAnly("SmallImprecision02", s) diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index ebdebd0546a..41f355dd5b0 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -23,6 +23,8 
@@ import ( "github.com/ledgerwatch/erigon-lib/compress" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "github.com/ledgerwatch/erigon-lib/recsplit" + "github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32" "golang.org/x/exp/slices" hackdb "github.com/ledgerwatch/erigon/cmd/hack/db" @@ -37,12 +39,14 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/cbor" "github.com/ledgerwatch/erigon/internal/debug" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/log/v3" ) @@ -171,7 +175,23 @@ func readAccount(chaindata string, account common.Address) error { if err != nil { return err } - for k, v, e := c.Seek(account.Bytes()); k != nil && e == nil; k, v, e = c.Next() { + defer c.Close() + for k, v, e := c.Seek(account.Bytes()); k != nil; k, v, e = c.Next() { + if e != nil { + return e + } + if !bytes.HasPrefix(k, account.Bytes()) { + break + } + fmt.Printf("%x => %x\n", k, v) + } + cc, err := tx.Cursor(kv.PlainContractCode) + if err != nil { + return err + } + defer cc.Close() + fmt.Printf("code hashes\n") + for k, v, e := cc.Seek(account.Bytes()); k != nil; k, v, e = c.Next() { if e != nil { return e } @@ -473,8 +493,53 @@ func extractHeaders(chaindata string, block uint64, blockTotalOrOffset int64) er return nil } -func extractBodies(chaindata string, block uint64) error { - db := mdbx.MustOpen(chaindata) +func extractBodies(datadir string) error { + snaps := snapshotsync.NewRoSnapshots(ethconfig.Snapshot{ + Enabled: true, + KeepBlocks: true, + Produce: false, + }, filepath.Join(datadir, "snapshots")) + snaps.ReopenFolder() + snaps.Bodies.View(func(sns 
[]*snapshotsync.BodySegment) error { + for _, sn := range sns { + var firstBlockNum, firstBaseTxNum, firstAmount uint64 + var lastBlockNum, lastBaseTxNum, lastAmount uint64 + var prevBlockNum, prevBaseTxNum, prevAmount uint64 + first := true + sn.Iterate(func(blockNum uint64, baseTxNum uint64, txAmount uint64) error { + if first { + firstBlockNum = blockNum + firstBaseTxNum = baseTxNum + firstAmount = txAmount + first = false + } else { + if blockNum != prevBlockNum+1 { + fmt.Printf("Discount block Num: %d => %d\n", prevBlockNum, blockNum) + } + if baseTxNum != prevBaseTxNum+prevAmount { + fmt.Printf("Wrong baseTxNum: %d+%d => %d\n", prevBaseTxNum, prevAmount, baseTxNum) + } + } + prevBlockNum = blockNum + lastBlockNum = blockNum + prevBaseTxNum = baseTxNum + lastBaseTxNum = baseTxNum + prevAmount = txAmount + lastAmount = txAmount + return nil + }) + fmt.Printf("Seg: [%d, %d, %d] => [%d, %d, %d]\n", firstBlockNum, firstBaseTxNum, firstAmount, lastBlockNum, lastBaseTxNum, lastAmount) + } + return nil + }) + if _, err := snaps.ViewTxs(snaps.BlocksAvailable(), func(sn *snapshotsync.TxnSegment) error { + lastTxnID := sn.IdxTxnHash.BaseDataID() + uint64(sn.Seg.Count()) + fmt.Printf("txTxnID = %d\n", lastTxnID) + return nil + }); err != nil { + return err + } + db := mdbx.MustOpen(filepath.Join(datadir, "chaindata")) defer db.Close() tx, err := db.BeginRo(context.Background()) if err != nil { @@ -486,18 +551,32 @@ func extractBodies(chaindata string, block uint64) error { return err } defer c.Close() - blockEncoded := dbutils.EncodeBlockNumber(block) i := 0 - for k, _, err := c.Seek(blockEncoded); k != nil; k, _, err = c.Next() { + var txId uint64 + for k, _, err := c.First(); k != nil; k, _, err = c.Next() { if err != nil { return err } blockNumber := binary.BigEndian.Uint64(k[:8]) blockHash := common.BytesToHash(k[8:]) + var hash common.Hash + if hash, err = rawdb.ReadCanonicalHash(tx, blockNumber); err != nil { + return err + } _, baseTxId, txAmount := 
rawdb.ReadBody(tx, blockHash, blockNumber) fmt.Printf("Body %d %x: baseTxId %d, txAmount %d\n", blockNumber, blockHash, baseTxId, txAmount) + if hash != blockHash { + fmt.Printf("Non-canonical\n") + continue + } i++ - if i == 1 { + if txId > 0 { + if txId != baseTxId { + fmt.Printf("Mismatch txId for block %d, txId = %d, baseTxId = %d\n", blockNumber, txId, baseTxId) + } + } + txId = baseTxId + uint64(txAmount) + 2 + if i == 50 { break } } @@ -1003,8 +1082,6 @@ func chainConfig(name string) error { chainConfig = params.RinkebyChainConfig case "goerli": chainConfig = params.GoerliChainConfig - case "kiln-devnet": - chainConfig = params.KilnDevnetChainConfig case "bsc": chainConfig = params.BSCChainConfig case "sokol": @@ -1165,20 +1242,51 @@ func findLogs(chaindata string, block uint64, blockTotal uint64) error { return nil } -func iterate(filename string) error { - d, err := compress.NewDecompressor(filename) +func iterate(filename string, prefix string) error { + pBytes := common.FromHex(prefix) + efFilename := filename + ".ef" + viFilename := filename + ".vi" + vFilename := filename + ".v" + efDecomp, err := compress.NewDecompressor(efFilename) if err != nil { return err } - defer d.Close() - g := d.MakeGetter() - var buf, bufv []byte + defer efDecomp.Close() + viIndex, err := recsplit.OpenIndex(viFilename) + if err != nil { + return err + } + defer viIndex.Close() + r := recsplit.NewIndexReader(viIndex) + vDecomp, err := compress.NewDecompressor(vFilename) + if err != nil { + return err + } + defer vDecomp.Close() + gv := vDecomp.MakeGetter() + g := efDecomp.MakeGetter() for g.HasNext() { - buf, _ = g.Next(buf[:0]) - bufv, _ = g.Next(bufv[:0]) - s := fmt.Sprintf("%x", buf) - if strings.HasPrefix(s, "000000000000006f6502b7f2bbac8c30a3f67e9a") { - fmt.Printf("%s [%x]\n", s, bufv) + key, _ := g.NextUncompressed() + if bytes.HasPrefix(key, pBytes) { + val, _ := g.NextUncompressed() + ef, _ := eliasfano32.ReadEliasFano(val) + efIt := ef.Iterator() + fmt.Printf("[%x] 
=>", key) + for efIt.HasNext() { + txNum := efIt.Next() + var txKey [8]byte + binary.BigEndian.PutUint64(txKey[:], txNum) + offset := r.Lookup2(txKey[:], key) + gv.Reset(offset) + v, _ := gv.Next(nil) + fmt.Printf(" %d", txNum) + if len(v) == 0 { + fmt.Printf("*") + } + } + fmt.Printf("\n") + } else { + g.SkipUncompressed() } } return nil @@ -1260,7 +1368,7 @@ func main() { err = hackdb.TextInfo(*chaindata, &strings.Builder{}) case "extractBodies": - err = extractBodies(*chaindata, uint64(*block)) + err = extractBodies(*chaindata) case "repairCurrent": repairCurrent() @@ -1307,7 +1415,7 @@ func main() { case "findLogs": err = findLogs(*chaindata, uint64(*block), uint64(*blockTotal)) case "iterate": - err = iterate(*chaindata) + err = iterate(*chaindata, *account) } if err != nil { diff --git a/cmd/hack/tool/fromdb/tool.go b/cmd/hack/tool/fromdb/tool.go new file mode 100644 index 00000000000..df9eea782d0 --- /dev/null +++ b/cmd/hack/tool/fromdb/tool.go @@ -0,0 +1,48 @@ +package fromdb + +import ( + "context" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/cmd/hack/tool" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/ethdb/prune" + "github.com/ledgerwatch/erigon/params" +) + +func HistoryV3(db kv.RoDB) (enabled bool) { + if err := db.View(context.Background(), func(tx kv.Tx) error { + var err error + enabled, err = rawdb.HistoryV3.Enabled(tx) + if err != nil { + return err + } + return nil + }); err != nil { + panic(err) + } + return +} + +func ChainConfig(db kv.RoDB) (cc *params.ChainConfig) { + err := db.View(context.Background(), func(tx kv.Tx) error { + cc = tool.ChainConfig(tx) + return nil + }) + tool.Check(err) + return cc +} + +func PruneMode(db kv.RoDB) (pm prune.Mode) { + if err := db.View(context.Background(), func(tx kv.Tx) error { + var err error + pm, err = prune.Get(tx) + if err != nil { + return err + } + return nil + }); err != nil { + panic(err) + } + return +} diff --git 
a/cmd/hack/tool/tool.go b/cmd/hack/tool/tool.go index efd808414f4..64ed5cc5120 100644 --- a/cmd/hack/tool/tool.go +++ b/cmd/hack/tool/tool.go @@ -1,7 +1,6 @@ package tool import ( - "context" "strconv" "github.com/ledgerwatch/erigon-lib/kv" @@ -27,12 +26,3 @@ func ChainConfig(tx kv.Tx) *params.ChainConfig { Check(err) return chainConfig } - -func ChainConfigFromDB(db kv.RoDB) (cc *params.ChainConfig) { - err := db.View(context.Background(), func(tx kv.Tx) error { - cc = ChainConfig(tx) - return nil - }) - Check(err) - return cc -} diff --git a/cmd/integration/commands/flags.go b/cmd/integration/commands/flags.go index 36c7e8e587b..e8b420b6a7b 100644 --- a/cmd/integration/commands/flags.go +++ b/cmd/integration/commands/flags.go @@ -1,10 +1,10 @@ package commands import ( + "github.com/ledgerwatch/erigon/turbo/cli" "github.com/spf13/cobra" "github.com/ledgerwatch/erigon/cmd/utils" - "github.com/ledgerwatch/erigon/common/paths" "github.com/ledgerwatch/erigon/eth/ethconfig" ) @@ -29,6 +29,9 @@ var ( pruneTBefore, pruneCBefore uint64 experiments []string chain string // Which chain to use (mainnet, ropsten, rinkeby, goerli, etc.) + + _forceSetHistoryV3 bool + workers uint64 ) func must(err error) { @@ -89,14 +92,16 @@ func withBucket(cmd *cobra.Command) { } func withDataDir2(cmd *cobra.Command) { - cmd.Flags().StringVar(&datadirCli, utils.DataDirFlag.Name, paths.DefaultDataDir(), utils.DataDirFlag.Usage) + // --datadir is required, but no --chain flag: read chainConfig from db instead + cmd.Flags().StringVar(&datadirCli, utils.DataDirFlag.Name, "", utils.DataDirFlag.Usage) must(cmd.MarkFlagDirname(utils.DataDirFlag.Name)) must(cmd.MarkFlagRequired(utils.DataDirFlag.Name)) cmd.Flags().IntVar(&databaseVerbosity, "database.verbosity", 2, "Enabling internal db logs. Very high verbosity levels may require recompile db. 
Default: 2, means warning.") } func withDataDir(cmd *cobra.Command) { - cmd.Flags().StringVar(&datadirCli, "datadir", paths.DefaultDataDir(), "data directory for temporary ELT files") + cmd.Flags().StringVar(&datadirCli, "datadir", "", "data directory for temporary ELT files") + must(cmd.MarkFlagRequired("datadir")) must(cmd.MarkFlagDirname("datadir")) cmd.Flags().StringVar(&chaindata, "chaindata", "", "path to the db") @@ -106,7 +111,7 @@ func withDataDir(cmd *cobra.Command) { } func withBatchSize(cmd *cobra.Command) { - cmd.Flags().StringVar(&batchSizeStr, "batchSize", "512M", "batch size for execution stage") + cmd.Flags().StringVar(&batchSizeStr, "batchSize", cli.BatchSizeFlag.Value, cli.BatchSizeFlag.Usage) } func withIntegrityChecks(cmd *cobra.Command) { @@ -123,9 +128,14 @@ func withTxTrace(cmd *cobra.Command) { } func withChain(cmd *cobra.Command) { - cmd.Flags().StringVar(&chain, "chain", "", "pick a chain to assume (mainnet, ropsten, etc.)") + cmd.Flags().StringVar(&chain, "chain", "mainnet", "pick a chain to assume (mainnet, ropsten, etc.)") + must(cmd.MarkFlagRequired("chain")) } func withHeimdall(cmd *cobra.Command) { cmd.Flags().StringVar(&HeimdallURL, "bor.heimdall", "http://localhost:1317", "URL of Heimdall service") } + +func withWorkers(cmd *cobra.Command) { + cmd.Flags().Uint64Var(&workers, "workers", 1, "") +} diff --git a/cmd/integration/commands/refetence_db.go b/cmd/integration/commands/refetence_db.go index 4e0a1697d17..11349bd371f 100644 --- a/cmd/integration/commands/refetence_db.go +++ b/cmd/integration/commands/refetence_db.go @@ -349,7 +349,10 @@ MainLoop: func mdbxToMdbx(ctx context.Context, logger log.Logger, from, to string) error { _ = os.RemoveAll(to) src := mdbx2.NewMDBX(logger).Path(from).Flags(func(flags uint) uint { return mdbx.Readonly | mdbx.Accede }).MustOpen() - dst := mdbx2.NewMDBX(logger).Path(to).MustOpen() + dst := mdbx2.NewMDBX(logger).Path(to). + WriteMap(). 
+ Flags(func(flags uint) uint { return flags | mdbx.NoMemInit }). + MustOpen() return kv2kv(ctx, src, dst) } @@ -368,6 +371,7 @@ func kv2kv(ctx context.Context, src, dst kv.RwDB) error { commitEvery := time.NewTicker(30 * time.Second) defer commitEvery.Stop() + var total uint64 for name, b := range src.AllBuckets() { if b.IsDeprecated { continue @@ -382,7 +386,10 @@ func kv2kv(ctx context.Context, src, dst kv.RwDB) error { if err != nil { return err } + total, _ = srcC.Count() + casted, isDupsort := c.(kv.RwCursorDupSort) + i := uint64(0) for k, v, err := srcC.First(); k != nil; k, v, err = srcC.Next() { if err != nil { @@ -399,11 +406,12 @@ func kv2kv(ctx context.Context, src, dst kv.RwDB) error { } } + i++ select { case <-ctx.Done(): return ctx.Err() case <-commitEvery.C: - log.Info("Progress", "bucket", name, "key", fmt.Sprintf("%x", k)) + log.Info("Progress", "bucket", name, "progress", fmt.Sprintf("%.1fm/%.1fm", float64(i)/1_000_000, float64(total)/1_000_000), "key", fmt.Sprintf("%x", k)) if err2 := dstTx.Commit(); err2 != nil { return err2 } diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go index 925e87d279d..8a5c9f153e3 100644 --- a/cmd/integration/commands/reset_state.go +++ b/cmd/integration/commands/reset_state.go @@ -22,15 +22,14 @@ var cmdResetState = &cobra.Command{ Short: "Reset StateStages (5,6,7,8,9,10) and buckets", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() - if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, allSnapshots(db)) }); err != nil { + sn, _ := allSnapshots(db) + if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn) }); err != nil { return err } - genesis, _ := genesisByChain(chain) - err := reset2.ResetState(db, ctx, genesis) + err := reset2.ResetState(db, ctx, chain) 
if err != nil { log.Error(err.Error()) return err @@ -38,7 +37,8 @@ var cmdResetState = &cobra.Command{ // set genesis after reset all buckets fmt.Printf("After reset: \n") - if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, allSnapshots(db)) }); err != nil { + sn, _ = allSnapshots(db) + if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn) }); err != nil { return err } @@ -60,7 +60,7 @@ func printStages(db kv.Tx, snapshots *snapshotsync.RoSnapshots) error { defer w.Flush() w.Init(os.Stdout, 8, 8, 0, '\t', 0) fmt.Fprintf(w, "Note: prune_at doesn't mean 'all data before were deleted' - it just mean stage.Prune function were run to this block. Because 1 stage may prune multiple data types to different prune distance.\n") - fmt.Fprint(w, "\n \t stage_at \t prune_at\n") + fmt.Fprint(w, "\n \t\t stage_at \t prune_at\n") for _, stage := range stages.AllStages { if progress, err = stages.GetStageProgress(db, stage); err != nil { return err @@ -69,7 +69,7 @@ func printStages(db kv.Tx, snapshots *snapshotsync.RoSnapshots) error { if err != nil { return err } - fmt.Fprintf(w, "%s \t %d \t %d\n", string(stage), progress, prunedTo) + fmt.Fprintf(w, "%s \t\t %d \t %d\n", string(stage), progress, prunedTo) } pm, err := prune.Get(db) if err != nil { @@ -112,5 +112,25 @@ func printStages(db kv.Tx, snapshots *snapshotsync.RoSnapshots) error { fmt.Fprintf(w, "--\n") fmt.Fprintf(w, "snapsthos: blocks=%d, segments=%d, indices=%d\n\n", snapshots.BlocksAvailable(), snapshots.SegmentsMax(), snapshots.IndicesMax()) + //fmt.Printf("==== state =====\n") + //db.ForEach(kv.PlainState, nil, func(k, v []byte) error { + // fmt.Printf("st: %x, %x\n", k, v) + // return nil + //}) + //fmt.Printf("==== code =====\n") + //db.ForEach(kv.Code, nil, func(k, v []byte) error { + // fmt.Printf("code: %x, %x\n", k, v) + // return nil + //}) + //fmt.Printf("==== PlainContractCode =====\n") + //db.ForEach(kv.PlainContractCode, nil, func(k, v []byte) error { + // 
fmt.Printf("code2: %x, %x\n", k, v) + // return nil + //}) + //fmt.Printf("==== IncarnationMap =====\n") + //db.ForEach(kv.IncarnationMap, nil, func(k, v []byte) error { + // fmt.Printf("IncarnationMap: %x, %x\n", k, v) + // return nil + //}) return nil } diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go index 0bef08f0553..4d24e1c022c 100644 --- a/cmd/integration/commands/root.go +++ b/cmd/integration/commands/root.go @@ -2,6 +2,7 @@ package commands import ( "path/filepath" + "runtime" "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon-lib/kv" @@ -12,6 +13,7 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" "github.com/torquem-ch/mdbx-go/mdbx" + "golang.org/x/sync/semaphore" ) var rootCmd = &cobra.Command{ @@ -35,8 +37,9 @@ func RootCommand() *cobra.Command { return rootCmd } -func dbCfg(label kv.Label, logger log.Logger, path string) kv2.MdbxOpts { - opts := kv2.NewMDBX(logger).Path(path).Label(label) +func dbCfg(label kv.Label, path string) kv2.MdbxOpts { + limiterB := semaphore.NewWeighted(int64(runtime.NumCPU()*10 + 1)) + opts := kv2.NewMDBX(log.New()).Path(path).Label(label).RoTxsLimiter(limiterB) if label == kv.ChainDB { opts = opts.MapSize(8 * datasize.TB) } diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 896715d9fb4..a38de97aceb 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "path" "path/filepath" "runtime" "strings" @@ -12,9 +13,16 @@ import ( "github.com/c2h5oh/datasize" common2 "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" + "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/cmd/hack/tool" + libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/secp256k1" + 
"github.com/spf13/cobra" + "golang.org/x/exp/slices" + + "github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb" "github.com/ledgerwatch/erigon/cmd/sentry/sentry" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/ethash" @@ -28,29 +36,40 @@ import ( "github.com/ledgerwatch/erigon/eth/integrity" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/migrations" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" stages2 "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/secp256k1" - "github.com/spf13/cobra" - "golang.org/x/exp/slices" ) +var cmdStageSnapshots = &cobra.Command{ + Use: "stage_snapshots", + Short: "", + RunE: func(cmd *cobra.Command, args []string) error { + ctx, _ := common2.RootContext() + db := openDB(dbCfg(kv.ChainDB, chaindata), true) + defer db.Close() + + if err := stageSnapshots(db, ctx); err != nil { + log.Error("Error", "err", err) + return err + } + return nil + }, +} + var cmdStageHeaders = &cobra.Command{ Use: "stage_headers", Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if err := stageHeaders(db, ctx); err != nil { @@ -66,8 +85,7 @@ var cmdStageBodies = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, 
logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if err := stageBodies(db, ctx); err != nil { @@ -82,9 +100,8 @@ var cmdStageSenders = &cobra.Command{ Use: "stage_senders", Short: "", RunE: func(cmd *cobra.Command, args []string) error { - logger := log.New() ctx, _ := common2.RootContext() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if err := stageSenders(db, ctx); err != nil { @@ -100,8 +117,7 @@ var cmdStageExec = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if err := stageExec(db, ctx); err != nil { @@ -117,8 +133,7 @@ var cmdStageTrie = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if err := stageTrie(db, ctx); err != nil { @@ -133,9 +148,8 @@ var cmdStageHashState = &cobra.Command{ Use: "stage_hash_state", Short: "", RunE: func(cmd *cobra.Command, args []string) error { - logger := log.New() ctx, _ := common2.RootContext() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if err := stageHashState(db, ctx); err != nil { @@ -151,8 +165,7 @@ var cmdStageHistory = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if err := stageHistory(db, ctx); err != nil { @@ -168,8 +181,7 @@ var cmdLogIndex = &cobra.Command{ Short: "", RunE: 
func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if err := stageLogIndex(db, ctx); err != nil { @@ -185,8 +197,7 @@ var cmdCallTraces = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if err := stageCallTraces(db, ctx); err != nil { @@ -202,8 +213,7 @@ var cmdStageTxLookup = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if err := stageTxLookup(db, ctx); err != nil { @@ -218,8 +228,7 @@ var cmdPrintStages = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata).Readonly(), false) + db := openDB(dbCfg(kv.ChainDB, chaindata).Readonly(), false) defer db.Close() if err := printAllStages(db, ctx); err != nil { @@ -235,8 +244,7 @@ var cmdPrintMigrations = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), false) + db := openDB(dbCfg(kv.ChainDB, chaindata), false) defer db.Close() if err := printAppliedMigrations(db, ctx); err != nil { log.Error("Error", "err", err) @@ -251,8 +259,7 @@ var cmdRemoveMigration = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), false) + db := 
openDB(dbCfg(kv.ChainDB, chaindata), false) defer db.Close() if err := removeMigration(db, ctx); err != nil { log.Error("Error", "err", err) @@ -266,8 +273,7 @@ var cmdRunMigrations = &cobra.Command{ Use: "run_migrations", Short: "", RunE: func(cmd *cobra.Command, args []string) error { - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() // Nothing to do, migrations will be applied automatically return nil @@ -278,21 +284,19 @@ var cmdSetPrune = &cobra.Command{ Use: "force_set_prune", Short: "Override existing --prune flag value (if you know what you are doing)", RunE: func(cmd *cobra.Command, args []string) error { - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() return overrideStorageMode(db) }, } -var cmdSetSnapshto = &cobra.Command{ +var cmdSetSnap = &cobra.Command{ Use: "force_set_snapshot", Short: "Override existing --snapshots flag value (if you know what you are doing)", RunE: func(cmd *cobra.Command, args []string) error { - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() - snapshots := allSnapshots(db) + snapshots, _ := allSnapshots(db) if err := db.Update(context.Background(), func(tx kv.RwTx) error { return snap.ForceSetFlags(tx, snapshots.Cfg()) }); err != nil { @@ -302,6 +306,18 @@ var cmdSetSnapshto = &cobra.Command{ }, } +var cmdForceSetHistoryV3 = &cobra.Command{ + Use: "force_set_history_v3", + Short: "Override existing --history.v3 flag value (if you know what you are doing)", + RunE: func(cmd *cobra.Command, args []string) error { + db := openDB(dbCfg(kv.ChainDB, chaindata), true) + defer db.Close() + return db.Update(context.Background(), func(tx kv.RwTx) error { + return rawdb.HistoryV3.ForceWrite(tx, _forceSetHistoryV3) + }) + }, +} + func init() { 
withDataDir(cmdPrintStages) withChain(cmdPrintStages) @@ -318,6 +334,11 @@ func init() { rootCmd.AddCommand(cmdStageSenders) + withDataDir(cmdStageSnapshots) + withReset(cmdStageSnapshots) + + rootCmd.AddCommand(cmdStageSnapshots) + withDataDir(cmdStageHeaders) withUnwind(cmdStageHeaders) withReset(cmdStageHeaders) @@ -342,6 +363,7 @@ func init() { withTxTrace(cmdStageExec) withChain(cmdStageExec) withHeimdall(cmdStageExec) + withWorkers(cmdStageExec) rootCmd.AddCommand(cmdStageExec) @@ -421,9 +443,13 @@ func init() { withHeimdall(cmdRunMigrations) rootCmd.AddCommand(cmdRunMigrations) - withDataDir2(cmdSetSnapshto) - withChain(cmdSetSnapshto) - rootCmd.AddCommand(cmdSetSnapshto) + withDataDir2(cmdSetSnap) + withChain(cmdSetSnap) + rootCmd.AddCommand(cmdSetSnap) + + withDataDir2(cmdForceSetHistoryV3) + cmdForceSetHistoryV3.Flags().BoolVar(&_forceSetHistoryV3, "history.v3", false, "") + rootCmd.AddCommand(cmdForceSetHistoryV3) withDataDir(cmdSetPrune) withChain(cmdSetPrune) @@ -440,14 +466,33 @@ func init() { rootCmd.AddCommand(cmdSetPrune) } +func stageSnapshots(db kv.RwDB, ctx context.Context) error { + return db.Update(ctx, func(tx kv.RwTx) error { + if reset { + if err := stages.SaveStageProgress(tx, stages.Snapshots, 0); err != nil { + return fmt.Errorf("saving Snapshots progress failed: %w", err) + } + } + progress, err := stages.GetStageProgress(tx, stages.Snapshots) + if err != nil { + return fmt.Errorf("re-read Snapshots progress: %w", err) + } + log.Info("Progress", "snapshots", progress) + return nil + }) +} + func stageHeaders(db kv.RwDB, ctx context.Context) error { + sn, _ := allSnapshots(db) + br := getBlockReader(db) return db.Update(ctx, func(tx kv.RwTx) error { if !(unwind > 0 || reset) { log.Info("This command only works with --unwind or --reset options") } if reset { - if err := reset2.ResetBlocks(tx); err != nil { + dirs := datadir.New(datadirCli) + if err := reset2.ResetBlocks(tx, db, sn, br, dirs.Tmp); err != nil { return err } return nil @@ 
-465,11 +510,11 @@ func stageHeaders(db kv.RwDB, ctx context.Context) error { } if err = stages.SaveStageProgress(tx, stages.Headers, unwindTo); err != nil { - return fmt.Errorf("saving Bodies progress failed: %w", err) + return fmt.Errorf("saving Headers progress failed: %w", err) } progress, err = stages.GetStageProgress(tx, stages.Headers) if err != nil { - return fmt.Errorf("re-read Bodies progress: %w", err) + return fmt.Errorf("re-read Headers progress: %w", err) } { // hard-unwind stage_body also if err := rawdb.TruncateBlocks(ctx, tx, progress+1); err != nil { @@ -506,8 +551,10 @@ func stageHeaders(db kv.RwDB, ctx context.Context) error { } func stageBodies(db kv.RwDB, ctx context.Context) error { - _, _, _, sync, _, _ := newSync(ctx, db, nil) - chainConfig := tool.ChainConfigFromDB(db) + _, _, sync, _, _ := newSync(ctx, db, nil) + chainConfig, historyV3 := fromdb.ChainConfig(db), fromdb.HistoryV3(db) + sn, _ := allSnapshots(db) + if err := db.Update(ctx, func(tx kv.RwTx) error { s := stage(sync, tx, nil, stages.Bodies) @@ -517,7 +564,7 @@ func stageBodies(db kv.RwDB, ctx context.Context) error { } u := sync.NewUnwindState(stages.Bodies, s.BlockNumber-unwind, s.BlockNumber) - if err := stagedsync.UnwindBodiesStage(u, tx, stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, 0, allSnapshots(db), getBlockReader(db)), ctx); err != nil { + if err := stagedsync.UnwindBodiesStage(u, tx, stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, 0, sn, getBlockReader(db), historyV3), ctx); err != nil { return err } @@ -538,8 +585,9 @@ func stageBodies(db kv.RwDB, ctx context.Context) error { func stageSenders(db kv.RwDB, ctx context.Context) error { tmpdir := filepath.Join(datadirCli, etl.TmpDirName) - _, _, _, sync, _, _ := newSync(ctx, db, nil) - chainConfig := tool.ChainConfigFromDB(db) + _, _, sync, _, _ := newSync(ctx, db, nil) + chainConfig := fromdb.ChainConfig(db) + snapshots, _ := allSnapshots(db) 
must(sync.SetCurrentStage(stages.Senders)) @@ -597,7 +645,6 @@ func stageSenders(db kv.RwDB, ctx context.Context) error { log.Info("Stage", "name", s.ID, "progress", s.BlockNumber) var br *snapshotsync.BlockRetire - snapshots := allSnapshots(db) if snapshots.Cfg().Enabled { workers := runtime.GOMAXPROCS(-1) - 1 if workers < 1 { @@ -627,7 +674,7 @@ func stageSenders(db kv.RwDB, ctx context.Context) error { } return nil } else { - if err = stagedsync.SpawnRecoverSendersStage(cfg, s, sync, tx, block, ctx); err != nil { + if err = stagedsync.SpawnRecoverSendersStage(cfg, s, sync, tx, block, ctx, false /* quiet */); err != nil { return err } } @@ -635,18 +682,19 @@ func stageSenders(db kv.RwDB, ctx context.Context) error { } func stageExec(db kv.RwDB, ctx context.Context) error { - pm, engine, vmConfig, sync, _, _ := newSync(ctx, db, nil) - chainConfig := tool.ChainConfigFromDB(db) + chainConfig, historyV3, pm := fromdb.ChainConfig(db), fromdb.HistoryV3(db), fromdb.PruneMode(db) + dirs := datadir.New(datadirCli) + engine, vmConfig, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.Execution)) - tmpdir := filepath.Join(datadirCli, etl.TmpDirName) + _, agg := allSnapshots(db) if reset { - genesis, _ := genesisByChain(chain) - if err := db.Update(ctx, func(tx kv.RwTx) error { return reset2.ResetExec(tx, genesis) }); err != nil { + if err := db.Update(ctx, func(tx kv.RwTx) error { return reset2.ResetExec(tx, chain) }); err != nil { return err } return nil } + if txtrace { // Activate tracing and writing into json files for each transaction vmConfig.Tracer = nil @@ -666,12 +714,13 @@ func stageExec(db kv.RwDB, ctx context.Context) error { pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) } + genesis := core.DefaultGenesisBlockByChainName(chain) cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, /*stateStream=*/ false, - /*badBlockHalt=*/ false, tmpdir, getBlockReader(db), nil) + /*badBlockHalt=*/ false, 
historyV3, dirs, getBlockReader(db), nil, genesis, int(workers), agg) if unwind > 0 { u := sync.NewUnwindState(stages.Execution, s.BlockNumber-unwind, s.BlockNumber) - err := stagedsync.UnwindExecutionStage(u, s, nil, ctx, cfg, false) + err := stagedsync.UnwindExecutionStage(u, s, nil, ctx, cfg, true) if err != nil { return err } @@ -683,14 +732,14 @@ func stageExec(db kv.RwDB, ctx context.Context) error { if err != nil { return err } - err = stagedsync.PruneExecutionStage(p, nil, cfg, ctx, false) + err = stagedsync.PruneExecutionStage(p, nil, cfg, ctx, true) if err != nil { return err } return nil } - err := stagedsync.SpawnExecuteBlocksStage(s, sync, nil, block, ctx, cfg, false) + err := stagedsync.SpawnExecuteBlocksStage(s, sync, nil, block, ctx, cfg, true /* initialCycle */, false /* quiet */) if err != nil { return err } @@ -698,9 +747,10 @@ func stageExec(db kv.RwDB, ctx context.Context) error { } func stageTrie(db kv.RwDB, ctx context.Context) error { - pm, _, _, sync, _, _ := newSync(ctx, db, nil) + dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), fromdb.HistoryV3(db) + _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.IntermediateHashes)) - tmpdir := filepath.Join(datadirCli, etl.TmpDirName) + _, agg := allSnapshots(db) tx, err := db.BeginRw(ctx) if err != nil { @@ -723,11 +773,10 @@ func stageTrie(db kv.RwDB, ctx context.Context) error { pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) } - fmt.Printf("distance: %d\n", pm.History) log.Info("StageExec", "progress", execStage.BlockNumber) log.Info("StageTrie", "progress", s.BlockNumber) - cfg := stagedsync.StageTrieCfg(db, true, true, false, tmpdir, getBlockReader(db), nil) + cfg := stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, getBlockReader(db), nil, historyV3, agg) if unwind > 0 { u := sync.NewUnwindState(stages.IntermediateHashes, s.BlockNumber-unwind, s.BlockNumber) if err := 
stagedsync.UnwindIntermediateHashesStage(u, s, tx, cfg, ctx); err != nil { @@ -743,7 +792,7 @@ func stageTrie(db kv.RwDB, ctx context.Context) error { return err } } else { - if _, err := stagedsync.SpawnIntermediateHashesStage(s, sync /* Unwinder */, tx, cfg, ctx); err != nil { + if _, err := stagedsync.SpawnIntermediateHashesStage(s, sync /* Unwinder */, tx, cfg, ctx, false /* quiet */); err != nil { return err } } @@ -752,10 +801,10 @@ func stageTrie(db kv.RwDB, ctx context.Context) error { } func stageHashState(db kv.RwDB, ctx context.Context) error { - tmpdir := filepath.Join(datadirCli, etl.TmpDirName) - - pm, _, _, sync, _, _ := newSync(ctx, db, nil) + dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), fromdb.HistoryV3(db) + _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.HashState)) + _, agg := allSnapshots(db) tx, err := db.BeginRw(ctx) if err != nil { @@ -780,7 +829,8 @@ func stageHashState(db kv.RwDB, ctx context.Context) error { } log.Info("Stage", "name", s.ID, "progress", s.BlockNumber) - cfg := stagedsync.StageHashStateCfg(db, tmpdir) + + cfg := stagedsync.StageHashStateCfg(db, dirs, historyV3, agg) if unwind > 0 { u := sync.NewUnwindState(stages.HashState, s.BlockNumber-unwind, s.BlockNumber) err = stagedsync.UnwindHashStateStage(u, s, tx, cfg, ctx) @@ -797,7 +847,7 @@ func stageHashState(db kv.RwDB, ctx context.Context) error { return err } } else { - err = stagedsync.SpawnHashStateStage(s, tx, cfg, ctx) + err = stagedsync.SpawnHashStateStage(s, tx, cfg, ctx, false /* quiet */) if err != nil { return err } @@ -806,9 +856,11 @@ func stageHashState(db kv.RwDB, ctx context.Context) error { } func stageLogIndex(db kv.RwDB, ctx context.Context) error { - tmpdir := filepath.Join(datadirCli, etl.TmpDirName) - - pm, _, _, sync, _, _ := newSync(ctx, db, nil) + dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), fromdb.HistoryV3(db) + if historyV3 { + return fmt.Errorf("this stage is disable in 
--history.v3=true") + } + _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.LogIndex)) tx, err := db.BeginRw(ctx) if err != nil { @@ -836,7 +888,7 @@ func stageLogIndex(db kv.RwDB, ctx context.Context) error { log.Info("Stage exec", "progress", execAt) log.Info("Stage", "name", s.ID, "progress", s.BlockNumber) - cfg := stagedsync.StageLogIndexCfg(db, pm, tmpdir) + cfg := stagedsync.StageLogIndexCfg(db, pm, dirs.Tmp) if unwind > 0 { u := sync.NewUnwindState(stages.LogIndex, s.BlockNumber-unwind, s.BlockNumber) err = stagedsync.UnwindLogIndex(u, s, tx, cfg, ctx) @@ -860,12 +912,14 @@ func stageLogIndex(db kv.RwDB, ctx context.Context) error { return tx.Commit() } -func stageCallTraces(kv kv.RwDB, ctx context.Context) error { - tmpdir := filepath.Join(datadirCli, etl.TmpDirName) - - pm, _, _, sync, _, _ := newSync(ctx, kv, nil) +func stageCallTraces(db kv.RwDB, ctx context.Context) error { + dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), fromdb.HistoryV3(db) + if historyV3 { + return fmt.Errorf("this stage is disable in --history.v3=true") + } + _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.CallTraces)) - tx, err := kv.BeginRw(ctx) + tx, err := db.BeginRw(ctx) if err != nil { return err } @@ -896,7 +950,7 @@ func stageCallTraces(kv kv.RwDB, ctx context.Context) error { } log.Info("ID call traces", "progress", s.BlockNumber) - cfg := stagedsync.StageCallTracesCfg(kv, pm, block, tmpdir) + cfg := stagedsync.StageCallTracesCfg(db, pm, block, dirs.Tmp) if unwind > 0 { u := sync.NewUnwindState(stages.CallTraces, s.BlockNumber-unwind, s.BlockNumber) @@ -922,8 +976,11 @@ func stageCallTraces(kv kv.RwDB, ctx context.Context) error { } func stageHistory(db kv.RwDB, ctx context.Context) error { - tmpdir := filepath.Join(datadirCli, etl.TmpDirName) - pm, _, _, sync, _, _ := newSync(ctx, db, nil) + dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), fromdb.HistoryV3(db) + if historyV3 { 
+ return fmt.Errorf("this stage is disable in --history.v3=true") + } + _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.AccountHistoryIndex)) tx, err := db.BeginRw(ctx) @@ -952,7 +1009,7 @@ func stageHistory(db kv.RwDB, ctx context.Context) error { log.Info("ID acc history", "progress", stageAcc.BlockNumber) log.Info("ID storage history", "progress", stageStorage.BlockNumber) - cfg := stagedsync.StageHistoryCfg(db, pm, tmpdir) + cfg := stagedsync.StageHistoryCfg(db, pm, dirs.Tmp) if unwind > 0 { //nolint:staticcheck u := sync.NewUnwindState(stages.StorageHistoryIndex, stageStorage.BlockNumber-unwind, stageStorage.BlockNumber) if err := stagedsync.UnwindStorageHistoryIndex(u, stageStorage, tx, cfg, ctx); err != nil { @@ -979,7 +1036,8 @@ func stageHistory(db kv.RwDB, ctx context.Context) error { if err != nil { return err } - _ = printStages(tx, allSnapshots(db)) + sn, _ := allSnapshots(db) + _ = printStages(tx, sn) } else { if err := stagedsync.SpawnAccountHistoryIndex(stageAcc, tx, cfg, ctx); err != nil { return err @@ -992,11 +1050,11 @@ func stageHistory(db kv.RwDB, ctx context.Context) error { } func stageTxLookup(db kv.RwDB, ctx context.Context) error { - tmpdir := filepath.Join(datadirCli, etl.TmpDirName) - - pm, _, _, sync, _, _ := newSync(ctx, db, nil) - chainConfig := tool.ChainConfigFromDB(db) + dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) + _, _, sync, _, _ := newSync(ctx, db, nil) + chainConfig := fromdb.ChainConfig(db) must(sync.SetCurrentStage(stages.TxLookup)) + sn, _ := allSnapshots(db) tx, err := db.BeginRw(ctx) if err != nil { @@ -1020,7 +1078,12 @@ func stageTxLookup(db kv.RwDB, ctx context.Context) error { } log.Info("Stage", "name", s.ID, "progress", s.BlockNumber) isBor := chainConfig.Bor != nil - cfg := stagedsync.StageTxLookupCfg(db, pm, tmpdir, allSnapshots(db), isBor) + var sprint uint64 + if isBor { + sprint = chainConfig.Bor.Sprint + } + + cfg := stagedsync.StageTxLookupCfg(db, pm, dirs.Tmp, sn, 
isBor, sprint) if unwind > 0 { u := sync.NewUnwindState(stages.TxLookup, s.BlockNumber-unwind, s.BlockNumber) err = stagedsync.UnwindTxLookup(u, s, tx, cfg, ctx) @@ -1046,7 +1109,8 @@ func stageTxLookup(db kv.RwDB, ctx context.Context) error { } func printAllStages(db kv.RoDB, ctx context.Context) error { - return db.View(ctx, func(tx kv.Tx) error { return printStages(tx, allSnapshots(db)) }) + sn, _ := allSnapshots(db) + return db.View(ctx, func(tx kv.Tx) error { return printStages(tx, sn) }) } func printAppliedMigrations(db kv.RwDB, ctx context.Context) error { @@ -1075,8 +1139,9 @@ func removeMigration(db kv.RwDB, ctx context.Context) error { var openSnapshotOnce sync.Once var _allSnapshotsSingleton *snapshotsync.RoSnapshots +var _aggSingleton *libstate.Aggregator22 -func allSnapshots(db kv.RoDB) *snapshotsync.RoSnapshots { +func allSnapshots(db kv.RoDB) (*snapshotsync.RoSnapshots, *libstate.Aggregator22) { openSnapshotOnce.Do(func() { var useSnapshots bool _ = db.View(context.Background(), func(tx kv.Tx) error { @@ -1085,14 +1150,34 @@ func allSnapshots(db kv.RoDB) *snapshotsync.RoSnapshots { }) snapCfg := ethconfig.NewSnapCfg(useSnapshots, true, true) _allSnapshotsSingleton = snapshotsync.NewRoSnapshots(snapCfg, filepath.Join(datadirCli, "snapshots")) + + aggDir := path.Join(datadirCli, "snapshots", "history") + dir.MustExist(aggDir) + var err error + _aggSingleton, err = libstate.NewAggregator22(aggDir, ethconfig.HistoryV3AggregationStep) + if err != nil { + panic(err) + } + err = _aggSingleton.ReopenFiles() + if err != nil { + panic(err) + } + if useSnapshots { if err := _allSnapshotsSingleton.ReopenFolder(); err != nil { panic(err) } - _allSnapshotsSingleton.ReopenFolder() + _allSnapshotsSingleton.LogStat() + db.View(context.Background(), func(tx kv.Tx) error { + _aggSingleton.LogStats(tx, func(endTxNumMinimax uint64) uint64 { + _, histBlockNumProgress, _ := rawdb.TxNums.FindBlockNum(tx, endTxNumMinimax) + return histBlockNumProgress + }) + return nil + }) 
} }) - return _allSnapshotsSingleton + return _allSnapshotsSingleton, _aggSingleton } var openBlockReaderOnce sync.Once @@ -1101,7 +1186,7 @@ var _blockReaderSingleton services.FullBlockReader func getBlockReader(db kv.RoDB) (blockReader services.FullBlockReader) { openBlockReaderOnce.Do(func() { _blockReaderSingleton = snapshotsync.NewBlockReader() - if sn := allSnapshots(db); sn.Cfg().Enabled { + if sn, _ := allSnapshots(db); sn.Cfg().Enabled { x := snapshotsync.NewBlockReaderWithSnapshots(sn) _blockReaderSingleton = x } @@ -1109,34 +1194,20 @@ func getBlockReader(db kv.RoDB) (blockReader services.FullBlockReader) { return _blockReaderSingleton } -func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) (prune.Mode, consensus.Engine, *vm.Config, *stagedsync.Sync, *stagedsync.Sync, stagedsync.MiningState) { - tmpdir := filepath.Join(datadirCli, etl.TmpDirName) +func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) (consensus.Engine, *vm.Config, *stagedsync.Sync, *stagedsync.Sync, stagedsync.MiningState) { logger := log.New() + dirs, historyV3, pm := datadir.New(datadirCli), fromdb.HistoryV3(db), fromdb.PruneMode(db) - var pm prune.Mode - var err error - if err = db.View(context.Background(), func(tx kv.Tx) error { - pm, err = prune.Get(tx) - if err != nil { - return err - } - if err = stagedsync.UpdateMetrics(tx); err != nil { - return err - } - return nil - }); err != nil { - panic(err) - } vmConfig := &vm.Config{} - events := privateapi.NewEvents() + events := shards.NewEvents() - genesis, _ := genesisByChain(chain) + genesis := core.DefaultGenesisBlockByChainName(chain) chainConfig, genesisBlock, genesisErr := core.CommitGenesisBlock(db, genesis) if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { panic(genesisErr) } - log.Info("Initialised chain configuration", "config", chainConfig) + //log.Info("Initialised chain configuration", "config", chainConfig) // Apply special hacks for BSC 
params if chainConfig.Parlia != nil { @@ -1147,6 +1218,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) must(batchSize.UnmarshalText([]byte(batchSizeStr))) cfg := ethconfig.Defaults + cfg.HistoryV3 = historyV3 cfg.Prune = pm cfg.BatchSize = batchSize cfg.DeprecatedTxPool.Disable = true @@ -1154,25 +1226,10 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) cfg.Miner = *miningConfig } cfg.Dirs = datadir.New(datadirCli) - allSn := allSnapshots(db) + allSn, agg := allSnapshots(db) cfg.Snapshot = allSn.Cfg() - var engine consensus.Engine - config := ðconfig.Defaults - if chainConfig.Clique != nil { - c := params.CliqueSnapshot - c.DBPath = filepath.Join(datadirCli, "clique", "db") - engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, c, config.Miner.Notify, config.Miner.Noverify, "", true, datadirCli, allSn) - } else if chainConfig.Aura != nil { - engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, ¶ms.AuRaConfig{DBPath: filepath.Join(datadirCli, "aura")}, config.Miner.Notify, config.Miner.Noverify, "", true, datadirCli, allSn) - } else if chainConfig.Parlia != nil { - consensusConfig := ¶ms.ParliaConfig{DBPath: filepath.Join(datadirCli, "parlia")} - engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadirCli, allSn) - } else if chainConfig.Bor != nil { - consensusConfig := &config.Bor - engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, HeimdallURL, false, datadirCli, allSn) - } else { //ethash - engine = ethash.NewFaker() - } + + engine := initConsensusEngine(chainConfig, logger, allSn, cfg.Dirs.DataDir, db) br := getBlockReader(db) sentryControlServer, err := sentry.NewMultiClient( @@ -1186,30 +1243,35 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) 
ethconfig.Defaults.Sync, br, false, + nil, ) if err != nil { panic(err) } - sync, err := stages2.NewStagedSync(context.Background(), logger, db, p2p.Config{}, cfg, sentryControlServer, tmpdir, &stagedsync.Notifications{}, nil, allSn, nil, nil) + sync, err := stages2.NewStagedSync(context.Background(), db, p2p.Config{}, &cfg, sentryControlServer, &shards.Notifications{}, nil, allSn, agg, nil) if err != nil { panic(err) } miner := stagedsync.NewMiningState(&cfg.Miner) - + miningCancel := make(chan struct{}) + go func() { + <-ctx.Done() + close(miningCancel) + }() miningSync := stagedsync.New( stagedsync.MiningStages(ctx, - stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, nil, tmpdir), - stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, tmpdir, nil), - stagedsync.StageHashStateCfg(db, tmpdir), - stagedsync.StageTrieCfg(db, false, true, false, tmpdir, br, nil), - stagedsync.StageMiningFinishCfg(db, *chainConfig, engine, miner, ctx.Done()), + stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, nil, dirs.Tmp), + stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, dirs.Tmp, nil, 0), + stagedsync.StageHashStateCfg(db, dirs, historyV3, agg), + stagedsync.StageTrieCfg(db, false, true, false, dirs.Tmp, br, nil, historyV3, agg), + stagedsync.StageMiningFinishCfg(db, *chainConfig, engine, miner, miningCancel), ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder, ) - return pm, engine, vmConfig, sync, miningSync, miner + return engine, vmConfig, sync, miningSync, miner } func progress(tx kv.Getter, stage stages.SyncStage) uint64 { @@ -1229,7 +1291,8 @@ func stage(st *stagedsync.Sync, tx kv.Tx, db kv.RoDB, stage stages.SyncStage) *s } func overrideStorageMode(db kv.RwDB) error { - pm, err := prune.FromCli(pruneFlag, pruneH, pruneR, pruneT, pruneC, + chainConfig := fromdb.ChainConfig(db) + pm, err := prune.FromCli(chainConfig.ChainID.Uint64(), 
pruneFlag, pruneH, pruneR, pruneT, pruneC, pruneHBefore, pruneRBefore, pruneTBefore, pruneCBefore, experiments) if err != nil { return err @@ -1247,15 +1310,27 @@ func overrideStorageMode(db kv.RwDB) error { }) } -func genesisByChain(chain string) (*core.Genesis, *params.ChainConfig) { - var chainConfig *params.ChainConfig - var genesis *core.Genesis - if chain == "" { - chainConfig = params.MainnetChainConfig - genesis = core.DefaultGenesisBlock() - } else { - chainConfig = params.ChainConfigByChainName(chain) - genesis = core.DefaultGenesisBlockByChainName(chain) +func initConsensusEngine(chainConfig *params.ChainConfig, logger log.Logger, snapshots *snapshotsync.RoSnapshots, datadir string, db kv.RwDB) (engine consensus.Engine) { + config := ethconfig.Defaults + + switch { + case chainConfig.Clique != nil: + c := params.CliqueSnapshot + c.DBPath = filepath.Join(datadir, "clique", "db") + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, c, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, snapshots, true /* readonly */, db) + case chainConfig.Aura != nil: + consensusConfig := ¶ms.AuRaConfig{DBPath: filepath.Join(datadir, "aura")} + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, snapshots, true /* readonly */, db) + case chainConfig.Parlia != nil: + // Apply special hacks for BSC params + params.ApplyBinanceSmartChainParams() + consensusConfig := ¶ms.ParliaConfig{DBPath: filepath.Join(datadir, "parlia")} + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, snapshots, true /* readonly */, db) + case chainConfig.Bor != nil: + consensusConfig := &config.Bor + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, HeimdallURL, false, datadir, snapshots, true 
/* readonly */, db) + default: //ethash + engine = ethash.NewFaker() } - return genesis, chainConfig + return } diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index c9cc53cbe85..9d4ccdcec56 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -12,12 +12,13 @@ import ( "github.com/c2h5oh/datasize" common2 "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/cmd/hack/tool" + "github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/debugprint" + "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -52,11 +53,11 @@ Examples: cfg := &nodecfg.DefaultConfig utils.SetNodeConfigCobra(cmd, cfg) ethConfig := ðconfig.Defaults + ethConfig.Genesis = core.DefaultGenesisBlockByChainName(chain) erigoncli.ApplyFlagsForEthConfigCobra(cmd.Flags(), ethConfig) miningConfig := params.MiningConfig{} utils.SetupMinerCobra(cmd, &miningConfig) - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if err := syncBySmallSteps(db, miningConfig, ctx); err != nil { @@ -78,8 +79,7 @@ var loopIhCmd = &cobra.Command{ Use: "loop_ih", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if unwind == 0 { @@ -98,8 +98,7 @@ var loopExecCmd = &cobra.Command{ Use: "loop_exec", RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := 
common2.RootContext() - logger := log.New() - db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) + db := openDB(dbCfg(kv.ChainDB, chaindata), true) defer db.Close() if unwind == 0 { unwind = 1 @@ -123,6 +122,7 @@ func init() { withMining(stateStags) withChain(stateStags) withHeimdall(stateStags) + withWorkers(stateStags) rootCmd.AddCommand(stateStags) @@ -139,13 +139,16 @@ func init() { withUnwind(loopExecCmd) withChain(loopExecCmd) withHeimdall(loopExecCmd) + withWorkers(loopExecCmd) rootCmd.AddCommand(loopExecCmd) } func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context.Context) error { - pm, engine, vmConfig, stateStages, miningStages, miner := newSync(ctx, db, &miningConfig) - chainConfig := tool.ChainConfigFromDB(db) + engine, vmConfig, stateStages, miningStages, miner := newSync(ctx, db, &miningConfig) + chainConfig, historyV3, pm := fromdb.ChainConfig(db), fromdb.HistoryV3(db), fromdb.PruneMode(db) + dirs := datadir.New(datadirCli) + _, agg := allSnapshots(db) tx, err := db.BeginRw(ctx) if err != nil { @@ -153,7 +156,6 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. } defer tx.Rollback() - dirs := datadir.New(datadirCli) quit := ctx.Done() var batchSize datasize.ByteSize @@ -182,11 +184,12 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. 
stateStages.DisableStages(stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders) - execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, nil, false, false, dirs.Tmp, getBlockReader(db), nil) + genesis := core.DefaultGenesisBlockByChainName(chain) + execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, nil, false, false, historyV3, dirs, getBlockReader(db), nil, genesis, int(workers), agg) - execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { - return func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { - if err := stagedsync.SpawnExecuteBlocksStage(s, unwinder, tx, execToBlock, ctx, execCfg, firstCycle); err != nil { + execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx, quiet bool) error { + return func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx, quiet bool) error { + if err := stagedsync.SpawnExecuteBlocksStage(s, unwinder, tx, execToBlock, ctx, execCfg, firstCycle, quiet); err != nil { return fmt.Errorf("spawnExecuteBlocksStage: %w", err) } return nil @@ -273,7 +276,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. stateStages.MockExecFunc(stages.Execution, execUntilFunc(execToBlock)) _ = stateStages.SetCurrentStage(stages.Execution) - if err := stateStages.Run(db, tx, false); err != nil { + if err := stateStages.Run(db, tx, false /* firstCycle */, false /* quiet */); err != nil { return err } @@ -308,7 +311,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. 
if miner.MiningConfig.Enabled && nextBlock != nil && nextBlock.Coinbase() != (common.Address{}) { miner.MiningConfig.Etherbase = nextBlock.Coinbase() miner.MiningConfig.ExtraData = nextBlock.Extra() - miningStages.MockExecFunc(stages.MiningCreateBlock, func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx) error { + miningStages.MockExecFunc(stages.MiningCreateBlock, func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, quiet bool) error { err = stagedsync.SpawnMiningCreateBlockStage(s, tx, stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, nil, dirs.Tmp), quit) @@ -332,7 +335,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. //}) _ = miningStages.SetCurrentStage(stages.MiningCreateBlock) - if err := miningStages.Run(db, tx, false); err != nil { + if err := miningStages.Run(db, tx, false /* firstCycle */, false /* quiet */); err != nil { return err } tx.Rollback() @@ -406,28 +409,30 @@ func checkMinedBlock(b1, b2 *types.Block, chainConfig *params.ChainConfig) { } func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { - _, _, _, sync, _, _ := newSync(ctx, db, nil) + _, _, sync, _, _ := newSync(ctx, db, nil) dirs := datadir.New(datadirCli) + historyV3 := fromdb.HistoryV3(db) + _, agg := allSnapshots(db) + tx, err := db.BeginRw(ctx) if err != nil { return err } defer tx.Rollback() - sync.DisableStages(stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders, stages.Execution, stages.Translation, stages.AccountHistoryIndex, stages.StorageHistoryIndex, stages.TxLookup, stages.Finish) - if err = sync.Run(db, tx, false); err != nil { + if err = sync.Run(db, tx, false /* firstCycle */, false /* quiet */); err != nil { return err } execStage := stage(sync, tx, nil, stages.HashState) to := execStage.BlockNumber - unwind _ = sync.SetCurrentStage(stages.HashState) u := 
&stagedsync.UnwindState{ID: stages.HashState, UnwindPoint: to} - if err = stagedsync.UnwindHashStateStage(u, stage(sync, tx, nil, stages.HashState), tx, stagedsync.StageHashStateCfg(db, dirs.Tmp), ctx); err != nil { + if err = stagedsync.UnwindHashStateStage(u, stage(sync, tx, nil, stages.HashState), tx, stagedsync.StageHashStateCfg(db, dirs, historyV3, agg), ctx); err != nil { return err } _ = sync.SetCurrentStage(stages.IntermediateHashes) u = &stagedsync.UnwindState{ID: stages.IntermediateHashes, UnwindPoint: to} - if err = stagedsync.UnwindIntermediateHashesStage(u, stage(sync, tx, nil, stages.IntermediateHashes), tx, stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, getBlockReader(db), nil), ctx); err != nil { + if err = stagedsync.UnwindIntermediateHashesStage(u, stage(sync, tx, nil, stages.IntermediateHashes), tx, stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, getBlockReader(db), nil, historyV3, agg), ctx); err != nil { return err } must(tx.Commit()) @@ -437,7 +442,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { sync.DisableStages(stages.IntermediateHashes) _ = sync.SetCurrentStage(stages.HashState) - if err = sync.Run(db, tx, false); err != nil { + if err = sync.Run(db, tx, false /* firstCycle */, false /* quiet*/); err != nil { return err } must(tx.Commit()) @@ -457,7 +462,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { _ = sync.SetCurrentStage(stages.IntermediateHashes) t := time.Now() - if err = sync.Run(db, tx, false); err != nil { + if err = sync.Run(db, tx, false /* firstCycle */, false /* quiet */); err != nil { return err } log.Warn("loop", "time", time.Since(t).String()) @@ -471,9 +476,10 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { } func loopExec(db kv.RwDB, ctx context.Context, unwind uint64) error { - pm, engine, vmConfig, sync, _, _ := newSync(ctx, db, nil) - chainConfig := tool.ChainConfigFromDB(db) - dirs := datadir.New(datadirCli) + engine, 
vmConfig, sync, _, _ := newSync(ctx, db, nil) + chainConfig := fromdb.ChainConfig(db) + dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) + _, agg := allSnapshots(db) tx, err := db.BeginRw(ctx) if err != nil { @@ -489,17 +495,21 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64) error { sync.EnableStages(stages.Execution) var batchSize datasize.ByteSize must(batchSize.UnmarshalText([]byte(batchSizeStr))) - + historyV3, err := rawdb.HistoryV3.Enabled(tx) + if err != nil { + return err + } from := progress(tx, stages.Execution) to := from + unwind + genesis := core.DefaultGenesisBlockByChainName(chain) cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, /*stateStream=*/ false, - /*badBlockHalt=*/ false, dirs.Tmp, getBlockReader(db), nil) + /*badBlockHalt=*/ false, historyV3, dirs, getBlockReader(db), nil, genesis, int(workers), agg) // set block limit of execute stage - sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { - if err = stagedsync.SpawnExecuteBlocksStage(stageState, sync, tx, to, ctx, cfg, false); err != nil { + sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx, quiet bool) error { + if err = stagedsync.SpawnExecuteBlocksStage(stageState, sync, tx, to, ctx, cfg, false /* initialCycle */, false /* quiet */); err != nil { return fmt.Errorf("spawnExecuteBlocksStage: %w", err) } return nil @@ -514,7 +524,7 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64) error { _ = sync.SetCurrentStage(stages.Execution) t := time.Now() - if err = sync.Run(db, tx, false); err != nil { + if err = sync.Run(db, tx, false /* firstCycle */, false /* quiet */); err != nil { return err } fmt.Printf("loop time: %s\n", time.Since(t)) diff --git a/cmd/integration/commands/testing.go 
b/cmd/integration/commands/testing.go deleted file mode 100644 index bc096adfc0c..00000000000 --- a/cmd/integration/commands/testing.go +++ /dev/null @@ -1,205 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "net" - "runtime" - "syscall" - "time" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - "github.com/ledgerwatch/erigon-lib/common" - - //grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" - proto_testing "github.com/ledgerwatch/erigon-lib/gointerfaces/testing" - "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" -) - -var ( - testingAddr string // Address of the gRPC endpoint of the integration testing server - sentryAddr string // Address of the gRPC endpoint of the test sentry (mimicking sentry for the integration tests) - consAddr string // Address of the gRPC endpoint of consensus engine to test - consSpecFile string // Path to the specification file for the consensus engine (ideally, integration test and consensus engine use identical spec files) -) - -func init() { - cmdTestCore.Flags().StringVar(&testingAddr, "testing.api.addr", "localhost:9092", "address of the gRPC endpoint of the integration testing server") - cmdTestCore.Flags().StringVar(&sentryAddr, "sentry.api.addr", "localhost:9091", "Address of the gRPC endpoint of the test sentry (mimicking sentry for the integration tests)") - rootCmd.AddCommand(cmdTestCore) - - cmdTestCons.Flags().StringVar(&consAddr, "cons.api.addr", "locahost:9093", "Address of the gRPC endpoint of the consensus engine to test") - cmdTestCons.Flags().StringVar(&consSpecFile, "cons.spec.file", "", "Specification file for the consensis engine (ideally, integration test and consensus engine use identical spec files)") - rootCmd.AddCommand(cmdTestCons) -} - -var cmdTestCore 
= &cobra.Command{ - Use: "test_core", - Short: "Test server for testing core of Erigon or equivalent component", - RunE: func(cmd *cobra.Command, args []string) error { - ctx, _ := common.RootContext() - - if err := testCore(ctx); err != nil { - log.Error("Error", "err", err) - return err - } - return nil - }, -} - -var cmdTestCons = &cobra.Command{ - Use: "test_cons", - Short: "Integration test for consensus engine", - RunE: func(cmd *cobra.Command, args []string) error { - ctx, _ := common.RootContext() - if err := testCons(ctx); err != nil { - log.Error("Error", "err", err) - return err - } - return nil - }, -} - -func testCore(ctx context.Context) error { - if _, err := grpcTestDriverServer(ctx, testingAddr); err != nil { - return fmt.Errorf("start test driver gRPC server: %w", err) - } - if _, err := grpcTestSentryServer(ctx, testingAddr); err != nil { - return fmt.Errorf("start test sentry gRPC server: %w", err) - } - <-ctx.Done() - return nil -} - -func grpcTestDriverServer(ctx context.Context, testingAddr string) (*TestDriverServerImpl, error) { - // STARTING GRPC SERVER - log.Info("Starting Test driver server", "on", testingAddr) - listenConfig := net.ListenConfig{ - Control: func(network, address string, _ syscall.RawConn) error { - log.Info("Test driver received connection", "via", network, "from", address) - return nil - }, - } - lis, err := listenConfig.Listen(ctx, "tcp", sentryAddr) - if err != nil { - return nil, fmt.Errorf("could not create Test driver listener: %w, addr=%s", err, testingAddr) - } - var ( - streamInterceptors []grpc.StreamServerInterceptor - unaryInterceptors []grpc.UnaryServerInterceptor - ) - //if metrics.Enabled { - // streamInterceptors = append(streamInterceptors, grpc_prometheus.StreamServerInterceptor) - // unaryInterceptors = append(unaryInterceptors, grpc_prometheus.UnaryServerInterceptor) - //} - streamInterceptors = append(streamInterceptors, grpc_recovery.StreamServerInterceptor()) - unaryInterceptors = 
append(unaryInterceptors, grpc_recovery.UnaryServerInterceptor()) - var grpcServer *grpc.Server - cpus := uint32(runtime.GOMAXPROCS(-1)) - opts := []grpc.ServerOption{ - grpc.NumStreamWorkers(cpus), // reduce amount of goroutines - grpc.WriteBufferSize(1024), // reduce buffers to save mem - grpc.ReadBufferSize(1024), - grpc.MaxConcurrentStreams(100), // to force clients reduce concurrency level - grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: 10 * time.Minute, - }), - // Don't drop the connection, settings accordign to this comment on GitHub - // https://github.com/grpc/grpc-go/issues/3171#issuecomment-552796779 - grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: 10 * time.Second, - PermitWithoutStream: true, - }), - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(streamInterceptors...)), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptors...)), - } - grpcServer = grpc.NewServer(opts...) - - testDriverServer := NewTestDriverServer(ctx) - proto_testing.RegisterTestDriverServer(grpcServer, testDriverServer) - //if metrics.Enabled { - // grpc_prometheus.Register(grpcServer) - //} - go func() { - if err1 := grpcServer.Serve(lis); err1 != nil { - log.Error("Test driver server fail", "err", err1) - } - }() - return testDriverServer, nil -} - -type TestDriverServerImpl struct { - proto_testing.UnimplementedTestDriverServer -} - -func NewTestDriverServer(_ context.Context) *TestDriverServerImpl { - return &TestDriverServerImpl{} -} - -func grpcTestSentryServer(ctx context.Context, sentryAddr string) (*TestSentryServerImpl, error) { - // STARTING GRPC SERVER - log.Info("Starting Test sentry server", "on", testingAddr) - listenConfig := net.ListenConfig{ - Control: func(network, address string, _ syscall.RawConn) error { - log.Info("Test sentry received connection", "via", network, "from", address) - return nil - }, - } - lis, err := listenConfig.Listen(ctx, "tcp", sentryAddr) - if err != nil { - return nil, 
fmt.Errorf("could not create Test sentry listener: %w, addr=%s", err, testingAddr) - } - var ( - streamInterceptors []grpc.StreamServerInterceptor - unaryInterceptors []grpc.UnaryServerInterceptor - ) - //if metrics.Enabled { - // streamInterceptors = append(streamInterceptors, grpc_prometheus.StreamServerInterceptor) - // unaryInterceptors = append(unaryInterceptors, grpc_prometheus.UnaryServerInterceptor) - //} - streamInterceptors = append(streamInterceptors, grpc_recovery.StreamServerInterceptor()) - unaryInterceptors = append(unaryInterceptors, grpc_recovery.UnaryServerInterceptor()) - var grpcServer *grpc.Server - cpus := uint32(runtime.GOMAXPROCS(-1)) - opts := []grpc.ServerOption{ - grpc.NumStreamWorkers(cpus), // reduce amount of goroutines - grpc.WriteBufferSize(1024), // reduce buffers to save mem - grpc.ReadBufferSize(1024), - grpc.MaxConcurrentStreams(100), // to force clients reduce concurrency level - grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: 10 * time.Minute, - }), - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(streamInterceptors...)), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptors...)), - } - grpcServer = grpc.NewServer(opts...) 
- - testSentryServer := NewTestSentryServer(ctx) - proto_sentry.RegisterSentryServer(grpcServer, testSentryServer) - //if metrics.Enabled { - // grpc_prometheus.Register(grpcServer) - //} - go func() { - if err1 := grpcServer.Serve(lis); err1 != nil { - log.Error("Test driver server fail", "err", err1) - } - }() - return testSentryServer, nil -} - -type TestSentryServerImpl struct { - proto_sentry.UnimplementedSentryServer -} - -func NewTestSentryServer(_ context.Context) *TestSentryServerImpl { - return &TestSentryServerImpl{} -} - -func testCons(ctx context.Context) error { - return nil -} diff --git a/cmd/lightclient/LICENSE b/cmd/lightclient/LICENSE new file mode 100644 index 00000000000..23664a7e480 --- /dev/null +++ b/cmd/lightclient/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [2022] Erigon-Lightclient Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/cmd/lightclient/clparams/config.go b/cmd/lightclient/clparams/config.go new file mode 100644 index 00000000000..5bc0bdbab71 --- /dev/null +++ b/cmd/lightclient/clparams/config.go @@ -0,0 +1,658 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package clparams + +import ( + "fmt" + "math" + "time" + + "github.com/ledgerwatch/erigon/cmd/lightclient/utils" + "github.com/ledgerwatch/erigon/common" +) + +type NetworkType int + +const ( + MainnetNetwork NetworkType = 1 + GoerliNetwork NetworkType = 5 + SepoliaNetwork NetworkType = 11155111 +) + +const ( + MaxDialTimeout = 10 * time.Second + VersionLength int = 4 + MaxChunkSize uint64 = 1 << 20 // 1 MiB + ReqTimeout time.Duration = 5 * time.Second + RespTimeout time.Duration = 10 * time.Second +) + +var ( + MainnetBootstrapNodes = []string{ + // Teku team's bootnode + "enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA", + "enr:-KG4QL-eqFoHy0cI31THvtZjpYUu_Jdw_MO7skQRJxY1g5HTN1A0epPCU6vi0gLGUgrzpU-ygeMSS8ewVxDpKfYmxMMGhGV0aDKQtTA_KgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaED8GJ2vzUqgL6-KD1xalo1CsmY4X1HaDnyl6Y_WayCo9GDdGNwgiMog3VkcIIjKA", + // Prylab team's bootnodes + "enr:-Ku4QImhMc1z8yCiNJ1TyUxdcfNucje3BGwEHzodEZUan8PherEo4sF7pPHPSIB1NNuSg5fZy7qFsjmUKs2ea1Whi0EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQOVphkDqal4QzPMksc5wnpuC3gvSC8AfbFOnZY_On34wIN1ZHCCIyg", + "enr:-Ku4QP2xDnEtUXIjzJ_DhlCRN9SN99RYQPJL92TMlSv7U5C1YnYLjwOQHgZIUXw6c-BvRg2Yc2QsZxxoS_pPRVe0yK8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMeFF5GrS7UZpAH2Ly84aLK-TyvH-dRo0JM1i8yygH50YN1ZHCCJxA", + "enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg", + // Lighthouse team's bootnodes + 
"enr:-Jq4QItoFUuug_n_qbYbU0OY04-np2wT8rUCauOOXNi0H3BWbDj-zbfZb7otA7jZ6flbBpx1LNZK2TDebZ9dEKx84LYBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISsaa0ZiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMo", + "enr:-Jq4QN_YBsUOqQsty1OGvYv48PMaiEt1AzGD1NkYQHaxZoTyVGqMYXg0K9c0LPNWC9pkXmggApp8nygYLsQwScwAgfgBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISLosQxiXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMo", + // EF bootnodes + "enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg", + "enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg", + "enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg", + "enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg", + // Nimbus bootnodes + "enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM", + "enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM", + } + GoerliBootstrapNodes = []string{ + 
"enr:-Ku4QFmUkNp0g9bsLX2PfVeIyT-9WO-PZlrqZBNtEyofOOfLMScDjaTzGxIb1Ns9Wo5Pm_8nlq-SZwcQfTH2cgO-s88Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQLV_jMOIxKbjHFKgrkFvwDvpexo6Nd58TK5k7ss4Vt0IoN1ZHCCG1g", + "enr:-LK4QH1xnjotgXwg25IDPjrqRGFnH1ScgNHA3dv1Z8xHCp4uP3N3Jjl_aYv_WIxQRdwZvSukzbwspXZ7JjpldyeVDzMCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhIe1te-Jc2VjcDI1NmsxoQOkcGXqbCJYbcClZ3z5f6NWhX_1YPFRYRRWQpJjwSHpVIN0Y3CCIyiDdWRwgiMo", + "enr:-Ly4QFPk-cTMxZ3jWTafiNblEZkQIXGF2aVzCIGW0uHp6KaEAvBMoctE8S7YU0qZtuS7By0AA4YMfKoN9ls_GJRccVpFh2F0dG5ldHOI__________-EZXRoMpCC9KcrAgAQIIS2AQAAAAAAgmlkgnY0gmlwhKh3joWJc2VjcDI1NmsxoQKrxz8M1IHwJqRIpDqdVW_U1PeixMW5SfnBD-8idYIQrIhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA", + "enr:-L64QJmwSDtaHVgGiqIxJWUtxWg6uLCipsms6j-8BdsOJfTWAs7CLF9HJnVqFE728O-JYUDCxzKvRdeMqBSauHVCMdaCAVWHYXR0bmV0c4j__________4RldGgykIL0pysCABAghLYBAAAAAACCaWSCdjSCaXCEQWxOdolzZWNwMjU2azGhA7Qmod9fK86WidPOzLsn5_8QyzL7ZcJ1Reca7RnD54vuiHN5bmNuZXRzD4N0Y3CCIyiDdWRwgiMo", + "enr:-KG4QCIzJZTY_fs_2vqWEatJL9RrtnPwDCv-jRBuO5FQ2qBrfJubWOWazri6s9HsyZdu-fRUfEzkebhf1nvO42_FVzwDhGV0aDKQed8EKAAAECD__________4JpZIJ2NIJpcISHtbYziXNlY3AyNTZrMaED4m9AqVs6F32rSCGsjtYcsyfQE2K8nDiGmocUY_iq-TSDdGNwgiMog3VkcIIjKA", + } + + SepoliaBootstrapNodes = []string{ + // EF boot nodes + "enr:-Iq4QMCTfIMXnow27baRUb35Q8iiFHSIDBJh6hQM5Axohhf4b6Kr_cOCu0htQ5WvVqKvFgY28893DHAg8gnBAXsAVqmGAX53x8JggmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk", + "enr:-KG4QE5OIg5ThTjkzrlVF32WT_-XT14WeJtIz2zoTqLLjQhYAmJlnk4ItSoH41_2x0RX0wTFIe5GgjRzU2u7Q1fN4vADhGV0aDKQqP7o7pAAAHAyAAAAAAAAAIJpZIJ2NIJpcISlFsStiXNlY3AyNTZrMaEC-Rrd_bBZwhKpXzFCrStKp1q_HmGOewxY3KwM8ofAj_ODdGNwgiMog3VkcIIjKA", + // Teku boot node + 
"enr:-Ly4QFoZTWR8ulxGVsWydTNGdwEESueIdj-wB6UmmjUcm-AOPxnQi7wprzwcdo7-1jBW_JxELlUKJdJES8TDsbl1EdNlh2F0dG5ldHOI__78_v2bsV-EZXRoMpA2-lATkAAAcf__________gmlkgnY0gmlwhBLYJjGJc2VjcDI1NmsxoQI0gujXac9rMAb48NtMqtSTyHIeNYlpjkbYpWJw46PmYYhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA", + } +) + +type NetworkConfig struct { + GossipMaxSize uint64 `json:"gossip_max_size"` // The maximum allowed size of uncompressed gossip messages. + GossipMaxSizeBellatrix uint64 `json:"gossip_max_size_bellatrix"` // The maximum allowed size of bellatrix uncompressed gossip messages. + MaxRequestBlocks uint64 `json:"max_request_blocks"` // Maximum number of blocks in a single request + MinEpochsForBlockRequests uint64 `json:"min_epochs_for_block_requests"` // The minimum epoch range over which a node must serve blocks + MaxChunkSize uint64 `json:"max_chunk_size"` // The maximum allowed size of uncompressed req/resp chunked responses. + AttestationSubnetCount uint64 `json:"attestation_subnet_count"` // The number of attestation subnets used in the gossipsub protocol. + TtfbTimeout time.Duration `json:"ttfbt_timeout"` // The maximum time to wait for first byte of request response (time-to-first-byte). + RespTimeout time.Duration `json:"resp_timeout"` // The maximum time for complete response transfer. + AttestationPropagationSlotRange uint64 `json:"attestation_propagation_slot_range"` // The maximum number of slots during which an attestation can be propagated. + MaximumGossipClockDisparity time.Duration `json:"maximum_gossip_clock_disparity"` // The maximum milliseconds of clock disparity assumed between honest nodes. 
+ MessageDomainInvalidSnappy [4]byte `json:"message_domain_invalid_snappy"` // 4-byte domain for gossip message-id isolation of invalid snappy messages + MessageDomainValidSnappy [4]byte `json:"message_domain_valid_snappy"` // 4-byte domain for gossip message-id isolation of valid snappy messages + + // DiscoveryV5 Config + Eth2key string // ETH2Key is the ENR key of the Ethereum consensus object in an enr. + AttSubnetKey string // AttSubnetKey is the ENR key of the subnet bitfield in the enr. + SyncCommsSubnetKey string // SyncCommsSubnetKey is the ENR key of the sync committee subnet bitfield in the enr. + MinimumPeersInSubnetSearch uint64 // PeersInSubnetSearch is the required amount of peers that we need to be able to lookup in a subnet search. + + ContractDeploymentBlock uint64 // the eth1 block in which the deposit contract is deployed. + BootNodes []string +} + +type GenesisConfig struct { + GenesisValidatorRoot common.Hash // Merkle Root at Genesis + GenesisTime uint64 // Unix time at Genesis +} + +var NetworkConfigs map[NetworkType]NetworkConfig = map[NetworkType]NetworkConfig{ + MainnetNetwork: { + GossipMaxSize: 1 << 20, // 1 MiB + GossipMaxSizeBellatrix: 10485760, + MaxChunkSize: MaxChunkSize, + AttestationSubnetCount: 64, + AttestationPropagationSlotRange: 32, + MaxRequestBlocks: 1 << 10, // 1024 + TtfbTimeout: ReqTimeout, + RespTimeout: RespTimeout, + MaximumGossipClockDisparity: 500 * time.Millisecond, + MessageDomainInvalidSnappy: [4]byte{00, 00, 00, 00}, + MessageDomainValidSnappy: [4]byte{01, 00, 00, 00}, + Eth2key: "eth2", + AttSubnetKey: "attnets", + SyncCommsSubnetKey: "syncnets", + MinimumPeersInSubnetSearch: 20, + ContractDeploymentBlock: 11184524, + BootNodes: MainnetBootstrapNodes, + }, + + SepoliaNetwork: { + GossipMaxSize: 1 << 20, // 1 MiB + GossipMaxSizeBellatrix: 10485760, + MaxChunkSize: 1 << 20, // 1 MiB + AttestationSubnetCount: 64, + AttestationPropagationSlotRange: 32, + MaxRequestBlocks: 1 << 10, // 1024 + TtfbTimeout: 
ReqTimeout, + RespTimeout: RespTimeout, + MaximumGossipClockDisparity: 500 * time.Millisecond, + MessageDomainInvalidSnappy: [4]byte{00, 00, 00, 00}, + MessageDomainValidSnappy: [4]byte{01, 00, 00, 00}, + Eth2key: "eth2", + AttSubnetKey: "attnets", + SyncCommsSubnetKey: "syncnets", + MinimumPeersInSubnetSearch: 20, + ContractDeploymentBlock: 1273020, + BootNodes: SepoliaBootstrapNodes, + }, + + GoerliNetwork: { + GossipMaxSize: 1 << 20, // 1 MiB + GossipMaxSizeBellatrix: 10485760, + MaxChunkSize: 1 << 20, // 1 MiB + AttestationSubnetCount: 64, + AttestationPropagationSlotRange: 32, + MaxRequestBlocks: 1 << 10, // 1024 + TtfbTimeout: ReqTimeout, + RespTimeout: RespTimeout, + MaximumGossipClockDisparity: 500 * time.Millisecond, + MessageDomainInvalidSnappy: [4]byte{00, 00, 00, 00}, + MessageDomainValidSnappy: [4]byte{01, 00, 00, 00}, + Eth2key: "eth2", + AttSubnetKey: "attnets", + SyncCommsSubnetKey: "syncnets", + MinimumPeersInSubnetSearch: 20, + ContractDeploymentBlock: 4367322, + BootNodes: GoerliBootstrapNodes, + }, +} + +var GenesisConfigs map[NetworkType]GenesisConfig = map[NetworkType]GenesisConfig{ + MainnetNetwork: { + GenesisValidatorRoot: common.HexToHash("4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"), + GenesisTime: 1606824023, + }, + SepoliaNetwork: { + GenesisValidatorRoot: common.HexToHash("d8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"), + GenesisTime: 1655733600, + }, + GoerliNetwork: { + GenesisValidatorRoot: common.HexToHash("043db0d9a83813551ee2f33450d23797757d430911a9320530ad8a0eabc43efb"), + GenesisTime: 1616508000, + }, +} + +// BeaconChainConfig contains constant configs for node to participate in beacon chain. +type BeaconChainConfig struct { + // Constants (non-configurable) + GenesisSlot uint64 `yaml:"GENESIS_SLOT"` // GenesisSlot represents the first canonical slot number of the beacon chain. 
+ GenesisEpoch uint64 `yaml:"GENESIS_EPOCH"` // GenesisEpoch represents the first canonical epoch number of the beacon chain. + FarFutureEpoch uint64 `yaml:"FAR_FUTURE_EPOCH"` // FarFutureEpoch represents a epoch extremely far away in the future used as the default penalization epoch for validators. + FarFutureSlot uint64 `yaml:"FAR_FUTURE_SLOT"` // FarFutureSlot represents a slot extremely far away in the future. + BaseRewardsPerEpoch uint64 `yaml:"BASE_REWARDS_PER_EPOCH"` // BaseRewardsPerEpoch is used to calculate the per epoch rewards. + DepositContractTreeDepth uint64 `yaml:"DEPOSIT_CONTRACT_TREE_DEPTH"` // DepositContractTreeDepth depth of the Merkle trie of deposits in the validator deposit contract on the PoW chain. + JustificationBitsLength uint64 `yaml:"JUSTIFICATION_BITS_LENGTH"` // JustificationBitsLength defines number of epochs to track when implementing k-finality in Casper FFG. + + // Misc constants. + PresetBase string `yaml:"PRESET_BASE" spec:"true"` // PresetBase represents the underlying spec preset this config is based on. + ConfigName string `yaml:"CONFIG_NAME" spec:"true"` // ConfigName for allowing an easy human-readable way of knowing what chain is being used. + TargetCommitteeSize uint64 `yaml:"TARGET_COMMITTEE_SIZE" spec:"true"` // TargetCommitteeSize is the number of validators in a committee when the chain is healthy. + MaxValidatorsPerCommittee uint64 `yaml:"MAX_VALIDATORS_PER_COMMITTEE" spec:"true"` // MaxValidatorsPerCommittee defines the upper bound of the size of a committee. + MaxCommitteesPerSlot uint64 `yaml:"MAX_COMMITTEES_PER_SLOT" spec:"true"` // MaxCommitteesPerSlot defines the max amount of committee in a single slot. + MinPerEpochChurnLimit uint64 `yaml:"MIN_PER_EPOCH_CHURN_LIMIT" spec:"true"` // MinPerEpochChurnLimit is the minimum amount of churn allotted for validator rotations. 
+ ChurnLimitQuotient uint64 `yaml:"CHURN_LIMIT_QUOTIENT" spec:"true"` // ChurnLimitQuotient is used to determine the limit of how many validators can rotate per epoch. + ShuffleRoundCount uint64 `yaml:"SHUFFLE_ROUND_COUNT" spec:"true"` // ShuffleRoundCount is used for retrieving the permuted index. + MinGenesisActiveValidatorCount uint64 `yaml:"MIN_GENESIS_ACTIVE_VALIDATOR_COUNT" spec:"true"` // MinGenesisActiveValidatorCount defines how many validator deposits needed to kick off beacon chain. + MinGenesisTime uint64 `yaml:"MIN_GENESIS_TIME" spec:"true"` // MinGenesisTime is the time that needed to pass before kicking off beacon chain. + TargetAggregatorsPerCommittee uint64 `yaml:"TARGET_AGGREGATORS_PER_COMMITTEE" spec:"true"` // TargetAggregatorsPerCommittee defines the number of aggregators inside one committee. + HysteresisQuotient uint64 `yaml:"HYSTERESIS_QUOTIENT" spec:"true"` // HysteresisQuotient defines the hysteresis quotient for effective balance calculations. + HysteresisDownwardMultiplier uint64 `yaml:"HYSTERESIS_DOWNWARD_MULTIPLIER" spec:"true"` // HysteresisDownwardMultiplier defines the hysteresis downward multiplier for effective balance calculations. + HysteresisUpwardMultiplier uint64 `yaml:"HYSTERESIS_UPWARD_MULTIPLIER" spec:"true"` // HysteresisUpwardMultiplier defines the hysteresis upward multiplier for effective balance calculations. + + // Gwei value constants. + MinDepositAmount uint64 `yaml:"MIN_DEPOSIT_AMOUNT" spec:"true"` // MinDepositAmount is the minimum amount of Gwei a validator can send to the deposit contract at once (lower amounts will be reverted). + MaxEffectiveBalance uint64 `yaml:"MAX_EFFECTIVE_BALANCE" spec:"true"` // MaxEffectiveBalance is the maximal amount of Gwei that is effective for staking. + EjectionBalance uint64 `yaml:"EJECTION_BALANCE" spec:"true"` // EjectionBalance is the minimal GWei a validator needs to have before ejected. 
+ EffectiveBalanceIncrement uint64 `yaml:"EFFECTIVE_BALANCE_INCREMENT" spec:"true"` // EffectiveBalanceIncrement is used for converting the high balance into the low balance for validators. + + // Initial value constants. + BLSWithdrawalPrefixByte byte `yaml:"BLS_WITHDRAWAL_PREFIX" spec:"true"` // BLSWithdrawalPrefixByte is used for BLS withdrawal and it's the first byte. + ETH1AddressWithdrawalPrefixByte byte `yaml:"ETH1_ADDRESS_WITHDRAWAL_PREFIX" spec:"true"` // ETH1AddressWithdrawalPrefixByte is used for withdrawals and it's the first byte. + ZeroHash [32]byte // ZeroHash is used to represent a zeroed out 32 byte array. + + // Time parameters constants. + GenesisDelay uint64 `yaml:"GENESIS_DELAY" spec:"true"` // GenesisDelay is the minimum number of seconds to delay starting the Ethereum Beacon Chain genesis. Must be at least 1 second. + MinAttestationInclusionDelay uint64 `yaml:"MIN_ATTESTATION_INCLUSION_DELAY" spec:"true"` // MinAttestationInclusionDelay defines how many slots validator has to wait to include attestation for beacon block. + SecondsPerSlot uint64 `yaml:"SECONDS_PER_SLOT" spec:"true"` // SecondsPerSlot is how many seconds are in a single slot. + SlotsPerEpoch uint64 `yaml:"SLOTS_PER_EPOCH" spec:"true"` // SlotsPerEpoch is the number of slots in an epoch. + SqrRootSlotsPerEpoch uint64 // SqrRootSlotsPerEpoch is a hard coded value where we take the square root of `SlotsPerEpoch` and round down. + MinSeedLookahead uint64 `yaml:"MIN_SEED_LOOKAHEAD" spec:"true"` // MinSeedLookahead is the duration of randao look ahead seed. + MaxSeedLookahead uint64 `yaml:"MAX_SEED_LOOKAHEAD" spec:"true"` // MaxSeedLookahead is the duration a validator has to wait for entry and exit in epoch. + EpochsPerEth1VotingPeriod uint64 `yaml:"EPOCHS_PER_ETH1_VOTING_PERIOD" spec:"true"` // EpochsPerEth1VotingPeriod defines how often the merkle root of deposit receipts get updated in beacon node on per epoch basis. 
+ SlotsPerHistoricalRoot uint64 `yaml:"SLOTS_PER_HISTORICAL_ROOT" spec:"true"` // SlotsPerHistoricalRoot defines how often the historical root is saved. + MinValidatorWithdrawabilityDelay uint64 `yaml:"MIN_VALIDATOR_WITHDRAWABILITY_DELAY" spec:"true"` // MinValidatorWithdrawabilityDelay is the shortest amount of time a validator has to wait to withdraw. + ShardCommitteePeriod uint64 `yaml:"SHARD_COMMITTEE_PERIOD" spec:"true"` // ShardCommitteePeriod is the minimum amount of epochs a validator must participate before exiting. + MinEpochsToInactivityPenalty uint64 `yaml:"MIN_EPOCHS_TO_INACTIVITY_PENALTY" spec:"true"` // MinEpochsToInactivityPenalty defines the minimum amount of epochs since finality to begin penalizing inactivity. + Eth1FollowDistance uint64 `yaml:"ETH1_FOLLOW_DISTANCE" spec:"true"` // Eth1FollowDistance is the number of eth1.0 blocks to wait before considering a new deposit for voting. This only applies after the chain as been started. + SafeSlotsToUpdateJustified uint64 `yaml:"SAFE_SLOTS_TO_UPDATE_JUSTIFIED" spec:"true"` // SafeSlotsToUpdateJustified is the minimal slots needed to update justified check point. + DeprecatedSafeSlotsToImportOptimistically uint64 `yaml:"SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY" spec:"true"` // SafeSlotsToImportOptimistically is the minimal number of slots to wait before importing optimistically a pre-merge block + SecondsPerETH1Block uint64 `yaml:"SECONDS_PER_ETH1_BLOCK" spec:"true"` // SecondsPerETH1Block is the approximate time for a single eth1 block to be produced. + + // Fork choice algorithm constants. + ProposerScoreBoost uint64 `yaml:"PROPOSER_SCORE_BOOST" spec:"true"` // ProposerScoreBoost defines a value that is a % of the committee weight for fork-choice boosting. + IntervalsPerSlot uint64 `yaml:"INTERVALS_PER_SLOT" spec:"true"` // IntervalsPerSlot defines the number of fork choice intervals in a slot defined in the fork choice spec. + + // Ethereum PoW parameters. 
+ DepositChainID uint64 `yaml:"DEPOSIT_CHAIN_ID" spec:"true"` // DepositChainID of the eth1 network. This used for replay protection. + DepositNetworkID uint64 `yaml:"DEPOSIT_NETWORK_ID" spec:"true"` // DepositNetworkID of the eth1 network. This used for replay protection. + DepositContractAddress string `yaml:"DEPOSIT_CONTRACT_ADDRESS" spec:"true"` // DepositContractAddress is the address of the deposit contract. + + // Validator parameters. + RandomSubnetsPerValidator uint64 `yaml:"RANDOM_SUBNETS_PER_VALIDATOR" spec:"true"` // RandomSubnetsPerValidator specifies the amount of subnets a validator has to be subscribed to at one time. + EpochsPerRandomSubnetSubscription uint64 `yaml:"EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION" spec:"true"` // EpochsPerRandomSubnetSubscription specifies the minimum duration a validator is connected to their subnet. + + // State list lengths + EpochsPerHistoricalVector uint64 `yaml:"EPOCHS_PER_HISTORICAL_VECTOR" spec:"true"` // EpochsPerHistoricalVector defines max length in epoch to store old historical stats in beacon state. + EpochsPerSlashingsVector uint64 `yaml:"EPOCHS_PER_SLASHINGS_VECTOR" spec:"true"` // EpochsPerSlashingsVector defines max length in epoch to store old stats to recompute slashing witness. + HistoricalRootsLimit uint64 `yaml:"HISTORICAL_ROOTS_LIMIT" spec:"true"` // HistoricalRootsLimit defines max historical roots that can be saved in state before roll over. + ValidatorRegistryLimit uint64 `yaml:"VALIDATOR_REGISTRY_LIMIT" spec:"true"` // ValidatorRegistryLimit defines the upper bound of validators can participate in eth2. + + // Reward and penalty quotients constants. + BaseRewardFactor uint64 `yaml:"BASE_REWARD_FACTOR" spec:"true"` // BaseRewardFactor is used to calculate validator per-slot interest rate. + WhistleBlowerRewardQuotient uint64 `yaml:"WHISTLEBLOWER_REWARD_QUOTIENT" spec:"true"` // WhistleBlowerRewardQuotient is used to calculate whistle blower reward. 
+ ProposerRewardQuotient uint64 `yaml:"PROPOSER_REWARD_QUOTIENT" spec:"true"` // ProposerRewardQuotient is used to calculate the reward for proposers. + InactivityPenaltyQuotient uint64 `yaml:"INACTIVITY_PENALTY_QUOTIENT" spec:"true"` // InactivityPenaltyQuotient is used to calculate the penalty for a validator that is offline. + MinSlashingPenaltyQuotient uint64 `yaml:"MIN_SLASHING_PENALTY_QUOTIENT" spec:"true"` // MinSlashingPenaltyQuotient is used to calculate the minimum penalty to prevent DoS attacks. + ProportionalSlashingMultiplier uint64 `yaml:"PROPORTIONAL_SLASHING_MULTIPLIER" spec:"true"` // ProportionalSlashingMultiplier is used as a multiplier on slashed penalties. + + // Max operations per block constants. + MaxProposerSlashings uint64 `yaml:"MAX_PROPOSER_SLASHINGS" spec:"true"` // MaxProposerSlashings defines the maximum number of slashings of proposers possible in a block. + MaxAttesterSlashings uint64 `yaml:"MAX_ATTESTER_SLASHINGS" spec:"true"` // MaxAttesterSlashings defines the maximum number of casper FFG slashings possible in a block. + MaxAttestations uint64 `yaml:"MAX_ATTESTATIONS" spec:"true"` // MaxAttestations defines the maximum allowed attestations in a beacon block. + MaxDeposits uint64 `yaml:"MAX_DEPOSITS" spec:"true"` // MaxDeposits defines the maximum number of validator deposits in a block. + MaxVoluntaryExits uint64 `yaml:"MAX_VOLUNTARY_EXITS" spec:"true"` // MaxVoluntaryExits defines the maximum number of validator exits in a block. + + // BLS domain values. + DomainBeaconProposer [4]byte `yaml:"DOMAIN_BEACON_PROPOSER" spec:"true"` // DomainBeaconProposer defines the BLS signature domain for beacon proposal verification. + DomainRandao [4]byte `yaml:"DOMAIN_RANDAO" spec:"true"` // DomainRandao defines the BLS signature domain for randao verification. + DomainBeaconAttester [4]byte `yaml:"DOMAIN_BEACON_ATTESTER" spec:"true"` // DomainBeaconAttester defines the BLS signature domain for attestation verification. 
+	DomainDeposit                       [4]byte `yaml:"DOMAIN_DEPOSIT" spec:"true"`                    // DomainDeposit defines the BLS signature domain for deposit verification. +	DomainVoluntaryExit                 [4]byte `yaml:"DOMAIN_VOLUNTARY_EXIT" spec:"true"`             // DomainVoluntaryExit defines the BLS signature domain for exit verification. +	DomainSelectionProof                [4]byte `yaml:"DOMAIN_SELECTION_PROOF" spec:"true"`            // DomainSelectionProof defines the BLS signature domain for selection proof. +	DomainAggregateAndProof             [4]byte `yaml:"DOMAIN_AGGREGATE_AND_PROOF" spec:"true"`        // DomainAggregateAndProof defines the BLS signature domain for aggregate and proof. +	DomainSyncCommittee                 [4]byte `yaml:"DOMAIN_SYNC_COMMITTEE" spec:"true"`             // DomainSyncCommittee defines the BLS signature domain for sync committee. +	DomainSyncCommitteeSelectionProof   [4]byte `yaml:"DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF" spec:"true"` // DomainSyncCommitteeSelectionProof defines the BLS signature domain for sync committee selection proof. +	DomainContributionAndProof          [4]byte `yaml:"DOMAIN_CONTRIBUTION_AND_PROOF" spec:"true"`     // DomainContributionAndProof defines the BLS signature domain for contribution and proof. +	DomainApplicationMask               [4]byte `yaml:"DOMAIN_APPLICATION_MASK" spec:"true"`           // DomainApplicationMask defines the BLS signature domain for application mask. +	DomainApplicationBuilder            [4]byte // DomainApplicationBuilder defines the BLS signature domain for application builder. +	DomainBLSToExecutionChange          [4]byte // DomainBLSToExecutionChange defines the BLS signature domain to change withdrawal addresses to ETH1 prefix + +	// Prysm constants. +	GweiPerEth                     uint64        // GweiPerEth is the amount of gwei corresponding to 1 eth. +	BLSSecretKeyLength             int           // BLSSecretKeyLength defines the expected length of BLS secret keys in bytes. +	BLSPubkeyLength                int           // BLSPubkeyLength defines the expected length of BLS public keys in bytes. +	DefaultBufferSize              int           // DefaultBufferSize for channels across the Prysm repository.
+ ValidatorPrivkeyFileName string // ValidatorPrivKeyFileName specifies the string name of a validator private key file. + WithdrawalPrivkeyFileName string // WithdrawalPrivKeyFileName specifies the string name of a withdrawal private key file. + RPCSyncCheck time.Duration // Number of seconds to query the sync service, to find out if the node is synced or not. + EmptySignature [96]byte // EmptySignature is used to represent a zeroed out BLS Signature. + DefaultPageSize int // DefaultPageSize defines the default page size for RPC server request. + MaxPeersToSync int // MaxPeersToSync describes the limit for number of peers in round robin sync. + SlotsPerArchivedPoint uint64 // SlotsPerArchivedPoint defines the number of slots per one archived point. + GenesisCountdownInterval time.Duration // How often to log the countdown until the genesis time is reached. + BeaconStateFieldCount int // BeaconStateFieldCount defines how many fields are in beacon state. + BeaconStateAltairFieldCount int // BeaconStateAltairFieldCount defines how many fields are in beacon state hard fork 1. + BeaconStateBellatrixFieldCount int // BeaconStateBellatrixFieldCount defines how many fields are in beacon state post upgrade to the Bellatrix. + + // Slasher constants. + WeakSubjectivityPeriod uint64 // WeakSubjectivityPeriod defines the time period expressed in number of epochs were proof of stake network should validate block headers and attestations for slashable events. + PruneSlasherStoragePeriod uint64 // PruneSlasherStoragePeriod defines the time period expressed in number of epochs were proof of stake network should prune attestation and block header store. + + // Slashing protection constants. + SlashingProtectionPruningEpochs uint64 // SlashingProtectionPruningEpochs defines a period after which all prior epochs are pruned in the validator database. + + // Fork-related values. 
+ GenesisForkVersion []byte `yaml:"GENESIS_FORK_VERSION" spec:"true"` // GenesisForkVersion is used to track fork version between state transitions. + AltairForkVersion []byte `yaml:"ALTAIR_FORK_VERSION" spec:"true"` // AltairForkVersion is used to represent the fork version for altair. + AltairForkEpoch uint64 `yaml:"ALTAIR_FORK_EPOCH" spec:"true"` // AltairForkEpoch is used to represent the assigned fork epoch for altair. + BellatrixForkVersion []byte `yaml:"BELLATRIX_FORK_VERSION" spec:"true"` // BellatrixForkVersion is used to represent the fork version for bellatrix. + BellatrixForkEpoch uint64 `yaml:"BELLATRIX_FORK_EPOCH" spec:"true"` // BellatrixForkEpoch is used to represent the assigned fork epoch for bellatrix. + ShardingForkVersion []byte `yaml:"SHARDING_FORK_VERSION" spec:"true"` // ShardingForkVersion is used to represent the fork version for sharding. + ShardingForkEpoch uint64 `yaml:"SHARDING_FORK_EPOCH" spec:"true"` // ShardingForkEpoch is used to represent the assigned fork epoch for sharding. + CapellaForkVersion []byte `yaml:"CAPELLA_FORK_VERSION" spec:"true"` // CapellaForkVersion is used to represent the fork version for capella. + CapellaForkEpoch uint64 `yaml:"CAPELLA_FORK_EPOCH" spec:"true"` // CapellaForkEpoch is used to represent the assigned fork epoch for capella. + + ForkVersionSchedule map[[VersionLength]byte]uint64 // Schedule of fork epochs by version. + ForkVersionNames map[[VersionLength]byte]string // Human-readable names of fork versions. + + // Weak subjectivity values. + SafetyDecay uint64 // SafetyDecay is defined as the loss in the 1/3 consensus safety margin of the casper FFG mechanism. + + // New values introduced in Altair hard fork 1. + // Participation flag indices. + TimelySourceFlagIndex uint8 `yaml:"TIMELY_SOURCE_FLAG_INDEX" spec:"true"` // TimelySourceFlagIndex is the source flag position of the participation bits. 
+ TimelyTargetFlagIndex uint8 `yaml:"TIMELY_TARGET_FLAG_INDEX" spec:"true"` // TimelyTargetFlagIndex is the target flag position of the participation bits. + TimelyHeadFlagIndex uint8 `yaml:"TIMELY_HEAD_FLAG_INDEX" spec:"true"` // TimelyHeadFlagIndex is the head flag position of the participation bits. + + // Incentivization weights. + TimelySourceWeight uint64 `yaml:"TIMELY_SOURCE_WEIGHT" spec:"true"` // TimelySourceWeight is the factor of how much source rewards receives. + TimelyTargetWeight uint64 `yaml:"TIMELY_TARGET_WEIGHT" spec:"true"` // TimelyTargetWeight is the factor of how much target rewards receives. + TimelyHeadWeight uint64 `yaml:"TIMELY_HEAD_WEIGHT" spec:"true"` // TimelyHeadWeight is the factor of how much head rewards receives. + SyncRewardWeight uint64 `yaml:"SYNC_REWARD_WEIGHT" spec:"true"` // SyncRewardWeight is the factor of how much sync committee rewards receives. + WeightDenominator uint64 `yaml:"WEIGHT_DENOMINATOR" spec:"true"` // WeightDenominator accounts for total rewards denomination. + ProposerWeight uint64 `yaml:"PROPOSER_WEIGHT" spec:"true"` // ProposerWeight is the factor of how much proposer rewards receives. + + // Validator related. + TargetAggregatorsPerSyncSubcommittee uint64 `yaml:"TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE" spec:"true"` // TargetAggregatorsPerSyncSubcommittee for aggregating in sync committee. + SyncCommitteeSubnetCount uint64 `yaml:"SYNC_COMMITTEE_SUBNET_COUNT" spec:"true"` // SyncCommitteeSubnetCount for sync committee subnet count. + + // Misc. + SyncCommitteeSize uint64 `yaml:"SYNC_COMMITTEE_SIZE" spec:"true"` // SyncCommitteeSize for light client sync committee size. + InactivityScoreBias uint64 `yaml:"INACTIVITY_SCORE_BIAS" spec:"true"` // InactivityScoreBias for calculating score bias penalties during inactivity + InactivityScoreRecoveryRate uint64 `yaml:"INACTIVITY_SCORE_RECOVERY_RATE" spec:"true"` // InactivityScoreRecoveryRate for recovering score bias penalties during inactivity. 
+	EpochsPerSyncCommitteePeriod uint64 `yaml:"EPOCHS_PER_SYNC_COMMITTEE_PERIOD" spec:"true"` // EpochsPerSyncCommitteePeriod defines how many epochs per sync committee period. + +	// Updated penalty values. This moves penalty parameters toward their final, maximum security values. +	// Note: We do not override previous configuration values but instead create new values and replace usage throughout. +	InactivityPenaltyQuotientAltair         uint64 `yaml:"INACTIVITY_PENALTY_QUOTIENT_ALTAIR" spec:"true"`         // InactivityPenaltyQuotientAltair for penalties during inactivity post Altair hard fork. +	MinSlashingPenaltyQuotientAltair        uint64 `yaml:"MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR" spec:"true"`       // MinSlashingPenaltyQuotientAltair for slashing penalties post Altair hard fork. +	ProportionalSlashingMultiplierAltair    uint64 `yaml:"PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR" spec:"true"`    // ProportionalSlashingMultiplierAltair for slashing penalties' multiplier post Altair hard fork. +	MinSlashingPenaltyQuotientBellatrix     uint64 `yaml:"MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX" spec:"true"`    // MinSlashingPenaltyQuotientBellatrix for slashing penalties post Bellatrix hard fork. +	ProportionalSlashingMultiplierBellatrix uint64 `yaml:"PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX" spec:"true"` // ProportionalSlashingMultiplierBellatrix for slashing penalties' multiplier post Bellatrix hard fork. +	InactivityPenaltyQuotientBellatrix      uint64 `yaml:"INACTIVITY_PENALTY_QUOTIENT_BELLATRIX" spec:"true"`      // InactivityPenaltyQuotientBellatrix for penalties during inactivity post Bellatrix hard fork. + +	// Light client +	MinSyncCommitteeParticipants uint64 `yaml:"MIN_SYNC_COMMITTEE_PARTICIPANTS" spec:"true"` // MinSyncCommitteeParticipants defines the minimum amount of sync committee participants for which the light client acknowledges the signature. + +	// Bellatrix +	TerminalBlockHash                common.Hash `yaml:"TERMINAL_BLOCK_HASH" spec:"true"`                  // TerminalBlockHash of beacon chain.
+	TerminalBlockHashActivationEpoch uint64      `yaml:"TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH" spec:"true"` // TerminalBlockHashActivationEpoch of beacon chain. +	TerminalTotalDifficulty          string      `yaml:"TERMINAL_TOTAL_DIFFICULTY" spec:"true"`            // TerminalTotalDifficulty is part of the experimental Bellatrix spec. This value's type is currently TBD. +	DefaultFeeRecipient              common.Address // DefaultFeeRecipient where the transaction fee goes to. +	EthBurnAddressHex                string      // EthBurnAddressHex is the constant eth address written in hex format to burn fees in that network. The default is 0x0. +	DefaultBuilderGasLimit           uint64      // DefaultBuilderGasLimit is the default used to set the gaslimit for the Builder APIs, typically at around 30M wei. + +	// Mev-boost circuit breaker +	MaxBuilderConsecutiveMissedSlots uint64 // MaxBuilderConsecutiveMissedSlots defines the number of consecutive skip slots to fallback from using relay/builder to local execution engine for block construction. +	MaxBuilderEpochMissedSlots       uint64 // MaxBuilderEpochMissedSlots defines the number of total skip slots (per epoch rolling windows) to fallback from using relay/builder to local execution engine for block construction. +} + +// InitializeForkSchedule initializes the scheduled forks baked into the config.
+func (b *BeaconChainConfig) InitializeForkSchedule() { + b.ForkVersionSchedule = configForkSchedule(b) + b.ForkVersionNames = configForkNames(b) +} + +func toBytes4(in []byte) (ret [4]byte) { + copy(ret[:], in) + return +} + +func configForkSchedule(b *BeaconChainConfig) map[[VersionLength]byte]uint64 { + fvs := map[[VersionLength]byte]uint64{} + fvs[toBytes4(b.GenesisForkVersion)] = b.GenesisEpoch + fvs[toBytes4(b.AltairForkVersion)] = b.AltairForkEpoch + fvs[toBytes4(b.BellatrixForkVersion)] = b.BellatrixForkEpoch + return fvs +} + +func configForkNames(b *BeaconChainConfig) map[[VersionLength]byte]string { + fvn := map[[VersionLength]byte]string{} + fvn[toBytes4(b.GenesisForkVersion)] = "phase0" + fvn[toBytes4(b.AltairForkVersion)] = "altair" + fvn[toBytes4(b.BellatrixForkVersion)] = "bellatrix" + return fvn +} + +var mainnetBeaconConfig BeaconChainConfig = BeaconChainConfig{ + // Constants (Non-configurable) + FarFutureEpoch: math.MaxUint64, + FarFutureSlot: math.MaxUint64, + BaseRewardsPerEpoch: 4, + DepositContractTreeDepth: 32, + GenesisDelay: 604800, // 1 week. + + // Misc constant. + TargetCommitteeSize: 128, + MaxValidatorsPerCommittee: 2048, + MaxCommitteesPerSlot: 64, + MinPerEpochChurnLimit: 4, + ChurnLimitQuotient: 1 << 16, + ShuffleRoundCount: 90, + MinGenesisActiveValidatorCount: 16384, + MinGenesisTime: 1606824000, // Dec 1, 2020, 12pm UTC. + TargetAggregatorsPerCommittee: 16, + HysteresisQuotient: 4, + HysteresisDownwardMultiplier: 1, + HysteresisUpwardMultiplier: 5, + + // Gwei value constants. + MinDepositAmount: 1 * 1e9, + MaxEffectiveBalance: 32 * 1e9, + EjectionBalance: 16 * 1e9, + EffectiveBalanceIncrement: 1 * 1e9, + + // Initial value constants. + BLSWithdrawalPrefixByte: byte(0), + ETH1AddressWithdrawalPrefixByte: byte(1), + ZeroHash: [32]byte{}, + + // Time parameter constants. 
+ MinAttestationInclusionDelay: 1, + SecondsPerSlot: 12, + SlotsPerEpoch: 32, + SqrRootSlotsPerEpoch: 5, + MinSeedLookahead: 1, + MaxSeedLookahead: 4, + EpochsPerEth1VotingPeriod: 64, + SlotsPerHistoricalRoot: 8192, + MinValidatorWithdrawabilityDelay: 256, + ShardCommitteePeriod: 256, + MinEpochsToInactivityPenalty: 4, + Eth1FollowDistance: 2048, + SafeSlotsToUpdateJustified: 8, + + // Fork choice algorithm constants. + ProposerScoreBoost: 40, + IntervalsPerSlot: 3, + + // Ethereum PoW parameters. + DepositChainID: 1, // Chain ID of eth1 mainnet. + DepositNetworkID: 1, // Network ID of eth1 mainnet. + DepositContractAddress: "0x00000000219ab540356cBB839Cbe05303d7705Fa", + + // Validator params. + RandomSubnetsPerValidator: 1 << 0, + EpochsPerRandomSubnetSubscription: 1 << 8, + + // While eth1 mainnet block times are closer to 13s, we must conform with other clients in + // order to vote on the correct eth1 blocks. + // + // Additional context: https://github.com/ethereum/consensus-specs/issues/2132 + // Bug prompting this change: https://github.com/prysmaticlabs/prysm/issues/7856 + // Future optimization: https://github.com/prysmaticlabs/prysm/issues/7739 + SecondsPerETH1Block: 14, + + // State list length constants. + EpochsPerHistoricalVector: 65536, + EpochsPerSlashingsVector: 8192, + HistoricalRootsLimit: 16777216, + ValidatorRegistryLimit: 1099511627776, + + // Reward and penalty quotients constants. + BaseRewardFactor: 64, + WhistleBlowerRewardQuotient: 512, + ProposerRewardQuotient: 8, + InactivityPenaltyQuotient: 67108864, + MinSlashingPenaltyQuotient: 128, + ProportionalSlashingMultiplier: 1, + + // Max operations per block constants. + MaxProposerSlashings: 16, + MaxAttesterSlashings: 2, + MaxAttestations: 128, + MaxDeposits: 16, + MaxVoluntaryExits: 16, + + // BLS domain values. 
+ DomainBeaconProposer: utils.Uint32ToBytes4(0x00000000), + DomainBeaconAttester: utils.Uint32ToBytes4(0x01000000), + DomainRandao: utils.Uint32ToBytes4(0x02000000), + DomainDeposit: utils.Uint32ToBytes4(0x03000000), + DomainVoluntaryExit: utils.Uint32ToBytes4(0x04000000), + DomainSelectionProof: utils.Uint32ToBytes4(0x05000000), + DomainAggregateAndProof: utils.Uint32ToBytes4(0x06000000), + DomainSyncCommittee: utils.Uint32ToBytes4(0x07000000), + DomainSyncCommitteeSelectionProof: utils.Uint32ToBytes4(0x08000000), + DomainContributionAndProof: utils.Uint32ToBytes4(0x09000000), + DomainApplicationMask: utils.Uint32ToBytes4(0x00000001), + DomainApplicationBuilder: utils.Uint32ToBytes4(0x00000001), + DomainBLSToExecutionChange: utils.Uint32ToBytes4(0x0A000000), + + // Prysm constants. + GweiPerEth: 1000000000, + BLSSecretKeyLength: 32, + BLSPubkeyLength: 48, + DefaultBufferSize: 10000, + WithdrawalPrivkeyFileName: "/shardwithdrawalkey", + ValidatorPrivkeyFileName: "/validatorprivatekey", + RPCSyncCheck: 1, + EmptySignature: [96]byte{}, + DefaultPageSize: 250, + MaxPeersToSync: 15, + SlotsPerArchivedPoint: 2048, + GenesisCountdownInterval: time.Minute, + ConfigName: "mainnet", + PresetBase: "mainnet", + BeaconStateFieldCount: 21, + BeaconStateAltairFieldCount: 24, + BeaconStateBellatrixFieldCount: 25, + + // Slasher related values. + WeakSubjectivityPeriod: 54000, + PruneSlasherStoragePeriod: 10, + SlashingProtectionPruningEpochs: 512, + + // Weak subjectivity values. + SafetyDecay: 10, + + // Fork related values. + GenesisEpoch: 0, + GenesisForkVersion: []byte{0, 0, 0, 0}, + AltairForkVersion: []byte{1, 0, 0, 0}, + AltairForkEpoch: 74240, + BellatrixForkVersion: []byte{2, 0, 0, 0}, + BellatrixForkEpoch: 144869, + CapellaForkVersion: []byte{3, 0, 0, 0}, + CapellaForkEpoch: math.MaxUint64, + ShardingForkVersion: []byte{4, 0, 0, 0}, + ShardingForkEpoch: math.MaxUint64, + + // New values introduced in Altair hard fork 1. + // Participation flag indices. 
+ TimelySourceFlagIndex: 0, + TimelyTargetFlagIndex: 1, + TimelyHeadFlagIndex: 2, + + // Incentivization weight values. + TimelySourceWeight: 14, + TimelyTargetWeight: 26, + TimelyHeadWeight: 14, + SyncRewardWeight: 2, + ProposerWeight: 8, + WeightDenominator: 64, + + // Validator related values. + TargetAggregatorsPerSyncSubcommittee: 16, + SyncCommitteeSubnetCount: 4, + + // Misc values. + SyncCommitteeSize: 512, + InactivityScoreBias: 4, + InactivityScoreRecoveryRate: 16, + EpochsPerSyncCommitteePeriod: 256, + + // Updated penalty values. + InactivityPenaltyQuotientAltair: 3 * 1 << 24, //50331648 + MinSlashingPenaltyQuotientAltair: 64, + ProportionalSlashingMultiplierAltair: 2, + MinSlashingPenaltyQuotientBellatrix: 32, + ProportionalSlashingMultiplierBellatrix: 3, + InactivityPenaltyQuotientBellatrix: 1 << 24, + + // Light client + MinSyncCommitteeParticipants: 1, + + // Bellatrix + TerminalBlockHashActivationEpoch: 18446744073709551615, + TerminalBlockHash: [32]byte{}, + TerminalTotalDifficulty: "58750000000000000000000", // Estimated: Sept 15, 2022 + EthBurnAddressHex: "0x0000000000000000000000000000000000000000", + DefaultBuilderGasLimit: uint64(30000000), + + // Mevboost circuit breaker + MaxBuilderConsecutiveMissedSlots: 3, + MaxBuilderEpochMissedSlots: 8, +} + +func mainnetConfig() BeaconChainConfig { + cfg := mainnetBeaconConfig + cfg.InitializeForkSchedule() + return cfg +} + +func sepoliaConfig() BeaconChainConfig { + cfg := mainnetBeaconConfig + cfg.MinGenesisTime = 1655647200 + cfg.GenesisDelay = 86400 + cfg.MinGenesisActiveValidatorCount = 1300 + cfg.ConfigName = "sepolia" + cfg.GenesisForkVersion = []byte{0x90, 0x00, 0x00, 0x69} + cfg.SecondsPerETH1Block = 14 + cfg.DepositChainID = uint64(SepoliaNetwork) + cfg.DepositNetworkID = uint64(SepoliaNetwork) + cfg.AltairForkEpoch = 50 + cfg.AltairForkVersion = []byte{0x90, 0x00, 0x00, 0x70} + cfg.BellatrixForkEpoch = 100 + cfg.BellatrixForkVersion = []byte{0x90, 0x00, 0x00, 0x71} + 
cfg.TerminalTotalDifficulty = "17000000000000000" + cfg.DepositContractAddress = "0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D" + cfg.InitializeForkSchedule() + return cfg +} + +func goerliConfig() BeaconChainConfig { + cfg := mainnetBeaconConfig + cfg.MinGenesisTime = 1614588812 + cfg.GenesisDelay = 1919188 + cfg.ConfigName = "prater" + cfg.GenesisForkVersion = []byte{0x00, 0x00, 0x10, 0x20} + cfg.SecondsPerETH1Block = 14 + cfg.DepositChainID = uint64(GoerliNetwork) + cfg.DepositNetworkID = uint64(GoerliNetwork) + cfg.AltairForkEpoch = 36660 + cfg.AltairForkVersion = []byte{0x1, 0x0, 0x10, 0x20} + cfg.CapellaForkVersion = []byte{0x3, 0x0, 0x10, 0x20} + cfg.ShardingForkVersion = []byte{0x4, 0x0, 0x10, 0x20} + cfg.BellatrixForkEpoch = 112260 + cfg.BellatrixForkVersion = []byte{0x2, 0x0, 0x10, 0x20} + cfg.TerminalTotalDifficulty = "10790000" + cfg.DepositContractAddress = "0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b" + cfg.InitializeForkSchedule() + return cfg +} + +// Beacon configs +var BeaconConfigs map[NetworkType]BeaconChainConfig = map[NetworkType]BeaconChainConfig{ + MainnetNetwork: mainnetConfig(), + SepoliaNetwork: sepoliaConfig(), + GoerliNetwork: goerliConfig(), +} + +func GetConfigsByNetwork(net NetworkType) (*GenesisConfig, *NetworkConfig, *BeaconChainConfig) { + networkConfig := NetworkConfigs[net] + genesisConfig := GenesisConfigs[net] + fmt.Println(genesisConfig) + beaconConfig := BeaconConfigs[net] + return &genesisConfig, &networkConfig, &beaconConfig +} diff --git a/cmd/lightclient/clparams/config_test.go b/cmd/lightclient/clparams/config_test.go new file mode 100644 index 00000000000..a8aaaf512f9 --- /dev/null +++ b/cmd/lightclient/clparams/config_test.go @@ -0,0 +1,34 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package clparams + +import ( +	"testing" + +	"github.com/stretchr/testify/require" +) + +func testConfig(t *testing.T, n NetworkType) { +	genesis, network, beacon := GetConfigsByNetwork(n) + +	require.Equal(t, *genesis, GenesisConfigs[n]) +	require.Equal(t, *network, NetworkConfigs[n]) +	require.Equal(t, *beacon, BeaconConfigs[n]) +} + +func TestGetConfigsByNetwork(t *testing.T) { +	testConfig(t, MainnetNetwork) +	testConfig(t, SepoliaNetwork) +	testConfig(t, GoerliNetwork) +} diff --git a/cmd/lightclient/fork/fork.go b/cmd/lightclient/fork/fork.go new file mode 100644 index 00000000000..2bb98a9a61f --- /dev/null +++ b/cmd/lightclient/fork/fork.go @@ -0,0 +1,139 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +package fork + +import ( + "errors" + "math" + "sort" + "time" + + "github.com/ledgerwatch/erigon/cmd/lightclient/clparams" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication/p2p" + "github.com/ledgerwatch/erigon/cmd/lightclient/utils" + "github.com/ledgerwatch/erigon/common" + pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" +) + +func ComputeForkDigest( + beaconConfig *clparams.BeaconChainConfig, + genesisConfig *clparams.GenesisConfig, +) ([4]byte, error) { + if genesisConfig.GenesisTime == 0 { + return [4]byte{}, errors.New("genesis time is not set") + } + if genesisConfig.GenesisValidatorRoot == (common.Hash{}) { + return [4]byte{}, errors.New("genesis validators root is not set") + } + + currentEpoch := utils.GetCurrentEpoch(genesisConfig.GenesisTime, beaconConfig.SecondsPerSlot, beaconConfig.SlotsPerEpoch) + // Retrieve current fork version. + currentForkVersion := utils.BytesToBytes4(beaconConfig.GenesisForkVersion) + for _, fork := range forkList(beaconConfig.ForkVersionSchedule) { + if currentEpoch >= fork.epoch { + currentForkVersion = fork.version + continue + } + break + } + + return computeForkDigest(currentForkVersion, p2p.Root(genesisConfig.GenesisValidatorRoot)) +} + +type fork struct { + epoch uint64 + version [4]byte +} + +func forkList(schedule map[[4]byte]uint64) (f []fork) { + for version, epoch := range schedule { + f = append(f, fork{epoch: epoch, version: version}) + } + sort.Slice(f, func(i, j int) bool { + return f[i].epoch < f[j].epoch + }) + return +} + +func computeForkDigest(currentVersion [4]byte, genesisValidatorsRoot p2p.Root) (digest [4]byte, err error) { + data := p2p.ForkData{ + CurrentVersion: currentVersion, + GenesisValidatorsRoot: genesisValidatorsRoot, + } + var dataRoot [32]byte + dataRoot, err = data.HashTreeRoot() + if err != nil { + return + } + // copy first four bytes to output + copy(digest[:], dataRoot[:4]) + return +} + +func ComputeForkId( + beaconConfig *clparams.BeaconChainConfig, + 
genesisConfig *clparams.GenesisConfig, +) ([]byte, error) { + digest, err := ComputeForkDigest(beaconConfig, genesisConfig) + if err != nil { + return nil, err + } + + currentEpoch := utils.GetCurrentEpoch(genesisConfig.GenesisTime, beaconConfig.SecondsPerSlot, beaconConfig.SlotsPerEpoch) + + if time.Now().Unix() < int64(genesisConfig.GenesisTime) { + currentEpoch = 0 + } + + var nextForkVersion [4]byte + nextForkEpoch := uint64(math.MaxUint64) + for _, fork := range forkList(beaconConfig.ForkVersionSchedule) { + if currentEpoch < fork.epoch { + nextForkVersion = fork.version + nextForkEpoch = fork.epoch + break + } + nextForkVersion = fork.version + } + + enrForkID := p2p.ENRForkID{ + CurrentForkDigest: digest[:], + NextForkVersion: nextForkVersion[:], + NextForkEpoch: p2p.Epoch(nextForkEpoch), + } + return enrForkID.MarshalSSZ() +} + +func getLastForkEpoch( + beaconConfig *clparams.BeaconChainConfig, + genesisConfig *clparams.GenesisConfig, +) uint64 { + currentEpoch := utils.GetCurrentEpoch(genesisConfig.GenesisTime, beaconConfig.SecondsPerSlot, beaconConfig.SlotsPerEpoch) + // Retrieve current fork version. + currentForkEpoch := beaconConfig.GenesisEpoch + for _, fork := range forkList(beaconConfig.ForkVersionSchedule) { + if currentEpoch >= fork.epoch { + currentForkEpoch = fork.epoch + continue + } + break + } + return currentForkEpoch +} + +// The one suggested by the spec is too over-engineered. +func MsgID(pmsg *pubsubpb.Message) string { + hash := utils.Keccak256(pmsg.Data) + return string(hash[:]) +} diff --git a/cmd/lightclient/fork/fork_test.go b/cmd/lightclient/fork/fork_test.go new file mode 100644 index 00000000000..4f4aa0074a9 --- /dev/null +++ b/cmd/lightclient/fork/fork_test.go @@ -0,0 +1,32 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fork + +import ( + "testing" + + "github.com/ledgerwatch/erigon/cmd/lightclient/clparams" + "github.com/stretchr/testify/require" +) + +func TestMainnetFork(t *testing.T) { + beaconCfg := clparams.BeaconConfigs[clparams.MainnetNetwork] + genesisCfg := clparams.GenesisConfigs[clparams.MainnetNetwork] + digest, err := ComputeForkDigest(&beaconCfg, &genesisCfg) + require.NoError(t, err) + full, err := ComputeForkId(&beaconCfg, &genesisCfg) + require.NoError(t, err) + require.Equal(t, digest, [4]byte{74, 38, 197, 139}) + require.Equal(t, full, []byte{0x4a, 0x26, 0xc5, 0x8b, 0x2, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}) +} diff --git a/cmd/lightclient/lightclient/lightclient.go b/cmd/lightclient/lightclient/lightclient.go new file mode 100644 index 00000000000..0c1b8f9e44a --- /dev/null +++ b/cmd/lightclient/lightclient/lightclient.go @@ -0,0 +1,132 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package lightclient + +import ( + "context" + "math/big" + "time" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + "github.com/ledgerwatch/erigon/cmd/lightclient/rpc/lightrpc" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/log/v3" +) + +type LightClient struct { + sentinel lightrpc.SentinelClient + execution remote.ETHBACKENDServer +} + +func NewLightClient(execution remote.ETHBACKENDServer, sentinel lightrpc.SentinelClient) *LightClient { + return &LightClient{ + sentinel: sentinel, + execution: execution, + } +} + +func convertLightrpcExecutionPayloadToEthbacked(e *lightrpc.ExecutionPayload) *types.ExecutionPayload { + var baseFee *uint256.Int + + if e.BaseFeePerGas != nil { + // Trim and reverse it. + baseFeeBytes := common.CopyBytes(e.BaseFeePerGas) + for baseFeeBytes[len(baseFeeBytes)-1] == 0 && len(baseFeeBytes) > 0 { + baseFeeBytes = baseFeeBytes[:len(baseFeeBytes)-1] + } + for i, j := 0, len(baseFeeBytes)-1; i < j; i, j = i+1, j-1 { + baseFeeBytes[i], baseFeeBytes[j] = baseFeeBytes[j], baseFeeBytes[i] + } + var overflow bool + baseFee, overflow = uint256.FromBig(new(big.Int).SetBytes(baseFeeBytes)) + if overflow { + panic("NewPayload BaseFeePerGas overflow") + } + } + return &types.ExecutionPayload{ + ParentHash: gointerfaces.ConvertHashToH256(common.BytesToHash(e.ParentHash)), + Coinbase: gointerfaces.ConvertAddressToH160(common.BytesToAddress(e.FeeRecipient)), + StateRoot: gointerfaces.ConvertHashToH256(common.BytesToHash(e.StateRoot)), + ReceiptRoot: gointerfaces.ConvertHashToH256(common.BytesToHash(e.ReceiptsRoot)), + LogsBloom: gointerfaces.ConvertBytesToH2048(e.LogsBloom), + PrevRandao: gointerfaces.ConvertHashToH256(common.BytesToHash(e.PrevRandao)), + BlockNumber: e.BlockNumber, + GasLimit: e.GasLimit, + GasUsed: e.GasUsed, + Timestamp: e.Timestamp, + ExtraData: 
e.ExtraData, + BaseFeePerGas: gointerfaces.ConvertUint256IntToH256(baseFee), + BlockHash: gointerfaces.ConvertHashToH256(common.BytesToHash(e.BlockHash)), + Transactions: e.Transactions, + } +} + +func (l *LightClient) Start(ctx context.Context) { + stream, err := l.sentinel.SubscribeGossip(ctx, &lightrpc.GossipRequest{}) + if err != nil { + log.Warn("could not start lightclient", "reason", err) + return + } + + //defer stream.CloseSend() + + for { + select { + case <-ctx.Done(): + return + default: + data, err := stream.Recv() + if err != nil { + log.Warn("[Lightclient] block could not be ralayed :/", "reason", err) + continue + } + if data.Type != lightrpc.GossipType_BeaconBlockGossipType { + continue + } + block := &lightrpc.SignedBeaconBlockBellatrix{} + if err := block.UnmarshalSSZ(data.Data); err != nil { + log.Warn("Could not unmarshall gossip", "reason", err) + } + if err := l.processBeaconBlock(ctx, block); err != nil { + log.Warn("[Lightclient] block could not be executed :/", "reason", err) + continue + } + } + } +} + +func (l *LightClient) processBeaconBlock(ctx context.Context, beaconBlock *lightrpc.SignedBeaconBlockBellatrix) error { + payloadHash := gointerfaces.ConvertHashToH256( + common.BytesToHash(beaconBlock.Block.Body.ExecutionPayload.BlockHash)) + + payload := convertLightrpcExecutionPayloadToEthbacked(beaconBlock.Block.Body.ExecutionPayload) + var err error + _, err = l.execution.EngineNewPayloadV1(ctx, payload) + if err != nil { + return err + } + // Wait a bit + time.Sleep(500 * time.Millisecond) + _, err = l.execution.EngineForkChoiceUpdatedV1(ctx, &remote.EngineForkChoiceUpdatedRequest{ + ForkchoiceState: &remote.EngineForkChoiceState{ + HeadBlockHash: payloadHash, + SafeBlockHash: payloadHash, + FinalizedBlockHash: payloadHash, + }, + }) + return err +} diff --git a/cmd/lightclient/lightclient/state.go b/cmd/lightclient/lightclient/state.go new file mode 100644 index 00000000000..6be98d34393 --- /dev/null +++ 
b/cmd/lightclient/lightclient/state.go @@ -0,0 +1,228 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package lightclient + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon/cmd/lightclient/rpc/lightrpc" + "github.com/ledgerwatch/erigon/cmd/lightclient/utils" + "github.com/ledgerwatch/log/v3" +) + +type LightClientEvent interface { +} + +type LightState struct { + + // none of the fields below are protected by a mutex. + // the original bootstrap a ala trusted block root + bootstrap *lightrpc.LightClientBootstrap + genesis [32]byte + + // channel of events + events chan LightClientEvent + + // light client state https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientstore + finalized_header *lightrpc.BeaconBlockHeader + current_sync_committee *lightrpc.SyncCommittee + next_sync_committee *lightrpc.SyncCommittee + best_valid_update *lightrpc.LightClientUpdate + optimistic_header *lightrpc.BeaconBlockHeader + previous_max_active_participants uint64 + current_max_active_participants uint64 +} + +func NewLightState(ctx context.Context, bootstrap *lightrpc.LightClientBootstrap, genesis [32]byte) *LightState { + // makes copy of light client bootstrap + l := &LightState{ + bootstrap: bootstrap, + genesis: genesis, + finalized_header: bootstrap.Header, + current_sync_committee: bootstrap.CurrentSyncCommittee, + optimistic_header: bootstrap.Header, + events: 
make(chan LightClientEvent, 1280), + } + return l +} + +func (l *LightState) CurrentSlot() uint64 { + return 0 +} + +func (l *LightState) start(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case ev := <-l.events: + var err error + switch evt := ev.(type) { + case *lightrpc.LightClientUpdate: + err = l.onLightClientUpdate(evt) + case *lightrpc.LightClientFinalityUpdate: + err = l.onFinalityUpdate(evt) + case *lightrpc.LightClientOptimisticUpdate: + err = l.onOptimisticUpdate(evt) + } + if err != nil { + log.Warn("failed processing state update", "err", err) + } + } + } +} + +func (l *LightState) AddUpdateEvent(u *lightrpc.LightClientUpdate) { + l.events <- u +} +func (l *LightState) AddOptimisticUpdateEvent(u *lightrpc.LightClientOptimisticUpdate) { + l.events <- u +} +func (l *LightState) AddFinalityUpdateEvent(u *lightrpc.LightClientFinalityUpdate) { + l.events <- u +} + +func (l *LightState) onOptimisticUpdate(u *lightrpc.LightClientOptimisticUpdate) error { + // TODO: validate update + return nil +} + +func (l *LightState) onFinalityUpdate(u *lightrpc.LightClientFinalityUpdate) error { + // TODO: validate update + return nil +} + +func (l *LightState) onLightClientUpdate(u *lightrpc.LightClientUpdate) error { + if err := l.validateLightClientUpdate(u); err != nil { + return err + } + return nil +} +func (l *LightState) validateLightClientUpdate(u *lightrpc.LightClientUpdate) error { + // need to do this but im too lazy to. maybe someone else knows an elegant solution... 
+ // if u.SyncAggregate.SyncCommiteeBits < min_sync_participants { + // return fmt.Errorf("not enough participants in commmittee (%d/%d)", ) + //} + if l.CurrentSlot() < u.SignatureSlot { + return fmt.Errorf("current slot must be bigger or eq to sig slot") + } + if u.SignatureSlot <= u.AttestedHeader.Slot { + return fmt.Errorf("current sig slot must be larger than attested slot") + } + if u.AttestedHeader.Slot < u.FinalizedHeader.Slot { + return fmt.Errorf("attested header slot must be lower than finalized header slot") + } + storePeriod := computeSyncCommitteePeriodAtSlot(l.finalized_header.Slot) + updateSigPeriod := computeSyncCommitteePeriodAtSlot(u.SignatureSlot) + + if l.next_sync_committee != nil { + if updateSigPeriod != storePeriod && updateSigPeriod != storePeriod+1 { + return fmt.Errorf("update sig period must match store period or be store period + 1 if next sync committee not") + } + } else { + if updateSigPeriod != storePeriod { + return fmt.Errorf("update sig period must match store period if next sync committee nil") + } + } + + updateAttestedPeriod := computeSyncCommitteePeriodAtSlot(u.AttestedHeader.Slot) + if !(l.next_sync_committee == nil && (isSyncCommitteeUpdate(u) && updateAttestedPeriod == storePeriod)) { + if u.AttestedHeader.Slot <= l.finalized_header.Slot { + return fmt.Errorf("if up has next sync committee, the update header slot must be strictly larger than the store's finalized header") + } + } + + if isFinalityUpdate(u) { + // TODO: what is the genesis slot + GENESIS_SLOT := uint64(1) + finalized_root := [32]byte{} + if u.FinalizedHeader.Slot != GENESIS_SLOT { + finalized_root = hashTreeRoot(u.FinalizedHeader) + } + + if !isValidMerkleBranch(finalized_root, utils.BytesSliceToBytes32Slice(u.FinalityBranch), 0, 0, utils.BytesToBytes32(u.AttestedHeader.Root)) { + return fmt.Errorf("merkle branch invalid for finality update") + } + } + if isSyncCommitteeUpdate(u) { + leaf, _ := u.NextSyncCommitee.HashTreeRoot() + if 
!isValidMerkleBranch(leaf, utils.BytesSliceToBytes32Slice(u.NextSyncCommitteeBranch), 0, 0, utils.BytesToBytes32(u.AttestedHeader.Root)) { + return fmt.Errorf("merkle branch invalid for sync committee update") + } + } + + var curSyncCommittee *lightrpc.SyncCommittee + if updateSigPeriod == storePeriod { + curSyncCommittee = l.current_sync_committee + } else { + if l.next_sync_committee != nil { + curSyncCommittee = l.next_sync_committee + } + } + _ = curSyncCommittee + //TODO: remaining validation + /// participant_pubkeys = [ + /// pubkey for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys) + /// if bit + /// ] + /// fork_version = compute_fork_version(compute_epoch_at_slot(update.signature_slot)) + /// domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root) + /// signing_root = compute_signing_root(update.attested_header, domain) + /// assert bls.FastAggregateVerify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature) + return nil +} + +// TODO: implement +func isValidMerkleBranch( + leaf [32]byte, + branch [][32]byte, + depth int, + index int, + root [32]byte, +) bool { + return false +} + +// TODO: implement +func hashTreeRoot(h *lightrpc.BeaconBlockHeader) [32]byte { + return [32]byte{} +} + +// TODO: implement +func computeEpochAtSlot(slot uint64) uint64 { + return 0 +} + +// TODO: implement +func computeSyncCommitteePeriodAtSlot(slot uint64) uint64 { + return computeSyncCommitteePeriod(computeEpochAtSlot(slot)) +} + +// TODO: implement +func computeSyncCommitteePeriod(slot uint64) uint64 { + return slot +} + +// TODO: implement +func isSyncCommitteeUpdate(update *lightrpc.LightClientUpdate) bool { + // return update.next_sync_committee_branch != [Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))] + return true +} + +// TODO: implement +func isFinalityUpdate(update *lightrpc.LightClientUpdate) bool { + // return update.next_sync_committee_branch != [Bytes32() for 
_ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))] + return true +} diff --git a/cmd/lightclient/main.go b/cmd/lightclient/main.go new file mode 100644 index 00000000000..817ec7508d5 --- /dev/null +++ b/cmd/lightclient/main.go @@ -0,0 +1,127 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package main + +import ( + "context" + "time" + + "github.com/ledgerwatch/erigon/cmd/lightclient/clparams" + "github.com/ledgerwatch/erigon/cmd/lightclient/rpc/lightrpc" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication" + "github.com/ledgerwatch/erigon/cmd/lightclient/utils" + "github.com/ledgerwatch/log/v3" +) + +var ( + defaultIpAddr = "127.0.0.1" // Localhost + defaultPort = 8080 + defaultTcpPort = uint(9000) +) + +func main() { + log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler)) + genesisCfg, networkCfg, beaconCfg := clparams.GetConfigsByNetwork(clparams.MainnetNetwork) + sent, err := sentinel.New(context.Background(), &sentinel.SentinelConfig{ + IpAddr: defaultIpAddr, + Port: defaultPort, + TCPPort: defaultTcpPort, + GenesisConfig: genesisCfg, + NetworkConfig: networkCfg, + BeaconConfig: beaconCfg, + }) + if err != nil { + log.Error("error", "err", err) + return + } + if err := sent.Start(); err != nil { + log.Error("failed to start sentinel", "err", err) + return + } + log.Info("Sentinel started", "enr", sent.String()) + + 
gossip_topics := []sentinel.GossipTopic{ + sentinel.BeaconBlockSsz, + sentinel.LightClientFinalityUpdateSsz, + sentinel.LightClientOptimisticUpdateSsz, + } + for _, v := range gossip_topics { + // now lets separately connect to the gossip topics. this joins the room + subscriber, err := sent.SubscribeGossip(v) + if err != nil { + log.Error("failed to start sentinel", "err", err) + } + // actually start the subscription, ala listening and sending packets to the sentinel recv channel + err = subscriber.Listen() + if err != nil { + log.Error("failed to start sentinel", "err", err) + } + } + + logInterval := time.NewTicker(5 * time.Second) + sendReqInterval := time.NewTicker(1 * time.Second) + + for { + select { + case <-logInterval.C: + sent.LogTopicPeers() + log.Info("[Lightclient] Networking Report", "peers", sent.GetPeersCount()) + case pkt := <-sent.RecvGossip(): + handleGossipPacket(pkt) + case <-sendReqInterval.C: + go func() { + if _, err := sent.SendPingReqV1(); err != nil { + log.Debug("failed to send ping request", "err", err) + } + if _, err := sent.SendMetadataReqV1(); err != nil { + log.Debug("failed to send metadata request", "err", err) + } + }() + } + } +} + +func handleGossipPacket(pkt *communication.GossipContext) error { + log.Trace("[Gossip] Received Packet", "topic", pkt.Topic) + switch u := pkt.Packet.(type) { + case *lightrpc.SignedBeaconBlockBellatrix: + /*log.Info("[Gossip] beacon_block", + "Slot", u.Block.Slot, + "Signature", hex.EncodeToString(u.Signature), + "graffiti", string(u.Block.Body.Graffiti), + "eth1_blockhash", hex.EncodeToString(u.Block.Body.Eth1Data.BlockHash), + "stateRoot", hex.EncodeToString(u.Block.StateRoot), + "parentRoot", hex.EncodeToString(u.Block.ParentRoot), + "proposerIdx", u.Block.ProposerIndex, + )*/ + err := pkt.Codec.WritePacket(context.TODO(), pkt.Packet) + if err != nil { + log.Warn("[Gossip] Error Forwarding Packet", "err", err) + } + case *lightrpc.LightClientFinalityUpdate: + err := 
pkt.Codec.WritePacket(context.TODO(), pkt.Packet) + if err != nil { + log.Warn("[Gossip] Error Forwarding Packet", "err", err) + } + log.Info("[Gossip] Got Finalty Update", "sig", utils.BytesToHex(u.SyncAggregate.SyncCommiteeSignature)) + case *lightrpc.LightClientOptimisticUpdate: + err := pkt.Codec.WritePacket(context.TODO(), pkt.Packet) + if err != nil { + log.Warn("[Gossip] Error Forwarding Packet", "err", err) + } + default: + } + return nil +} diff --git a/cmd/lightclient/rpc/Makefile b/cmd/lightclient/rpc/Makefile new file mode 100644 index 00000000000..e2d5e3281f2 --- /dev/null +++ b/cmd/lightclient/rpc/Makefile @@ -0,0 +1,11 @@ +all: + protoc --proto_path=proto --go_out=lightrpc --go_opt=paths=source_relative proto/*.proto --go-grpc_out=lightrpc + + protoc-go-inject-tag -input="lightrpc/*.pb.go" + sed -i 's/sszsize/ssz-size/g' lightrpc/*.pb.go + sed -i 's/sszmax/ssz-max/g' lightrpc/*.pb.go + go run github.com/ferranbt/fastssz/sszgen -path lightrpc/metadata.pb.go -exclude-objs {{range $key, $val := .Aliases}}{{$key}},{{end}}Ignore + go run github.com/ferranbt/fastssz/sszgen -path lightrpc/beacon_block.pb.go -exclude-objs {{range $key, $val := .Aliases}}{{$key}},{{end}}Ignore + +clean: + rm lightrpc/*.pb.go \ No newline at end of file diff --git a/cmd/lightclient/rpc/lightrpc/beacon_block.pb.go b/cmd/lightclient/rpc/lightrpc/beacon_block.pb.go new file mode 100644 index 00000000000..9fb78c65f87 --- /dev/null +++ b/cmd/lightclient/rpc/lightrpc/beacon_block.pb.go @@ -0,0 +1,2043 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.14.0 +// source: beacon_block.proto + +package lightrpc + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Eth1Data struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Root []byte `protobuf:"bytes,1,opt,name=Root,json=deposit_root,proto3" json:"Root,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + DepositCount uint64 `protobuf:"varint,2,opt,name=DepositCount,json=deposit_count,proto3" json:"DepositCount,omitempty"` + BlockHash []byte `protobuf:"bytes,3,opt,name=BlockHash,json=block_hash,proto3" json:"BlockHash,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" +} + +func (x *Eth1Data) Reset() { + *x = Eth1Data{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Eth1Data) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Eth1Data) ProtoMessage() {} + +func (x *Eth1Data) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Eth1Data.ProtoReflect.Descriptor instead. 
+func (*Eth1Data) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{0} +} + +func (x *Eth1Data) GetRoot() []byte { + if x != nil { + return x.Root + } + return nil +} + +func (x *Eth1Data) GetDepositCount() uint64 { + if x != nil { + return x.DepositCount + } + return 0 +} + +func (x *Eth1Data) GetBlockHash() []byte { + if x != nil { + return x.BlockHash + } + return nil +} + +type BeaconBlockHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Slot uint64 `protobuf:"varint,1,opt,name=Slot,json=slot,proto3" json:"Slot,omitempty"` + ProposerIndex uint64 `protobuf:"varint,2,opt,name=ProposerIndex,json=proposer_index,proto3" json:"ProposerIndex,omitempty"` + ParentRoot []byte `protobuf:"bytes,3,opt,name=ParentRoot,json=parent_root,proto3" json:"ParentRoot,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + Root []byte `protobuf:"bytes,4,opt,name=Root,json=root,proto3" json:"Root,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + BodyRoot []byte `protobuf:"bytes,5,opt,name=BodyRoot,json=body_root,proto3" json:"BodyRoot,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" +} + +func (x *BeaconBlockHeader) Reset() { + *x = BeaconBlockHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BeaconBlockHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BeaconBlockHeader) ProtoMessage() {} + +func (x *BeaconBlockHeader) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BeaconBlockHeader.ProtoReflect.Descriptor instead. 
+func (*BeaconBlockHeader) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{1} +} + +func (x *BeaconBlockHeader) GetSlot() uint64 { + if x != nil { + return x.Slot + } + return 0 +} + +func (x *BeaconBlockHeader) GetProposerIndex() uint64 { + if x != nil { + return x.ProposerIndex + } + return 0 +} + +func (x *BeaconBlockHeader) GetParentRoot() []byte { + if x != nil { + return x.ParentRoot + } + return nil +} + +func (x *BeaconBlockHeader) GetRoot() []byte { + if x != nil { + return x.Root + } + return nil +} + +func (x *BeaconBlockHeader) GetBodyRoot() []byte { + if x != nil { + return x.BodyRoot + } + return nil +} + +type SignedBeaconBlockHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Header *BeaconBlockHeader `protobuf:"bytes,1,opt,name=Header,json=message,proto3" json:"Header,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=Signature,json=signature,proto3" json:"Signature,omitempty" ssz-size:"96"` // @gotags: ssz-size:"96" +} + +func (x *SignedBeaconBlockHeader) Reset() { + *x = SignedBeaconBlockHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignedBeaconBlockHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignedBeaconBlockHeader) ProtoMessage() {} + +func (x *SignedBeaconBlockHeader) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignedBeaconBlockHeader.ProtoReflect.Descriptor instead. 
+func (*SignedBeaconBlockHeader) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{2} +} + +func (x *SignedBeaconBlockHeader) GetHeader() *BeaconBlockHeader { + if x != nil { + return x.Header + } + return nil +} + +func (x *SignedBeaconBlockHeader) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +type Slashing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Header1 *SignedBeaconBlockHeader `protobuf:"bytes,1,opt,name=Header1,json=signed_header_1,proto3" json:"Header1,omitempty"` + Header2 *SignedBeaconBlockHeader `protobuf:"bytes,2,opt,name=Header2,json=signed_header_2,proto3" json:"Header2,omitempty"` +} + +func (x *Slashing) Reset() { + *x = Slashing{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Slashing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Slashing) ProtoMessage() {} + +func (x *Slashing) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Slashing.ProtoReflect.Descriptor instead. +func (*Slashing) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{3} +} + +func (x *Slashing) GetHeader1() *SignedBeaconBlockHeader { + if x != nil { + return x.Header1 + } + return nil +} + +func (x *Slashing) GetHeader2() *SignedBeaconBlockHeader { + if x != nil { + return x.Header2 + } + return nil +} + +// TODO(Giulio2002): Finish. 
+type AttestationData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Slot uint64 `protobuf:"varint,1,opt,name=Slot,json=slot,proto3" json:"Slot,omitempty"` + Index uint64 `protobuf:"varint,2,opt,name=Index,json=index,proto3" json:"Index,omitempty"` + BeaconBlockHash []byte `protobuf:"bytes,3,opt,name=BeaconBlockHash,json=beacon_block_hash,proto3" json:"BeaconBlockHash,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" +} + +func (x *AttestationData) Reset() { + *x = AttestationData{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttestationData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttestationData) ProtoMessage() {} + +func (x *AttestationData) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttestationData.ProtoReflect.Descriptor instead. 
+func (*AttestationData) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{4} +} + +func (x *AttestationData) GetSlot() uint64 { + if x != nil { + return x.Slot + } + return 0 +} + +func (x *AttestationData) GetIndex() uint64 { + if x != nil { + return x.Index + } + return 0 +} + +func (x *AttestationData) GetBeaconBlockHash() []byte { + if x != nil { + return x.BeaconBlockHash + } + return nil +} + +type Attestation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AggregationBits []byte `protobuf:"bytes,1,opt,name=AggregationBits,json=aggregation_bits,proto3" json:"AggregationBits,omitempty" ssz-max:"2048" ssz:"bitlist"` // @gotags: ssz-max:"2048" ssz:"bitlist" + Data *AttestationData `protobuf:"bytes,2,opt,name=Data,json=data,proto3" json:"Data,omitempty"` + Signature []byte `protobuf:"bytes,3,opt,name=Signature,json=signature,proto3" json:"Signature,omitempty" ssz-size:"96"` // @gotags: ssz-size:"96" +} + +func (x *Attestation) Reset() { + *x = Attestation{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Attestation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Attestation) ProtoMessage() {} + +func (x *Attestation) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Attestation.ProtoReflect.Descriptor instead. 
+func (*Attestation) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{5} +} + +func (x *Attestation) GetAggregationBits() []byte { + if x != nil { + return x.AggregationBits + } + return nil +} + +func (x *Attestation) GetData() *AttestationData { + if x != nil { + return x.Data + } + return nil +} + +func (x *Attestation) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +type DepositData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PubKey []byte `protobuf:"bytes,1,opt,name=PubKey,json=pubkey,proto3" json:"PubKey,omitempty" ssz-size:"48"` // @gotags: ssz-size:"48" + WithdrawalCredentials []byte `protobuf:"bytes,2,opt,name=WithdrawalCredentials,json=withdrawal_credentials,proto3" json:"WithdrawalCredentials,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + Amount uint64 `protobuf:"varint,3,opt,name=Amount,json=amount,proto3" json:"Amount,omitempty"` + Signature []byte `protobuf:"bytes,4,opt,name=Signature,json=signature,proto3" json:"Signature,omitempty" ssz-size:"96"` // @gotags: ssz-size:"96" + Root []byte `protobuf:"bytes,5,opt,name=Root,proto3" json:"Root,omitempty" ssz:"-"` // @gotags: ssz:"-" +} + +func (x *DepositData) Reset() { + *x = DepositData{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DepositData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DepositData) ProtoMessage() {} + +func (x *DepositData) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DepositData.ProtoReflect.Descriptor instead. 
+func (*DepositData) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{6} +} + +func (x *DepositData) GetPubKey() []byte { + if x != nil { + return x.PubKey + } + return nil +} + +func (x *DepositData) GetWithdrawalCredentials() []byte { + if x != nil { + return x.WithdrawalCredentials + } + return nil +} + +func (x *DepositData) GetAmount() uint64 { + if x != nil { + return x.Amount + } + return 0 +} + +func (x *DepositData) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +func (x *DepositData) GetRoot() []byte { + if x != nil { + return x.Root + } + return nil +} + +type Deposit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Proof [][]byte `protobuf:"bytes,1,rep,name=Proof,json=proof,proto3" json:"Proof,omitempty" ssz-size:"33,32"` // @gotags: ssz-size:"33,32" + Data *DepositData `protobuf:"bytes,2,opt,name=Data,json=data,proto3" json:"Data,omitempty"` +} + +func (x *Deposit) Reset() { + *x = Deposit{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Deposit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Deposit) ProtoMessage() {} + +func (x *Deposit) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Deposit.ProtoReflect.Descriptor instead. 
+func (*Deposit) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{7} +} + +func (x *Deposit) GetProof() [][]byte { + if x != nil { + return x.Proof + } + return nil +} + +func (x *Deposit) GetData() *DepositData { + if x != nil { + return x.Data + } + return nil +} + +type SignedVoluntaryExit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolunaryExit *VoluntaryExit `protobuf:"bytes,1,opt,name=VolunaryExit,json=message,proto3" json:"VolunaryExit,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=Signature,json=signature,proto3" json:"Signature,omitempty" ssz-size:"96"` // @gotags: ssz-size:"96" +} + +func (x *SignedVoluntaryExit) Reset() { + *x = SignedVoluntaryExit{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignedVoluntaryExit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignedVoluntaryExit) ProtoMessage() {} + +func (x *SignedVoluntaryExit) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignedVoluntaryExit.ProtoReflect.Descriptor instead. 
+func (*SignedVoluntaryExit) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{8} +} + +func (x *SignedVoluntaryExit) GetVolunaryExit() *VoluntaryExit { + if x != nil { + return x.VolunaryExit + } + return nil +} + +func (x *SignedVoluntaryExit) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +type VoluntaryExit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Epoch uint64 `protobuf:"varint,1,opt,name=Epoch,json=epoch,proto3" json:"Epoch,omitempty"` + ValidatorIndex uint64 `protobuf:"varint,2,opt,name=ValidatorIndex,json=validator_index,proto3" json:"ValidatorIndex,omitempty"` +} + +func (x *VoluntaryExit) Reset() { + *x = VoluntaryExit{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VoluntaryExit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VoluntaryExit) ProtoMessage() {} + +func (x *VoluntaryExit) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VoluntaryExit.ProtoReflect.Descriptor instead. 
+func (*VoluntaryExit) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{9} +} + +func (x *VoluntaryExit) GetEpoch() uint64 { + if x != nil { + return x.Epoch + } + return 0 +} + +func (x *VoluntaryExit) GetValidatorIndex() uint64 { + if x != nil { + return x.ValidatorIndex + } + return 0 +} + +type SyncAggregate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SyncCommiteeBits []byte `protobuf:"bytes,1,opt,name=SyncCommiteeBits,json=sync_committee_bits,proto3" json:"SyncCommiteeBits,omitempty" ssz-size:"64"` // @gotags: ssz-size:"64" + SyncCommiteeSignature []byte `protobuf:"bytes,2,opt,name=SyncCommiteeSignature,json=sync_committee_signature,proto3" json:"SyncCommiteeSignature,omitempty" ssz-size:"96"` // @gotags: ssz-size:"96" +} + +func (x *SyncAggregate) Reset() { + *x = SyncAggregate{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyncAggregate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncAggregate) ProtoMessage() {} + +func (x *SyncAggregate) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncAggregate.ProtoReflect.Descriptor instead. 
+func (*SyncAggregate) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{10} +} + +func (x *SyncAggregate) GetSyncCommiteeBits() []byte { + if x != nil { + return x.SyncCommiteeBits + } + return nil +} + +func (x *SyncAggregate) GetSyncCommiteeSignature() []byte { + if x != nil { + return x.SyncCommiteeSignature + } + return nil +} + +// Lightclient will sent this to Erigon once validation is done. +type ExecutionPayload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ParentHash []byte `protobuf:"bytes,1,opt,name=ParentHash,json=parent_hash,proto3" json:"ParentHash,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + FeeRecipient []byte `protobuf:"bytes,2,opt,name=FeeRecipient,json=fee_recipient,proto3" json:"FeeRecipient,omitempty" ssz-size:"20"` // @gotags: ssz-size:"20" + StateRoot []byte `protobuf:"bytes,3,opt,name=StateRoot,json=state_root,proto3" json:"StateRoot,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + ReceiptsRoot []byte `protobuf:"bytes,4,opt,name=ReceiptsRoot,json=receipts_root,proto3" json:"ReceiptsRoot,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + LogsBloom []byte `protobuf:"bytes,5,opt,name=LogsBloom,json=logs_bloom,proto3" json:"LogsBloom,omitempty" ssz-size:"256"` // @gotags: ssz-size:"256" + PrevRandao []byte `protobuf:"bytes,6,opt,name=PrevRandao,json=prev_randao,proto3" json:"PrevRandao,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + BlockNumber uint64 `protobuf:"varint,7,opt,name=BlockNumber,json=block_number,proto3" json:"BlockNumber,omitempty"` + GasLimit uint64 `protobuf:"varint,8,opt,name=GasLimit,json=gas_limit,proto3" json:"GasLimit,omitempty"` + GasUsed uint64 `protobuf:"varint,9,opt,name=GasUsed,json=gas_used,proto3" json:"GasUsed,omitempty"` + Timestamp uint64 `protobuf:"varint,10,opt,name=Timestamp,json=timestamp,proto3" json:"Timestamp,omitempty"` + ExtraData []byte 
`protobuf:"bytes,11,opt,name=ExtraData,json=extra_data,proto3" json:"ExtraData,omitempty" ssz-max:"32"` // @gotags: ssz-max:"32" + BaseFeePerGas []byte `protobuf:"bytes,12,opt,name=BaseFeePerGas,json=base_fee_per_gas,proto3" json:"BaseFeePerGas,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + BlockHash []byte `protobuf:"bytes,13,opt,name=BlockHash,json=block_hash,proto3" json:"BlockHash,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + Transactions [][]byte `protobuf:"bytes,14,rep,name=Transactions,json=transactions,proto3" json:"Transactions,omitempty" ssz-size:"?,?" ssz-max:"1048576,1073741824"` // @gotags: ssz-size:"?,?" ssz-max:"1048576,1073741824" +} + +func (x *ExecutionPayload) Reset() { + *x = ExecutionPayload{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExecutionPayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecutionPayload) ProtoMessage() {} + +func (x *ExecutionPayload) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecutionPayload.ProtoReflect.Descriptor instead. 
+func (*ExecutionPayload) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{11} +} + +func (x *ExecutionPayload) GetParentHash() []byte { + if x != nil { + return x.ParentHash + } + return nil +} + +func (x *ExecutionPayload) GetFeeRecipient() []byte { + if x != nil { + return x.FeeRecipient + } + return nil +} + +func (x *ExecutionPayload) GetStateRoot() []byte { + if x != nil { + return x.StateRoot + } + return nil +} + +func (x *ExecutionPayload) GetReceiptsRoot() []byte { + if x != nil { + return x.ReceiptsRoot + } + return nil +} + +func (x *ExecutionPayload) GetLogsBloom() []byte { + if x != nil { + return x.LogsBloom + } + return nil +} + +func (x *ExecutionPayload) GetPrevRandao() []byte { + if x != nil { + return x.PrevRandao + } + return nil +} + +func (x *ExecutionPayload) GetBlockNumber() uint64 { + if x != nil { + return x.BlockNumber + } + return 0 +} + +func (x *ExecutionPayload) GetGasLimit() uint64 { + if x != nil { + return x.GasLimit + } + return 0 +} + +func (x *ExecutionPayload) GetGasUsed() uint64 { + if x != nil { + return x.GasUsed + } + return 0 +} + +func (x *ExecutionPayload) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *ExecutionPayload) GetExtraData() []byte { + if x != nil { + return x.ExtraData + } + return nil +} + +func (x *ExecutionPayload) GetBaseFeePerGas() []byte { + if x != nil { + return x.BaseFeePerGas + } + return nil +} + +func (x *ExecutionPayload) GetBlockHash() []byte { + if x != nil { + return x.BlockHash + } + return nil +} + +func (x *ExecutionPayload) GetTransactions() [][]byte { + if x != nil { + return x.Transactions + } + return nil +} + +type BeaconBodyBellatrix struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RandaoReveal []byte `protobuf:"bytes,1,opt,name=RandaoReveal,json=randao_reveal,proto3" json:"RandaoReveal,omitempty" ssz-size:"96"` // @gotags: ssz-size:"96" + 
Eth1Data *Eth1Data `protobuf:"bytes,2,opt,name=Eth1Data,json=eth1_data,proto3" json:"Eth1Data,omitempty"` + Graffiti []byte `protobuf:"bytes,3,opt,name=Graffiti,json=graffiti,proto3" json:"Graffiti,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + ProposerSlashings []*Slashing `protobuf:"bytes,4,rep,name=ProposerSlashings,json=proposer_slashings,proto3" json:"ProposerSlashings,omitempty" ssz-max:"16"` // @gotags: ssz-max:"16" + AttesterSlashings []*Slashing `protobuf:"bytes,5,rep,name=AttesterSlashings,json=attester_slashings,proto3" json:"AttesterSlashings,omitempty" ssz-max:"2"` // @gotags: ssz-max:"2" + Attestations []*Attestation `protobuf:"bytes,6,rep,name=Attestations,json=attestations,proto3" json:"Attestations,omitempty" ssz-max:"128"` // @gotags: ssz-max:"128" + Deposits []*Deposit `protobuf:"bytes,7,rep,name=Deposits,json=deposits,proto3" json:"Deposits,omitempty" ssz-max:"16"` // @gotags: ssz-max:"16" + VoluntaryExits []*SignedVoluntaryExit `protobuf:"bytes,8,rep,name=VoluntaryExits,json=voluntary_exits,proto3" json:"VoluntaryExits,omitempty" ssz-max:"16"` // @gotags: ssz-max:"16" + SyncAggregate *SyncAggregate `protobuf:"bytes,9,opt,name=SyncAggregate,json=sync_aggregate,proto3" json:"SyncAggregate,omitempty"` + ExecutionPayload *ExecutionPayload `protobuf:"bytes,10,opt,name=ExecutionPayload,json=execution_payload,proto3" json:"ExecutionPayload,omitempty"` +} + +func (x *BeaconBodyBellatrix) Reset() { + *x = BeaconBodyBellatrix{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BeaconBodyBellatrix) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BeaconBodyBellatrix) ProtoMessage() {} + +func (x *BeaconBodyBellatrix) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BeaconBodyBellatrix.ProtoReflect.Descriptor instead. +func (*BeaconBodyBellatrix) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{12} +} + +func (x *BeaconBodyBellatrix) GetRandaoReveal() []byte { + if x != nil { + return x.RandaoReveal + } + return nil +} + +func (x *BeaconBodyBellatrix) GetEth1Data() *Eth1Data { + if x != nil { + return x.Eth1Data + } + return nil +} + +func (x *BeaconBodyBellatrix) GetGraffiti() []byte { + if x != nil { + return x.Graffiti + } + return nil +} + +func (x *BeaconBodyBellatrix) GetProposerSlashings() []*Slashing { + if x != nil { + return x.ProposerSlashings + } + return nil +} + +func (x *BeaconBodyBellatrix) GetAttesterSlashings() []*Slashing { + if x != nil { + return x.AttesterSlashings + } + return nil +} + +func (x *BeaconBodyBellatrix) GetAttestations() []*Attestation { + if x != nil { + return x.Attestations + } + return nil +} + +func (x *BeaconBodyBellatrix) GetDeposits() []*Deposit { + if x != nil { + return x.Deposits + } + return nil +} + +func (x *BeaconBodyBellatrix) GetVoluntaryExits() []*SignedVoluntaryExit { + if x != nil { + return x.VoluntaryExits + } + return nil +} + +func (x *BeaconBodyBellatrix) GetSyncAggregate() *SyncAggregate { + if x != nil { + return x.SyncAggregate + } + return nil +} + +func (x *BeaconBodyBellatrix) GetExecutionPayload() *ExecutionPayload { + if x != nil { + return x.ExecutionPayload + } + return nil +} + +type BeaconBlockBellatrix struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Slot uint64 `protobuf:"varint,1,opt,name=Slot,json=slot,proto3" json:"Slot,omitempty"` + ProposerIndex uint64 `protobuf:"varint,2,opt,name=ProposerIndex,json=proposer_index,proto3" json:"ProposerIndex,omitempty"` + ParentRoot []byte 
`protobuf:"bytes,3,opt,name=ParentRoot,json=parent_root,proto3" json:"ParentRoot,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + StateRoot []byte `protobuf:"bytes,4,opt,name=StateRoot,json=root,proto3" json:"StateRoot,omitempty" ssz-size:"32"` // @gotags: ssz-size:"32" + Body *BeaconBodyBellatrix `protobuf:"bytes,5,opt,name=Body,json=body,proto3" json:"Body,omitempty"` +} + +func (x *BeaconBlockBellatrix) Reset() { + *x = BeaconBlockBellatrix{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BeaconBlockBellatrix) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BeaconBlockBellatrix) ProtoMessage() {} + +func (x *BeaconBlockBellatrix) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BeaconBlockBellatrix.ProtoReflect.Descriptor instead. 
+func (*BeaconBlockBellatrix) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{13} +} + +func (x *BeaconBlockBellatrix) GetSlot() uint64 { + if x != nil { + return x.Slot + } + return 0 +} + +func (x *BeaconBlockBellatrix) GetProposerIndex() uint64 { + if x != nil { + return x.ProposerIndex + } + return 0 +} + +func (x *BeaconBlockBellatrix) GetParentRoot() []byte { + if x != nil { + return x.ParentRoot + } + return nil +} + +func (x *BeaconBlockBellatrix) GetStateRoot() []byte { + if x != nil { + return x.StateRoot + } + return nil +} + +func (x *BeaconBlockBellatrix) GetBody() *BeaconBodyBellatrix { + if x != nil { + return x.Body + } + return nil +} + +type SignedBeaconBlockBellatrix struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Block *BeaconBlockBellatrix `protobuf:"bytes,1,opt,name=Block,json=message,proto3" json:"Block,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=Signature,json=signature,proto3" json:"Signature,omitempty" ssz-size:"96"` // @gotags: ssz-size:"96" +} + +func (x *SignedBeaconBlockBellatrix) Reset() { + *x = SignedBeaconBlockBellatrix{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignedBeaconBlockBellatrix) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignedBeaconBlockBellatrix) ProtoMessage() {} + +func (x *SignedBeaconBlockBellatrix) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignedBeaconBlockBellatrix.ProtoReflect.Descriptor instead. 
+func (*SignedBeaconBlockBellatrix) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{14} +} + +func (x *SignedBeaconBlockBellatrix) GetBlock() *BeaconBlockBellatrix { + if x != nil { + return x.Block + } + return nil +} + +func (x *SignedBeaconBlockBellatrix) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +type LightClientBootstrap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Header *BeaconBlockHeader `protobuf:"bytes,1,opt,name=Header,json=header,proto3" json:"Header,omitempty"` + CurrentSyncCommittee *SyncCommittee `protobuf:"bytes,2,opt,name=CurrentSyncCommittee,json=current_sync_committee,proto3" json:"CurrentSyncCommittee,omitempty"` + CurrentSyncCommitteeBranch [][]byte `protobuf:"bytes,3,rep,name=CurrentSyncCommitteeBranch,json=current_sync_committee_branch,proto3" json:"CurrentSyncCommitteeBranch,omitempty" ssz-size:"5,32"` // @gotags: ssz-size:"5,32" +} + +func (x *LightClientBootstrap) Reset() { + *x = LightClientBootstrap{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LightClientBootstrap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LightClientBootstrap) ProtoMessage() {} + +func (x *LightClientBootstrap) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LightClientBootstrap.ProtoReflect.Descriptor instead. 
+func (*LightClientBootstrap) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{15} +} + +func (x *LightClientBootstrap) GetHeader() *BeaconBlockHeader { + if x != nil { + return x.Header + } + return nil +} + +func (x *LightClientBootstrap) GetCurrentSyncCommittee() *SyncCommittee { + if x != nil { + return x.CurrentSyncCommittee + } + return nil +} + +func (x *LightClientBootstrap) GetCurrentSyncCommitteeBranch() [][]byte { + if x != nil { + return x.CurrentSyncCommitteeBranch + } + return nil +} + +type SyncCommittee struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PubKeys [][]byte `protobuf:"bytes,1,rep,name=PubKeys,json=current_sync_committee,proto3" json:"PubKeys,omitempty" ssz-size:"512,48"` // @gotags: ssz-size:"512,48" +} + +func (x *SyncCommittee) Reset() { + *x = SyncCommittee{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyncCommittee) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncCommittee) ProtoMessage() {} + +func (x *SyncCommittee) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncCommittee.ProtoReflect.Descriptor instead. 
+func (*SyncCommittee) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{16} +} + +func (x *SyncCommittee) GetPubKeys() [][]byte { + if x != nil { + return x.PubKeys + } + return nil +} + +type LightClientUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AttestedHeader *BeaconBlockHeader `protobuf:"bytes,1,opt,name=AttestedHeader,json=attested_header,proto3" json:"AttestedHeader,omitempty"` + NextSyncCommitee *SyncCommittee `protobuf:"bytes,2,opt,name=NextSyncCommitee,json=next_sync_committee,proto3" json:"NextSyncCommitee,omitempty"` + NextSyncCommitteeBranch [][]byte `protobuf:"bytes,3,rep,name=NextSyncCommitteeBranch,json=next_sync_committee_branch,proto3" json:"NextSyncCommitteeBranch,omitempty" ssz-size:"5,32"` // @gotags: ssz-size:"5,32" + FinalizedHeader *BeaconBlockHeader `protobuf:"bytes,4,opt,name=FinalizedHeader,json=finalized_header,proto3" json:"FinalizedHeader,omitempty"` + FinalityBranch [][]byte `protobuf:"bytes,5,rep,name=FinalityBranch,json=finality_branch,proto3" json:"FinalityBranch,omitempty" ssz-size:"6,32"` // @gotags: ssz-size:"6,32" + SyncAggregate *SyncAggregate `protobuf:"bytes,6,opt,name=SyncAggregate,json=sync_aggregate,proto3" json:"SyncAggregate,omitempty"` + SignatureSlot uint64 `protobuf:"varint,7,opt,name=SignatureSlot,json=signature_slot,proto3" json:"SignatureSlot,omitempty"` +} + +func (x *LightClientUpdate) Reset() { + *x = LightClientUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LightClientUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LightClientUpdate) ProtoMessage() {} + +func (x *LightClientUpdate) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LightClientUpdate.ProtoReflect.Descriptor instead. +func (*LightClientUpdate) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{17} +} + +func (x *LightClientUpdate) GetAttestedHeader() *BeaconBlockHeader { + if x != nil { + return x.AttestedHeader + } + return nil +} + +func (x *LightClientUpdate) GetNextSyncCommitee() *SyncCommittee { + if x != nil { + return x.NextSyncCommitee + } + return nil +} + +func (x *LightClientUpdate) GetNextSyncCommitteeBranch() [][]byte { + if x != nil { + return x.NextSyncCommitteeBranch + } + return nil +} + +func (x *LightClientUpdate) GetFinalizedHeader() *BeaconBlockHeader { + if x != nil { + return x.FinalizedHeader + } + return nil +} + +func (x *LightClientUpdate) GetFinalityBranch() [][]byte { + if x != nil { + return x.FinalityBranch + } + return nil +} + +func (x *LightClientUpdate) GetSyncAggregate() *SyncAggregate { + if x != nil { + return x.SyncAggregate + } + return nil +} + +func (x *LightClientUpdate) GetSignatureSlot() uint64 { + if x != nil { + return x.SignatureSlot + } + return 0 +} + +type LightClientFinalityUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AttestedHeader *BeaconBlockHeader `protobuf:"bytes,1,opt,name=AttestedHeader,json=attested_header,proto3" json:"AttestedHeader,omitempty"` + FinalizedHeader *BeaconBlockHeader `protobuf:"bytes,2,opt,name=FinalizedHeader,json=finalized_header,proto3" json:"FinalizedHeader,omitempty"` + FinalityBranch [][]byte `protobuf:"bytes,3,rep,name=FinalityBranch,json=finality_branch,proto3" json:"FinalityBranch,omitempty" ssz-size:"6,32"` // @gotags: ssz-size:"6,32" + SyncAggregate *SyncAggregate `protobuf:"bytes,4,opt,name=SyncAggregate,json=sync_aggregate,proto3" 
json:"SyncAggregate,omitempty"` + SignatureSlot uint64 `protobuf:"varint,5,opt,name=SignatureSlot,json=signature_slot,proto3" json:"SignatureSlot,omitempty"` +} + +func (x *LightClientFinalityUpdate) Reset() { + *x = LightClientFinalityUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LightClientFinalityUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LightClientFinalityUpdate) ProtoMessage() {} + +func (x *LightClientFinalityUpdate) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LightClientFinalityUpdate.ProtoReflect.Descriptor instead. +func (*LightClientFinalityUpdate) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{18} +} + +func (x *LightClientFinalityUpdate) GetAttestedHeader() *BeaconBlockHeader { + if x != nil { + return x.AttestedHeader + } + return nil +} + +func (x *LightClientFinalityUpdate) GetFinalizedHeader() *BeaconBlockHeader { + if x != nil { + return x.FinalizedHeader + } + return nil +} + +func (x *LightClientFinalityUpdate) GetFinalityBranch() [][]byte { + if x != nil { + return x.FinalityBranch + } + return nil +} + +func (x *LightClientFinalityUpdate) GetSyncAggregate() *SyncAggregate { + if x != nil { + return x.SyncAggregate + } + return nil +} + +func (x *LightClientFinalityUpdate) GetSignatureSlot() uint64 { + if x != nil { + return x.SignatureSlot + } + return 0 +} + +type LightClientOptimisticUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AttestedHeader *BeaconBlockHeader 
`protobuf:"bytes,1,opt,name=AttestedHeader,json=attested_header,proto3" json:"AttestedHeader,omitempty"` + SyncAggregate *SyncAggregate `protobuf:"bytes,2,opt,name=SyncAggregate,json=sync_aggregate,proto3" json:"SyncAggregate,omitempty"` + SignatureSlot uint64 `protobuf:"varint,3,opt,name=SignatureSlot,json=signature_slot,proto3" json:"SignatureSlot,omitempty"` +} + +func (x *LightClientOptimisticUpdate) Reset() { + *x = LightClientOptimisticUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_beacon_block_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LightClientOptimisticUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LightClientOptimisticUpdate) ProtoMessage() {} + +func (x *LightClientOptimisticUpdate) ProtoReflect() protoreflect.Message { + mi := &file_beacon_block_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LightClientOptimisticUpdate.ProtoReflect.Descriptor instead. 
+func (*LightClientOptimisticUpdate) Descriptor() ([]byte, []int) { + return file_beacon_block_proto_rawDescGZIP(), []int{19} +} + +func (x *LightClientOptimisticUpdate) GetAttestedHeader() *BeaconBlockHeader { + if x != nil { + return x.AttestedHeader + } + return nil +} + +func (x *LightClientOptimisticUpdate) GetSyncAggregate() *SyncAggregate { + if x != nil { + return x.SyncAggregate + } + return nil +} + +func (x *LightClientOptimisticUpdate) GetSignatureSlot() uint64 { + if x != nil { + return x.SignatureSlot + } + return 0 +} + +var File_beacon_block_proto protoreflect.FileDescriptor + +var file_beacon_block_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x22, 0x6a, + 0x0a, 0x08, 0x45, 0x74, 0x68, 0x31, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x04, 0x52, 0x6f, + 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x23, 0x0a, 0x0c, 0x44, 0x65, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x64, 0x65, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x22, 0xa0, 0x01, 0x0a, 0x11, 0x42, + 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x53, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, + 0x73, 0x6c, 0x6f, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x70, 0x72, 0x6f, + 0x70, 0x6f, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 
0x1f, 0x0a, 0x0a, 0x50, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, + 0x12, 0x1b, 0x0a, 0x08, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x09, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x22, 0x6d, 0x0a, + 0x17, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, + 0x72, 0x70, 0x63, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, + 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x94, 0x01, 0x0a, + 0x08, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x07, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x31, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6c, 0x69, 0x67, + 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x42, 0x65, 0x61, 0x63, + 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x73, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x31, 0x12, 0x43, + 0x0a, 0x07, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x32, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, + 0x64, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 
0x52, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x5f, 0x32, 0x22, 0x67, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x6c, 0x6f, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x12, 0x2a, 0x0a, 0x0f, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, + 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x62, 0x65, 0x61, 0x63, 0x6f, + 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x22, 0x85, 0x01, 0x0a, + 0x0b, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x0f, + 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x69, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x62, 0x69, 0x74, 0x73, 0x12, 0x2d, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, + 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x22, 0xa6, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x15, + 0x57, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x0c, 0x52, 0x16, 0x77, 0x69, 0x74, + 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x41, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x52, 0x6f, 0x6f, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x52, 0x6f, 0x6f, 0x74, 0x22, 0x4a, 0x0a, + 0x07, 0x44, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x29, + 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6c, + 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x44, + 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x6b, 0x0a, 0x13, 0x53, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, + 0x12, 0x36, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, + 0x63, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x4e, 0x0a, 0x0d, 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, + 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x70, 0x6f, 0x63, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 
0x27, 0x0a, + 0x0e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x77, 0x0a, 0x0d, 0x53, 0x79, 0x6e, 0x63, 0x41, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x53, 0x79, 0x6e, 0x63, 0x43, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x65, 0x65, 0x42, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x13, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, + 0x65, 0x5f, 0x62, 0x69, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x65, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x74, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, + 0xe0, 0x03, 0x0a, 0x10, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x12, 0x23, 0x0a, 0x0c, 0x46, 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, + 0x70, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x66, 0x65, 0x65, + 0x5f, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x23, 0x0a, 0x0c, 0x52, 0x65, 0x63, + 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0d, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x1d, + 0x0a, 0x09, 0x4c, 0x6f, 
0x67, 0x73, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x6f, 0x6d, 0x12, 0x1f, 0x0a, + 0x0a, 0x50, 0x72, 0x65, 0x76, 0x52, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x72, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x12, 0x21, + 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x1b, 0x0a, 0x08, 0x47, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x19, + 0x0a, 0x07, 0x47, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1d, 0x0a, 0x09, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x44, 0x61, 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x72, + 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x12, 0x27, 0x0a, 0x0d, 0x42, 0x61, 0x73, 0x65, 0x46, 0x65, + 0x65, 0x50, 0x65, 0x72, 0x47, 0x61, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x62, + 0x61, 0x73, 0x65, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x67, 0x61, 0x73, 0x12, + 0x1d, 0x0a, 0x09, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x12, 0x22, + 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0e, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0xc8, 0x04, 0x0a, 0x13, 0x42, 0x65, 0x61, 
0x63, 0x6f, 0x6e, 0x42, 0x6f, 0x64, + 0x79, 0x42, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x72, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0c, 0x52, 0x61, + 0x6e, 0x64, 0x61, 0x6f, 0x52, 0x65, 0x76, 0x65, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0d, 0x72, 0x61, 0x6e, 0x64, 0x61, 0x6f, 0x5f, 0x72, 0x65, 0x76, 0x65, 0x61, 0x6c, 0x12, + 0x2f, 0x0a, 0x08, 0x45, 0x74, 0x68, 0x31, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x74, 0x68, + 0x31, 0x44, 0x61, 0x74, 0x61, 0x52, 0x09, 0x65, 0x74, 0x68, 0x31, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x1a, 0x0a, 0x08, 0x47, 0x72, 0x61, 0x66, 0x66, 0x69, 0x74, 0x69, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x08, 0x67, 0x72, 0x61, 0x66, 0x66, 0x69, 0x74, 0x69, 0x12, 0x41, 0x0a, 0x11, + 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, + 0x70, 0x63, 0x2e, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x52, 0x12, 0x70, 0x72, 0x6f, + 0x70, 0x6f, 0x73, 0x65, 0x72, 0x5f, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x41, 0x0a, 0x11, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x72, 0x53, 0x6c, 0x61, 0x73, 0x68, + 0x69, 0x6e, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x69, 0x67, + 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x52, 0x12, + 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x39, 0x0a, 0x0c, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, + 0x72, 0x70, 0x63, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2d, 0x0a, + 
0x08, 0x44, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x70, 0x6f, 0x73, + 0x69, 0x74, 0x52, 0x08, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x73, 0x12, 0x46, 0x0a, 0x0e, + 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x73, 0x18, 0x08, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, + 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, + 0x78, 0x69, 0x74, 0x52, 0x0f, 0x76, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x5f, 0x65, + 0x78, 0x69, 0x74, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x53, 0x79, 0x6e, 0x63, 0x41, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6c, 0x69, + 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x41, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x65, 0x52, 0x0e, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x11, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xbe, 0x01, + 0x0a, 0x14, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x65, 0x6c, + 0x6c, 0x61, 0x74, 0x72, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x6c, 0x6f, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x50, 0x72, + 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0e, 0x70, 0x72, 0x6f, 
0x70, 0x6f, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x12, 0x1f, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x6f, + 0x6f, 0x74, 0x12, 0x17, 0x0a, 0x09, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x31, 0x0a, 0x04, 0x42, + 0x6f, 0x64, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6c, 0x69, 0x67, 0x68, + 0x74, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x42, + 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x72, 0x69, 0x78, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x22, 0x72, + 0x0a, 0x1a, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x42, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x72, 0x69, 0x78, 0x12, 0x36, 0x0a, 0x05, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6c, 0x69, + 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x42, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x72, 0x69, 0x78, 0x52, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x22, 0xdd, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, 0x33, 0x0a, 0x06, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6c, 0x69, + 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x4d, 0x0a, 0x14, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 
0x79, 0x6e, 0x63, 0x43, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x52, 0x16, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x12, + 0x41, 0x0a, 0x1a, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x1d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x79, 0x6e, + 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x5f, 0x62, 0x72, 0x61, 0x6e, + 0x63, 0x68, 0x22, 0x38, 0x0a, 0x0d, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x74, 0x65, 0x65, 0x12, 0x27, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x16, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x79, + 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x22, 0xb6, 0x03, 0x0a, + 0x11, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6c, 0x69, 0x67, + 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x10, 0x4e, 0x65, 0x78, 0x74, + 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x79, + 0x6e, 0x63, 
0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x52, 0x13, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, + 0x12, 0x3b, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x74, 0x65, 0x65, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x1a, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x74, 0x65, 0x65, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x12, 0x46, 0x0a, + 0x0f, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, + 0x63, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x10, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0e, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0f, 0x66, + 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x12, 0x3e, + 0x0a, 0x0d, 0x53, 0x79, 0x6e, 0x63, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, + 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x52, 0x0e, + 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x12, 0x25, + 0x0a, 0x0d, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x22, 0xb9, 0x02, 0x0a, 0x19, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6e, 
0x61, 0x6c, 0x69, 0x74, 0x79, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6c, 0x69, + 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, + 0x65, 0x64, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0f, 0x46, 0x69, 0x6e, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x65, + 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, + 0x10, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x27, 0x0a, 0x0e, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x42, 0x72, 0x61, + 0x6e, 0x63, 0x68, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0f, 0x66, 0x69, 0x6e, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x12, 0x3e, 0x0a, 0x0d, 0x53, 0x79, + 0x6e, 0x63, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x79, 0x6e, + 0x63, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x52, 0x0e, 0x73, 0x79, 0x6e, 0x63, + 0x5f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0e, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x6c, 0x6f, + 0x74, 0x22, 0xca, 0x01, 0x0a, 0x1b, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x73, 0x74, 0x69, 0x63, 0x55, 0x70, 0x64, 
0x61, 0x74, + 0x65, 0x12, 0x44, 0x0a, 0x0e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6c, 0x69, 0x67, 0x68, + 0x74, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x64, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0d, 0x53, 0x79, 0x6e, 0x63, 0x41, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x41, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x52, 0x0e, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x61, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_beacon_block_proto_rawDescOnce sync.Once + file_beacon_block_proto_rawDescData = file_beacon_block_proto_rawDesc +) + +func file_beacon_block_proto_rawDescGZIP() []byte { + file_beacon_block_proto_rawDescOnce.Do(func() { + file_beacon_block_proto_rawDescData = protoimpl.X.CompressGZIP(file_beacon_block_proto_rawDescData) + }) + return file_beacon_block_proto_rawDescData +} + +var file_beacon_block_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_beacon_block_proto_goTypes = []interface{}{ + (*Eth1Data)(nil), // 0: lightrpc.Eth1Data + (*BeaconBlockHeader)(nil), // 1: lightrpc.BeaconBlockHeader + (*SignedBeaconBlockHeader)(nil), // 2: lightrpc.SignedBeaconBlockHeader + (*Slashing)(nil), // 3: lightrpc.Slashing + (*AttestationData)(nil), // 4: lightrpc.AttestationData + (*Attestation)(nil), // 5: lightrpc.Attestation + 
(*DepositData)(nil), // 6: lightrpc.DepositData + (*Deposit)(nil), // 7: lightrpc.Deposit + (*SignedVoluntaryExit)(nil), // 8: lightrpc.SignedVoluntaryExit + (*VoluntaryExit)(nil), // 9: lightrpc.VoluntaryExit + (*SyncAggregate)(nil), // 10: lightrpc.SyncAggregate + (*ExecutionPayload)(nil), // 11: lightrpc.ExecutionPayload + (*BeaconBodyBellatrix)(nil), // 12: lightrpc.BeaconBodyBellatrix + (*BeaconBlockBellatrix)(nil), // 13: lightrpc.BeaconBlockBellatrix + (*SignedBeaconBlockBellatrix)(nil), // 14: lightrpc.SignedBeaconBlockBellatrix + (*LightClientBootstrap)(nil), // 15: lightrpc.LightClientBootstrap + (*SyncCommittee)(nil), // 16: lightrpc.SyncCommittee + (*LightClientUpdate)(nil), // 17: lightrpc.LightClientUpdate + (*LightClientFinalityUpdate)(nil), // 18: lightrpc.LightClientFinalityUpdate + (*LightClientOptimisticUpdate)(nil), // 19: lightrpc.LightClientOptimisticUpdate +} +var file_beacon_block_proto_depIdxs = []int32{ + 1, // 0: lightrpc.SignedBeaconBlockHeader.Header:type_name -> lightrpc.BeaconBlockHeader + 2, // 1: lightrpc.Slashing.Header1:type_name -> lightrpc.SignedBeaconBlockHeader + 2, // 2: lightrpc.Slashing.Header2:type_name -> lightrpc.SignedBeaconBlockHeader + 4, // 3: lightrpc.Attestation.Data:type_name -> lightrpc.AttestationData + 6, // 4: lightrpc.Deposit.Data:type_name -> lightrpc.DepositData + 9, // 5: lightrpc.SignedVoluntaryExit.VolunaryExit:type_name -> lightrpc.VoluntaryExit + 0, // 6: lightrpc.BeaconBodyBellatrix.Eth1Data:type_name -> lightrpc.Eth1Data + 3, // 7: lightrpc.BeaconBodyBellatrix.ProposerSlashings:type_name -> lightrpc.Slashing + 3, // 8: lightrpc.BeaconBodyBellatrix.AttesterSlashings:type_name -> lightrpc.Slashing + 5, // 9: lightrpc.BeaconBodyBellatrix.Attestations:type_name -> lightrpc.Attestation + 7, // 10: lightrpc.BeaconBodyBellatrix.Deposits:type_name -> lightrpc.Deposit + 8, // 11: lightrpc.BeaconBodyBellatrix.VoluntaryExits:type_name -> lightrpc.SignedVoluntaryExit + 10, // 12: 
lightrpc.BeaconBodyBellatrix.SyncAggregate:type_name -> lightrpc.SyncAggregate + 11, // 13: lightrpc.BeaconBodyBellatrix.ExecutionPayload:type_name -> lightrpc.ExecutionPayload + 12, // 14: lightrpc.BeaconBlockBellatrix.Body:type_name -> lightrpc.BeaconBodyBellatrix + 13, // 15: lightrpc.SignedBeaconBlockBellatrix.Block:type_name -> lightrpc.BeaconBlockBellatrix + 1, // 16: lightrpc.LightClientBootstrap.Header:type_name -> lightrpc.BeaconBlockHeader + 16, // 17: lightrpc.LightClientBootstrap.CurrentSyncCommittee:type_name -> lightrpc.SyncCommittee + 1, // 18: lightrpc.LightClientUpdate.AttestedHeader:type_name -> lightrpc.BeaconBlockHeader + 16, // 19: lightrpc.LightClientUpdate.NextSyncCommitee:type_name -> lightrpc.SyncCommittee + 1, // 20: lightrpc.LightClientUpdate.FinalizedHeader:type_name -> lightrpc.BeaconBlockHeader + 10, // 21: lightrpc.LightClientUpdate.SyncAggregate:type_name -> lightrpc.SyncAggregate + 1, // 22: lightrpc.LightClientFinalityUpdate.AttestedHeader:type_name -> lightrpc.BeaconBlockHeader + 1, // 23: lightrpc.LightClientFinalityUpdate.FinalizedHeader:type_name -> lightrpc.BeaconBlockHeader + 10, // 24: lightrpc.LightClientFinalityUpdate.SyncAggregate:type_name -> lightrpc.SyncAggregate + 1, // 25: lightrpc.LightClientOptimisticUpdate.AttestedHeader:type_name -> lightrpc.BeaconBlockHeader + 10, // 26: lightrpc.LightClientOptimisticUpdate.SyncAggregate:type_name -> lightrpc.SyncAggregate + 27, // [27:27] is the sub-list for method output_type + 27, // [27:27] is the sub-list for method input_type + 27, // [27:27] is the sub-list for extension type_name + 27, // [27:27] is the sub-list for extension extendee + 0, // [0:27] is the sub-list for field type_name +} + +func init() { file_beacon_block_proto_init() } +func file_beacon_block_proto_init() { + if File_beacon_block_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_beacon_block_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*Eth1Data); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BeaconBlockHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignedBeaconBlockHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Slashing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttestationData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Attestation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DepositData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Deposit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[8].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*SignedVoluntaryExit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VoluntaryExit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SyncAggregate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecutionPayload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BeaconBodyBellatrix); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BeaconBlockBellatrix); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignedBeaconBlockBellatrix); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LightClientBootstrap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SyncCommittee); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LightClientUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LightClientFinalityUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_beacon_block_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LightClientOptimisticUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_beacon_block_proto_rawDesc, + NumEnums: 0, + NumMessages: 20, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_beacon_block_proto_goTypes, + DependencyIndexes: file_beacon_block_proto_depIdxs, + MessageInfos: file_beacon_block_proto_msgTypes, + }.Build() + File_beacon_block_proto = out.File + file_beacon_block_proto_rawDesc = nil + file_beacon_block_proto_goTypes = nil + file_beacon_block_proto_depIdxs = nil +} diff --git a/cmd/lightclient/rpc/lightrpc/beacon_block.pb_encoding.go b/cmd/lightclient/rpc/lightrpc/beacon_block.pb_encoding.go new file mode 100644 index 00000000000..d2f98c55036 --- /dev/null +++ b/cmd/lightclient/rpc/lightrpc/beacon_block.pb_encoding.go @@ -0,0 +1,2955 @@ +// Code generated by fastssz. 
DO NOT EDIT. +// Hash: 2fd264dc6cb222d77296a4351e56923e7dda43c34a1e03bfc18db9a8cf649a95 +// Version: 0.1.2 +package lightrpc + +import ( + ssz "github.com/ferranbt/fastssz" +) + +// MarshalSSZ ssz marshals the Eth1Data object +func (e *Eth1Data) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(e) +} + +// MarshalSSZTo ssz marshals the Eth1Data object to a target array +func (e *Eth1Data) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Root' + if size := len(e.Root); size != 32 { + err = ssz.ErrBytesLengthFn("Eth1Data.Root", size, 32) + return + } + dst = append(dst, e.Root...) + + // Field (1) 'DepositCount' + dst = ssz.MarshalUint64(dst, e.DepositCount) + + // Field (2) 'BlockHash' + if size := len(e.BlockHash); size != 32 { + err = ssz.ErrBytesLengthFn("Eth1Data.BlockHash", size, 32) + return + } + dst = append(dst, e.BlockHash...) + + return +} + +// UnmarshalSSZ ssz unmarshals the Eth1Data object +func (e *Eth1Data) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 72 { + return ssz.ErrSize + } + + // Field (0) 'Root' + if cap(e.Root) == 0 { + e.Root = make([]byte, 0, len(buf[0:32])) + } + e.Root = append(e.Root, buf[0:32]...) + + // Field (1) 'DepositCount' + e.DepositCount = ssz.UnmarshallUint64(buf[32:40]) + + // Field (2) 'BlockHash' + if cap(e.BlockHash) == 0 { + e.BlockHash = make([]byte, 0, len(buf[40:72])) + } + e.BlockHash = append(e.BlockHash, buf[40:72]...) 
+ + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the Eth1Data object +func (e *Eth1Data) SizeSSZ() (size int) { + size = 72 + return +} + +// HashTreeRoot ssz hashes the Eth1Data object +func (e *Eth1Data) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(e) +} + +// HashTreeRootWith ssz hashes the Eth1Data object with a hasher +func (e *Eth1Data) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Root' + if size := len(e.Root); size != 32 { + err = ssz.ErrBytesLengthFn("Eth1Data.Root", size, 32) + return + } + hh.PutBytes(e.Root) + + // Field (1) 'DepositCount' + hh.PutUint64(e.DepositCount) + + // Field (2) 'BlockHash' + if size := len(e.BlockHash); size != 32 { + err = ssz.ErrBytesLengthFn("Eth1Data.BlockHash", size, 32) + return + } + hh.PutBytes(e.BlockHash) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the Eth1Data object +func (e *Eth1Data) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(e) +} + +// MarshalSSZ ssz marshals the BeaconBlockHeader object +func (b *BeaconBlockHeader) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(b) +} + +// MarshalSSZTo ssz marshals the BeaconBlockHeader object to a target array +func (b *BeaconBlockHeader) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Slot' + dst = ssz.MarshalUint64(dst, b.Slot) + + // Field (1) 'ProposerIndex' + dst = ssz.MarshalUint64(dst, b.ProposerIndex) + + // Field (2) 'ParentRoot' + if size := len(b.ParentRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockHeader.ParentRoot", size, 32) + return + } + dst = append(dst, b.ParentRoot...) + + // Field (3) 'Root' + if size := len(b.Root); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockHeader.Root", size, 32) + return + } + dst = append(dst, b.Root...) 
+ + // Field (4) 'BodyRoot' + if size := len(b.BodyRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockHeader.BodyRoot", size, 32) + return + } + dst = append(dst, b.BodyRoot...) + + return +} + +// UnmarshalSSZ ssz unmarshals the BeaconBlockHeader object +func (b *BeaconBlockHeader) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 112 { + return ssz.ErrSize + } + + // Field (0) 'Slot' + b.Slot = ssz.UnmarshallUint64(buf[0:8]) + + // Field (1) 'ProposerIndex' + b.ProposerIndex = ssz.UnmarshallUint64(buf[8:16]) + + // Field (2) 'ParentRoot' + if cap(b.ParentRoot) == 0 { + b.ParentRoot = make([]byte, 0, len(buf[16:48])) + } + b.ParentRoot = append(b.ParentRoot, buf[16:48]...) + + // Field (3) 'Root' + if cap(b.Root) == 0 { + b.Root = make([]byte, 0, len(buf[48:80])) + } + b.Root = append(b.Root, buf[48:80]...) + + // Field (4) 'BodyRoot' + if cap(b.BodyRoot) == 0 { + b.BodyRoot = make([]byte, 0, len(buf[80:112])) + } + b.BodyRoot = append(b.BodyRoot, buf[80:112]...) 
+ + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlockHeader object +func (b *BeaconBlockHeader) SizeSSZ() (size int) { + size = 112 + return +} + +// HashTreeRoot ssz hashes the BeaconBlockHeader object +func (b *BeaconBlockHeader) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(b) +} + +// HashTreeRootWith ssz hashes the BeaconBlockHeader object with a hasher +func (b *BeaconBlockHeader) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Slot' + hh.PutUint64(b.Slot) + + // Field (1) 'ProposerIndex' + hh.PutUint64(b.ProposerIndex) + + // Field (2) 'ParentRoot' + if size := len(b.ParentRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockHeader.ParentRoot", size, 32) + return + } + hh.PutBytes(b.ParentRoot) + + // Field (3) 'Root' + if size := len(b.Root); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockHeader.Root", size, 32) + return + } + hh.PutBytes(b.Root) + + // Field (4) 'BodyRoot' + if size := len(b.BodyRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockHeader.BodyRoot", size, 32) + return + } + hh.PutBytes(b.BodyRoot) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the BeaconBlockHeader object +func (b *BeaconBlockHeader) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(b) +} + +// MarshalSSZ ssz marshals the SignedBeaconBlockHeader object +func (s *SignedBeaconBlockHeader) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// MarshalSSZTo ssz marshals the SignedBeaconBlockHeader object to a target array +func (s *SignedBeaconBlockHeader) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Header' + if s.Header == nil { + s.Header = new(BeaconBlockHeader) + } + if dst, err = s.Header.MarshalSSZTo(dst); err != nil { + return + } + + // Field (1) 'Signature' + if size := len(s.Signature); size != 96 { + err = ssz.ErrBytesLengthFn("SignedBeaconBlockHeader.Signature", size, 96) + 
return + } + dst = append(dst, s.Signature...) + + return +} + +// UnmarshalSSZ ssz unmarshals the SignedBeaconBlockHeader object +func (s *SignedBeaconBlockHeader) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 208 { + return ssz.ErrSize + } + + // Field (0) 'Header' + if s.Header == nil { + s.Header = new(BeaconBlockHeader) + } + if err = s.Header.UnmarshalSSZ(buf[0:112]); err != nil { + return err + } + + // Field (1) 'Signature' + if cap(s.Signature) == 0 { + s.Signature = make([]byte, 0, len(buf[112:208])) + } + s.Signature = append(s.Signature, buf[112:208]...) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the SignedBeaconBlockHeader object +func (s *SignedBeaconBlockHeader) SizeSSZ() (size int) { + size = 208 + return +} + +// HashTreeRoot ssz hashes the SignedBeaconBlockHeader object +func (s *SignedBeaconBlockHeader) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the SignedBeaconBlockHeader object with a hasher +func (s *SignedBeaconBlockHeader) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Header' + if s.Header == nil { + s.Header = new(BeaconBlockHeader) + } + if err = s.Header.HashTreeRootWith(hh); err != nil { + return + } + + // Field (1) 'Signature' + if size := len(s.Signature); size != 96 { + err = ssz.ErrBytesLengthFn("SignedBeaconBlockHeader.Signature", size, 96) + return + } + hh.PutBytes(s.Signature) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the SignedBeaconBlockHeader object +func (s *SignedBeaconBlockHeader) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(s) +} + +// MarshalSSZ ssz marshals the Slashing object +func (s *Slashing) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// MarshalSSZTo ssz marshals the Slashing object to a target array +func (s *Slashing) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // 
Field (0) 'Header1' + if s.Header1 == nil { + s.Header1 = new(SignedBeaconBlockHeader) + } + if dst, err = s.Header1.MarshalSSZTo(dst); err != nil { + return + } + + // Field (1) 'Header2' + if s.Header2 == nil { + s.Header2 = new(SignedBeaconBlockHeader) + } + if dst, err = s.Header2.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the Slashing object +func (s *Slashing) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 416 { + return ssz.ErrSize + } + + // Field (0) 'Header1' + if s.Header1 == nil { + s.Header1 = new(SignedBeaconBlockHeader) + } + if err = s.Header1.UnmarshalSSZ(buf[0:208]); err != nil { + return err + } + + // Field (1) 'Header2' + if s.Header2 == nil { + s.Header2 = new(SignedBeaconBlockHeader) + } + if err = s.Header2.UnmarshalSSZ(buf[208:416]); err != nil { + return err + } + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the Slashing object +func (s *Slashing) SizeSSZ() (size int) { + size = 416 + return +} + +// HashTreeRoot ssz hashes the Slashing object +func (s *Slashing) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the Slashing object with a hasher +func (s *Slashing) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Header1' + if s.Header1 == nil { + s.Header1 = new(SignedBeaconBlockHeader) + } + if err = s.Header1.HashTreeRootWith(hh); err != nil { + return + } + + // Field (1) 'Header2' + if s.Header2 == nil { + s.Header2 = new(SignedBeaconBlockHeader) + } + if err = s.Header2.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the Slashing object +func (s *Slashing) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(s) +} + +// MarshalSSZ ssz marshals the AttestationData object +func (a *AttestationData) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(a) +} + 
+// MarshalSSZTo ssz marshals the AttestationData object to a target array +func (a *AttestationData) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Slot' + dst = ssz.MarshalUint64(dst, a.Slot) + + // Field (1) 'Index' + dst = ssz.MarshalUint64(dst, a.Index) + + // Field (2) 'BeaconBlockHash' + if size := len(a.BeaconBlockHash); size != 32 { + err = ssz.ErrBytesLengthFn("AttestationData.BeaconBlockHash", size, 32) + return + } + dst = append(dst, a.BeaconBlockHash...) + + return +} + +// UnmarshalSSZ ssz unmarshals the AttestationData object +func (a *AttestationData) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 48 { + return ssz.ErrSize + } + + // Field (0) 'Slot' + a.Slot = ssz.UnmarshallUint64(buf[0:8]) + + // Field (1) 'Index' + a.Index = ssz.UnmarshallUint64(buf[8:16]) + + // Field (2) 'BeaconBlockHash' + if cap(a.BeaconBlockHash) == 0 { + a.BeaconBlockHash = make([]byte, 0, len(buf[16:48])) + } + a.BeaconBlockHash = append(a.BeaconBlockHash, buf[16:48]...) 
+ + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the AttestationData object +func (a *AttestationData) SizeSSZ() (size int) { + size = 48 + return +} + +// HashTreeRoot ssz hashes the AttestationData object +func (a *AttestationData) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(a) +} + +// HashTreeRootWith ssz hashes the AttestationData object with a hasher +func (a *AttestationData) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Slot' + hh.PutUint64(a.Slot) + + // Field (1) 'Index' + hh.PutUint64(a.Index) + + // Field (2) 'BeaconBlockHash' + if size := len(a.BeaconBlockHash); size != 32 { + err = ssz.ErrBytesLengthFn("AttestationData.BeaconBlockHash", size, 32) + return + } + hh.PutBytes(a.BeaconBlockHash) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the AttestationData object +func (a *AttestationData) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(a) +} + +// MarshalSSZ ssz marshals the Attestation object +func (a *Attestation) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(a) +} + +// MarshalSSZTo ssz marshals the Attestation object to a target array +func (a *Attestation) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(148) + + // Offset (0) 'AggregationBits' + dst = ssz.WriteOffset(dst, offset) + offset += len(a.AggregationBits) + + // Field (1) 'Data' + if a.Data == nil { + a.Data = new(AttestationData) + } + if dst, err = a.Data.MarshalSSZTo(dst); err != nil { + return + } + + // Field (2) 'Signature' + if size := len(a.Signature); size != 96 { + err = ssz.ErrBytesLengthFn("Attestation.Signature", size, 96) + return + } + dst = append(dst, a.Signature...) + + // Field (0) 'AggregationBits' + if size := len(a.AggregationBits); size > 2048 { + err = ssz.ErrBytesLengthFn("Attestation.AggregationBits", size, 2048) + return + } + dst = append(dst, a.AggregationBits...) 
+ + return +} + +// UnmarshalSSZ ssz unmarshals the Attestation object +func (a *Attestation) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 148 { + return ssz.ErrSize + } + + tail := buf + var o0 uint64 + + // Offset (0) 'AggregationBits' + if o0 = ssz.ReadOffset(buf[0:4]); o0 > size { + return ssz.ErrOffset + } + + if o0 < 148 { + return ssz.ErrInvalidVariableOffset + } + + // Field (1) 'Data' + if a.Data == nil { + a.Data = new(AttestationData) + } + if err = a.Data.UnmarshalSSZ(buf[4:52]); err != nil { + return err + } + + // Field (2) 'Signature' + if cap(a.Signature) == 0 { + a.Signature = make([]byte, 0, len(buf[52:148])) + } + a.Signature = append(a.Signature, buf[52:148]...) + + // Field (0) 'AggregationBits' + { + buf = tail[o0:] + if err = ssz.ValidateBitlist(buf, 2048); err != nil { + return err + } + if cap(a.AggregationBits) == 0 { + a.AggregationBits = make([]byte, 0, len(buf)) + } + a.AggregationBits = append(a.AggregationBits, buf...) 
+ } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the Attestation object +func (a *Attestation) SizeSSZ() (size int) { + size = 148 + + // Field (0) 'AggregationBits' + size += len(a.AggregationBits) + + return +} + +// HashTreeRoot ssz hashes the Attestation object +func (a *Attestation) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(a) +} + +// HashTreeRootWith ssz hashes the Attestation object with a hasher +func (a *Attestation) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'AggregationBits' + if len(a.AggregationBits) == 0 { + err = ssz.ErrEmptyBitlist + return + } + hh.PutBitlist(a.AggregationBits, 2048) + + // Field (1) 'Data' + if a.Data == nil { + a.Data = new(AttestationData) + } + if err = a.Data.HashTreeRootWith(hh); err != nil { + return + } + + // Field (2) 'Signature' + if size := len(a.Signature); size != 96 { + err = ssz.ErrBytesLengthFn("Attestation.Signature", size, 96) + return + } + hh.PutBytes(a.Signature) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the Attestation object +func (a *Attestation) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(a) +} + +// MarshalSSZ ssz marshals the DepositData object +func (d *DepositData) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(d) +} + +// MarshalSSZTo ssz marshals the DepositData object to a target array +func (d *DepositData) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'PubKey' + if size := len(d.PubKey); size != 48 { + err = ssz.ErrBytesLengthFn("DepositData.PubKey", size, 48) + return + } + dst = append(dst, d.PubKey...) + + // Field (1) 'WithdrawalCredentials' + if size := len(d.WithdrawalCredentials); size != 32 { + err = ssz.ErrBytesLengthFn("DepositData.WithdrawalCredentials", size, 32) + return + } + dst = append(dst, d.WithdrawalCredentials...) 
+ + // Field (2) 'Amount' + dst = ssz.MarshalUint64(dst, d.Amount) + + // Field (3) 'Signature' + if size := len(d.Signature); size != 96 { + err = ssz.ErrBytesLengthFn("DepositData.Signature", size, 96) + return + } + dst = append(dst, d.Signature...) + + return +} + +// UnmarshalSSZ ssz unmarshals the DepositData object +func (d *DepositData) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 184 { + return ssz.ErrSize + } + + // Field (0) 'PubKey' + if cap(d.PubKey) == 0 { + d.PubKey = make([]byte, 0, len(buf[0:48])) + } + d.PubKey = append(d.PubKey, buf[0:48]...) + + // Field (1) 'WithdrawalCredentials' + if cap(d.WithdrawalCredentials) == 0 { + d.WithdrawalCredentials = make([]byte, 0, len(buf[48:80])) + } + d.WithdrawalCredentials = append(d.WithdrawalCredentials, buf[48:80]...) + + // Field (2) 'Amount' + d.Amount = ssz.UnmarshallUint64(buf[80:88]) + + // Field (3) 'Signature' + if cap(d.Signature) == 0 { + d.Signature = make([]byte, 0, len(buf[88:184])) + } + d.Signature = append(d.Signature, buf[88:184]...) 
+ + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the DepositData object +func (d *DepositData) SizeSSZ() (size int) { + size = 184 + return +} + +// HashTreeRoot ssz hashes the DepositData object +func (d *DepositData) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(d) +} + +// HashTreeRootWith ssz hashes the DepositData object with a hasher +func (d *DepositData) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'PubKey' + if size := len(d.PubKey); size != 48 { + err = ssz.ErrBytesLengthFn("DepositData.PubKey", size, 48) + return + } + hh.PutBytes(d.PubKey) + + // Field (1) 'WithdrawalCredentials' + if size := len(d.WithdrawalCredentials); size != 32 { + err = ssz.ErrBytesLengthFn("DepositData.WithdrawalCredentials", size, 32) + return + } + hh.PutBytes(d.WithdrawalCredentials) + + // Field (2) 'Amount' + hh.PutUint64(d.Amount) + + // Field (3) 'Signature' + if size := len(d.Signature); size != 96 { + err = ssz.ErrBytesLengthFn("DepositData.Signature", size, 96) + return + } + hh.PutBytes(d.Signature) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the DepositData object +func (d *DepositData) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(d) +} + +// MarshalSSZ ssz marshals the Deposit object +func (d *Deposit) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(d) +} + +// MarshalSSZTo ssz marshals the Deposit object to a target array +func (d *Deposit) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Proof' + if size := len(d.Proof); size != 33 { + err = ssz.ErrVectorLengthFn("Deposit.Proof", size, 33) + return + } + for ii := 0; ii < 33; ii++ { + if size := len(d.Proof[ii]); size != 32 { + err = ssz.ErrBytesLengthFn("Deposit.Proof[ii]", size, 32) + return + } + dst = append(dst, d.Proof[ii]...) 
+ } + + // Field (1) 'Data' + if d.Data == nil { + d.Data = new(DepositData) + } + if dst, err = d.Data.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the Deposit object +func (d *Deposit) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 1240 { + return ssz.ErrSize + } + + // Field (0) 'Proof' + d.Proof = make([][]byte, 33) + for ii := 0; ii < 33; ii++ { + if cap(d.Proof[ii]) == 0 { + d.Proof[ii] = make([]byte, 0, len(buf[0:1056][ii*32:(ii+1)*32])) + } + d.Proof[ii] = append(d.Proof[ii], buf[0:1056][ii*32:(ii+1)*32]...) + } + + // Field (1) 'Data' + if d.Data == nil { + d.Data = new(DepositData) + } + if err = d.Data.UnmarshalSSZ(buf[1056:1240]); err != nil { + return err + } + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the Deposit object +func (d *Deposit) SizeSSZ() (size int) { + size = 1240 + return +} + +// HashTreeRoot ssz hashes the Deposit object +func (d *Deposit) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(d) +} + +// HashTreeRootWith ssz hashes the Deposit object with a hasher +func (d *Deposit) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Proof' + { + if size := len(d.Proof); size != 33 { + err = ssz.ErrVectorLengthFn("Deposit.Proof", size, 33) + return + } + subIndx := hh.Index() + for _, i := range d.Proof { + if len(i) != 32 { + err = ssz.ErrBytesLength + return + } + hh.Append(i) + } + hh.Merkleize(subIndx) + } + + // Field (1) 'Data' + if d.Data == nil { + d.Data = new(DepositData) + } + if err = d.Data.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the Deposit object +func (d *Deposit) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(d) +} + +// MarshalSSZ ssz marshals the SignedVoluntaryExit object +func (s *SignedVoluntaryExit) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// 
MarshalSSZTo ssz marshals the SignedVoluntaryExit object to a target array +func (s *SignedVoluntaryExit) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'VolunaryExit' + if s.VolunaryExit == nil { + s.VolunaryExit = new(VoluntaryExit) + } + if dst, err = s.VolunaryExit.MarshalSSZTo(dst); err != nil { + return + } + + // Field (1) 'Signature' + if size := len(s.Signature); size != 96 { + err = ssz.ErrBytesLengthFn("SignedVoluntaryExit.Signature", size, 96) + return + } + dst = append(dst, s.Signature...) + + return +} + +// UnmarshalSSZ ssz unmarshals the SignedVoluntaryExit object +func (s *SignedVoluntaryExit) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 112 { + return ssz.ErrSize + } + + // Field (0) 'VolunaryExit' + if s.VolunaryExit == nil { + s.VolunaryExit = new(VoluntaryExit) + } + if err = s.VolunaryExit.UnmarshalSSZ(buf[0:16]); err != nil { + return err + } + + // Field (1) 'Signature' + if cap(s.Signature) == 0 { + s.Signature = make([]byte, 0, len(buf[16:112])) + } + s.Signature = append(s.Signature, buf[16:112]...) 
+ + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the SignedVoluntaryExit object +func (s *SignedVoluntaryExit) SizeSSZ() (size int) { + size = 112 + return +} + +// HashTreeRoot ssz hashes the SignedVoluntaryExit object +func (s *SignedVoluntaryExit) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the SignedVoluntaryExit object with a hasher +func (s *SignedVoluntaryExit) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'VolunaryExit' + if s.VolunaryExit == nil { + s.VolunaryExit = new(VoluntaryExit) + } + if err = s.VolunaryExit.HashTreeRootWith(hh); err != nil { + return + } + + // Field (1) 'Signature' + if size := len(s.Signature); size != 96 { + err = ssz.ErrBytesLengthFn("SignedVoluntaryExit.Signature", size, 96) + return + } + hh.PutBytes(s.Signature) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the SignedVoluntaryExit object +func (s *SignedVoluntaryExit) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(s) +} + +// MarshalSSZ ssz marshals the VoluntaryExit object +func (v *VoluntaryExit) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(v) +} + +// MarshalSSZTo ssz marshals the VoluntaryExit object to a target array +func (v *VoluntaryExit) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Epoch' + dst = ssz.MarshalUint64(dst, v.Epoch) + + // Field (1) 'ValidatorIndex' + dst = ssz.MarshalUint64(dst, v.ValidatorIndex) + + return +} + +// UnmarshalSSZ ssz unmarshals the VoluntaryExit object +func (v *VoluntaryExit) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 16 { + return ssz.ErrSize + } + + // Field (0) 'Epoch' + v.Epoch = ssz.UnmarshallUint64(buf[0:8]) + + // Field (1) 'ValidatorIndex' + v.ValidatorIndex = ssz.UnmarshallUint64(buf[8:16]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the VoluntaryExit object 
+func (v *VoluntaryExit) SizeSSZ() (size int) { + size = 16 + return +} + +// HashTreeRoot ssz hashes the VoluntaryExit object +func (v *VoluntaryExit) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(v) +} + +// HashTreeRootWith ssz hashes the VoluntaryExit object with a hasher +func (v *VoluntaryExit) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Epoch' + hh.PutUint64(v.Epoch) + + // Field (1) 'ValidatorIndex' + hh.PutUint64(v.ValidatorIndex) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the VoluntaryExit object +func (v *VoluntaryExit) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(v) +} + +// MarshalSSZ ssz marshals the SyncAggregate object +func (s *SyncAggregate) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// MarshalSSZTo ssz marshals the SyncAggregate object to a target array +func (s *SyncAggregate) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'SyncCommiteeBits' + if size := len(s.SyncCommiteeBits); size != 64 { + err = ssz.ErrBytesLengthFn("SyncAggregate.SyncCommiteeBits", size, 64) + return + } + dst = append(dst, s.SyncCommiteeBits...) + + // Field (1) 'SyncCommiteeSignature' + if size := len(s.SyncCommiteeSignature); size != 96 { + err = ssz.ErrBytesLengthFn("SyncAggregate.SyncCommiteeSignature", size, 96) + return + } + dst = append(dst, s.SyncCommiteeSignature...) + + return +} + +// UnmarshalSSZ ssz unmarshals the SyncAggregate object +func (s *SyncAggregate) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 160 { + return ssz.ErrSize + } + + // Field (0) 'SyncCommiteeBits' + if cap(s.SyncCommiteeBits) == 0 { + s.SyncCommiteeBits = make([]byte, 0, len(buf[0:64])) + } + s.SyncCommiteeBits = append(s.SyncCommiteeBits, buf[0:64]...) 
+ + // Field (1) 'SyncCommiteeSignature' + if cap(s.SyncCommiteeSignature) == 0 { + s.SyncCommiteeSignature = make([]byte, 0, len(buf[64:160])) + } + s.SyncCommiteeSignature = append(s.SyncCommiteeSignature, buf[64:160]...) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the SyncAggregate object +func (s *SyncAggregate) SizeSSZ() (size int) { + size = 160 + return +} + +// HashTreeRoot ssz hashes the SyncAggregate object +func (s *SyncAggregate) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the SyncAggregate object with a hasher +func (s *SyncAggregate) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'SyncCommiteeBits' + if size := len(s.SyncCommiteeBits); size != 64 { + err = ssz.ErrBytesLengthFn("SyncAggregate.SyncCommiteeBits", size, 64) + return + } + hh.PutBytes(s.SyncCommiteeBits) + + // Field (1) 'SyncCommiteeSignature' + if size := len(s.SyncCommiteeSignature); size != 96 { + err = ssz.ErrBytesLengthFn("SyncAggregate.SyncCommiteeSignature", size, 96) + return + } + hh.PutBytes(s.SyncCommiteeSignature) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the SyncAggregate object +func (s *SyncAggregate) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(s) +} + +// MarshalSSZ ssz marshals the ExecutionPayload object +func (e *ExecutionPayload) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(e) +} + +// MarshalSSZTo ssz marshals the ExecutionPayload object to a target array +func (e *ExecutionPayload) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(508) + + // Field (0) 'ParentHash' + if size := len(e.ParentHash); size != 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.ParentHash", size, 32) + return + } + dst = append(dst, e.ParentHash...) 
+ + // Field (1) 'FeeRecipient' + if size := len(e.FeeRecipient); size != 20 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.FeeRecipient", size, 20) + return + } + dst = append(dst, e.FeeRecipient...) + + // Field (2) 'StateRoot' + if size := len(e.StateRoot); size != 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.StateRoot", size, 32) + return + } + dst = append(dst, e.StateRoot...) + + // Field (3) 'ReceiptsRoot' + if size := len(e.ReceiptsRoot); size != 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.ReceiptsRoot", size, 32) + return + } + dst = append(dst, e.ReceiptsRoot...) + + // Field (4) 'LogsBloom' + if size := len(e.LogsBloom); size != 256 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.LogsBloom", size, 256) + return + } + dst = append(dst, e.LogsBloom...) + + // Field (5) 'PrevRandao' + if size := len(e.PrevRandao); size != 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.PrevRandao", size, 32) + return + } + dst = append(dst, e.PrevRandao...) + + // Field (6) 'BlockNumber' + dst = ssz.MarshalUint64(dst, e.BlockNumber) + + // Field (7) 'GasLimit' + dst = ssz.MarshalUint64(dst, e.GasLimit) + + // Field (8) 'GasUsed' + dst = ssz.MarshalUint64(dst, e.GasUsed) + + // Field (9) 'Timestamp' + dst = ssz.MarshalUint64(dst, e.Timestamp) + + // Offset (10) 'ExtraData' + dst = ssz.WriteOffset(dst, offset) + offset += len(e.ExtraData) + + // Field (11) 'BaseFeePerGas' + if size := len(e.BaseFeePerGas); size != 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.BaseFeePerGas", size, 32) + return + } + dst = append(dst, e.BaseFeePerGas...) + + // Field (12) 'BlockHash' + if size := len(e.BlockHash); size != 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.BlockHash", size, 32) + return + } + dst = append(dst, e.BlockHash...) 
+ + // Offset (13) 'Transactions' + dst = ssz.WriteOffset(dst, offset) + for ii := 0; ii < len(e.Transactions); ii++ { + offset += 4 + offset += len(e.Transactions[ii]) + } + + // Field (10) 'ExtraData' + if size := len(e.ExtraData); size > 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.ExtraData", size, 32) + return + } + dst = append(dst, e.ExtraData...) + + // Field (13) 'Transactions' + if size := len(e.Transactions); size > 1048576 { + err = ssz.ErrListTooBigFn("ExecutionPayload.Transactions", size, 1048576) + return + } + { + offset = 4 * len(e.Transactions) + for ii := 0; ii < len(e.Transactions); ii++ { + dst = ssz.WriteOffset(dst, offset) + offset += len(e.Transactions[ii]) + } + } + for ii := 0; ii < len(e.Transactions); ii++ { + if size := len(e.Transactions[ii]); size > 1073741824 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.Transactions[ii]", size, 1073741824) + return + } + dst = append(dst, e.Transactions[ii]...) + } + + return +} + +// UnmarshalSSZ ssz unmarshals the ExecutionPayload object +func (e *ExecutionPayload) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 508 { + return ssz.ErrSize + } + + tail := buf + var o10, o13 uint64 + + // Field (0) 'ParentHash' + if cap(e.ParentHash) == 0 { + e.ParentHash = make([]byte, 0, len(buf[0:32])) + } + e.ParentHash = append(e.ParentHash, buf[0:32]...) + + // Field (1) 'FeeRecipient' + if cap(e.FeeRecipient) == 0 { + e.FeeRecipient = make([]byte, 0, len(buf[32:52])) + } + e.FeeRecipient = append(e.FeeRecipient, buf[32:52]...) + + // Field (2) 'StateRoot' + if cap(e.StateRoot) == 0 { + e.StateRoot = make([]byte, 0, len(buf[52:84])) + } + e.StateRoot = append(e.StateRoot, buf[52:84]...) + + // Field (3) 'ReceiptsRoot' + if cap(e.ReceiptsRoot) == 0 { + e.ReceiptsRoot = make([]byte, 0, len(buf[84:116])) + } + e.ReceiptsRoot = append(e.ReceiptsRoot, buf[84:116]...) 
+ + // Field (4) 'LogsBloom' + if cap(e.LogsBloom) == 0 { + e.LogsBloom = make([]byte, 0, len(buf[116:372])) + } + e.LogsBloom = append(e.LogsBloom, buf[116:372]...) + + // Field (5) 'PrevRandao' + if cap(e.PrevRandao) == 0 { + e.PrevRandao = make([]byte, 0, len(buf[372:404])) + } + e.PrevRandao = append(e.PrevRandao, buf[372:404]...) + + // Field (6) 'BlockNumber' + e.BlockNumber = ssz.UnmarshallUint64(buf[404:412]) + + // Field (7) 'GasLimit' + e.GasLimit = ssz.UnmarshallUint64(buf[412:420]) + + // Field (8) 'GasUsed' + e.GasUsed = ssz.UnmarshallUint64(buf[420:428]) + + // Field (9) 'Timestamp' + e.Timestamp = ssz.UnmarshallUint64(buf[428:436]) + + // Offset (10) 'ExtraData' + if o10 = ssz.ReadOffset(buf[436:440]); o10 > size { + return ssz.ErrOffset + } + + if o10 < 508 { + return ssz.ErrInvalidVariableOffset + } + + // Field (11) 'BaseFeePerGas' + if cap(e.BaseFeePerGas) == 0 { + e.BaseFeePerGas = make([]byte, 0, len(buf[440:472])) + } + e.BaseFeePerGas = append(e.BaseFeePerGas, buf[440:472]...) + + // Field (12) 'BlockHash' + if cap(e.BlockHash) == 0 { + e.BlockHash = make([]byte, 0, len(buf[472:504])) + } + e.BlockHash = append(e.BlockHash, buf[472:504]...) + + // Offset (13) 'Transactions' + if o13 = ssz.ReadOffset(buf[504:508]); o13 > size || o10 > o13 { + return ssz.ErrOffset + } + + // Field (10) 'ExtraData' + { + buf = tail[o10:o13] + if len(buf) > 32 { + return ssz.ErrBytesLength + } + if cap(e.ExtraData) == 0 { + e.ExtraData = make([]byte, 0, len(buf)) + } + e.ExtraData = append(e.ExtraData, buf...) 
+ } + + // Field (13) 'Transactions' + { + buf = tail[o13:] + num, err := ssz.DecodeDynamicLength(buf, 1048576) + if err != nil { + return err + } + e.Transactions = make([][]byte, num) + err = ssz.UnmarshalDynamic(buf, num, func(indx int, buf []byte) (err error) { + if len(buf) > 1073741824 { + return ssz.ErrBytesLength + } + if cap(e.Transactions[indx]) == 0 { + e.Transactions[indx] = make([]byte, 0, len(buf)) + } + e.Transactions[indx] = append(e.Transactions[indx], buf...) + return nil + }) + if err != nil { + return err + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayload object +func (e *ExecutionPayload) SizeSSZ() (size int) { + size = 508 + + // Field (10) 'ExtraData' + size += len(e.ExtraData) + + // Field (13) 'Transactions' + for ii := 0; ii < len(e.Transactions); ii++ { + size += 4 + size += len(e.Transactions[ii]) + } + + return +} + +// HashTreeRoot ssz hashes the ExecutionPayload object +func (e *ExecutionPayload) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(e) +} + +// HashTreeRootWith ssz hashes the ExecutionPayload object with a hasher +func (e *ExecutionPayload) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'ParentHash' + if size := len(e.ParentHash); size != 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.ParentHash", size, 32) + return + } + hh.PutBytes(e.ParentHash) + + // Field (1) 'FeeRecipient' + if size := len(e.FeeRecipient); size != 20 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.FeeRecipient", size, 20) + return + } + hh.PutBytes(e.FeeRecipient) + + // Field (2) 'StateRoot' + if size := len(e.StateRoot); size != 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.StateRoot", size, 32) + return + } + hh.PutBytes(e.StateRoot) + + // Field (3) 'ReceiptsRoot' + if size := len(e.ReceiptsRoot); size != 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.ReceiptsRoot", size, 32) + return + } + hh.PutBytes(e.ReceiptsRoot) + 
+ // Field (4) 'LogsBloom' + if size := len(e.LogsBloom); size != 256 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.LogsBloom", size, 256) + return + } + hh.PutBytes(e.LogsBloom) + + // Field (5) 'PrevRandao' + if size := len(e.PrevRandao); size != 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.PrevRandao", size, 32) + return + } + hh.PutBytes(e.PrevRandao) + + // Field (6) 'BlockNumber' + hh.PutUint64(e.BlockNumber) + + // Field (7) 'GasLimit' + hh.PutUint64(e.GasLimit) + + // Field (8) 'GasUsed' + hh.PutUint64(e.GasUsed) + + // Field (9) 'Timestamp' + hh.PutUint64(e.Timestamp) + + // Field (10) 'ExtraData' + { + elemIndx := hh.Index() + byteLen := uint64(len(e.ExtraData)) + if byteLen > 32 { + err = ssz.ErrIncorrectListSize + return + } + hh.PutBytes(e.ExtraData) + hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32) + } + + // Field (11) 'BaseFeePerGas' + if size := len(e.BaseFeePerGas); size != 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.BaseFeePerGas", size, 32) + return + } + hh.PutBytes(e.BaseFeePerGas) + + // Field (12) 'BlockHash' + if size := len(e.BlockHash); size != 32 { + err = ssz.ErrBytesLengthFn("ExecutionPayload.BlockHash", size, 32) + return + } + hh.PutBytes(e.BlockHash) + + // Field (13) 'Transactions' + { + subIndx := hh.Index() + num := uint64(len(e.Transactions)) + if num > 1048576 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range e.Transactions { + { + elemIndx := hh.Index() + byteLen := uint64(len(elem)) + if byteLen > 1073741824 { + err = ssz.ErrIncorrectListSize + return + } + hh.AppendBytes32(elem) + hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32) + } + } + hh.MerkleizeWithMixin(subIndx, num, 1048576) + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the ExecutionPayload object +func (e *ExecutionPayload) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(e) +} + +// MarshalSSZ ssz marshals the BeaconBodyBellatrix object +func (b *BeaconBodyBellatrix) MarshalSSZ() 
([]byte, error) { + return ssz.MarshalSSZ(b) +} + +// MarshalSSZTo ssz marshals the BeaconBodyBellatrix object to a target array +func (b *BeaconBodyBellatrix) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(384) + + // Field (0) 'RandaoReveal' + if size := len(b.RandaoReveal); size != 96 { + err = ssz.ErrBytesLengthFn("BeaconBodyBellatrix.RandaoReveal", size, 96) + return + } + dst = append(dst, b.RandaoReveal...) + + // Field (1) 'Eth1Data' + if b.Eth1Data == nil { + b.Eth1Data = new(Eth1Data) + } + if dst, err = b.Eth1Data.MarshalSSZTo(dst); err != nil { + return + } + + // Field (2) 'Graffiti' + if size := len(b.Graffiti); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBodyBellatrix.Graffiti", size, 32) + return + } + dst = append(dst, b.Graffiti...) + + // Offset (3) 'ProposerSlashings' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.ProposerSlashings) * 416 + + // Offset (4) 'AttesterSlashings' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.AttesterSlashings) * 416 + + // Offset (5) 'Attestations' + dst = ssz.WriteOffset(dst, offset) + for ii := 0; ii < len(b.Attestations); ii++ { + offset += 4 + offset += b.Attestations[ii].SizeSSZ() + } + + // Offset (6) 'Deposits' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.Deposits) * 1240 + + // Offset (7) 'VoluntaryExits' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.VoluntaryExits) * 112 + + // Field (8) 'SyncAggregate' + if b.SyncAggregate == nil { + b.SyncAggregate = new(SyncAggregate) + } + if dst, err = b.SyncAggregate.MarshalSSZTo(dst); err != nil { + return + } + + // Offset (9) 'ExecutionPayload' + dst = ssz.WriteOffset(dst, offset) + if b.ExecutionPayload == nil { + b.ExecutionPayload = new(ExecutionPayload) + } + offset += b.ExecutionPayload.SizeSSZ() + + // Field (3) 'ProposerSlashings' + if size := len(b.ProposerSlashings); size > 16 { + err = ssz.ErrListTooBigFn("BeaconBodyBellatrix.ProposerSlashings", size, 16) + return + } + for ii := 0; 
ii < len(b.ProposerSlashings); ii++ { + if dst, err = b.ProposerSlashings[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (4) 'AttesterSlashings' + if size := len(b.AttesterSlashings); size > 2 { + err = ssz.ErrListTooBigFn("BeaconBodyBellatrix.AttesterSlashings", size, 2) + return + } + for ii := 0; ii < len(b.AttesterSlashings); ii++ { + if dst, err = b.AttesterSlashings[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (5) 'Attestations' + if size := len(b.Attestations); size > 128 { + err = ssz.ErrListTooBigFn("BeaconBodyBellatrix.Attestations", size, 128) + return + } + { + offset = 4 * len(b.Attestations) + for ii := 0; ii < len(b.Attestations); ii++ { + dst = ssz.WriteOffset(dst, offset) + offset += b.Attestations[ii].SizeSSZ() + } + } + for ii := 0; ii < len(b.Attestations); ii++ { + if dst, err = b.Attestations[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (6) 'Deposits' + if size := len(b.Deposits); size > 16 { + err = ssz.ErrListTooBigFn("BeaconBodyBellatrix.Deposits", size, 16) + return + } + for ii := 0; ii < len(b.Deposits); ii++ { + if dst, err = b.Deposits[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (7) 'VoluntaryExits' + if size := len(b.VoluntaryExits); size > 16 { + err = ssz.ErrListTooBigFn("BeaconBodyBellatrix.VoluntaryExits", size, 16) + return + } + for ii := 0; ii < len(b.VoluntaryExits); ii++ { + if dst, err = b.VoluntaryExits[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (9) 'ExecutionPayload' + if dst, err = b.ExecutionPayload.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the BeaconBodyBellatrix object +func (b *BeaconBodyBellatrix) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 384 { + return ssz.ErrSize + } + + tail := buf + var o3, o4, o5, o6, o7, o9 uint64 + + // Field (0) 'RandaoReveal' + if cap(b.RandaoReveal) == 0 { + b.RandaoReveal = make([]byte, 0, 
len(buf[0:96])) + } + b.RandaoReveal = append(b.RandaoReveal, buf[0:96]...) + + // Field (1) 'Eth1Data' + if b.Eth1Data == nil { + b.Eth1Data = new(Eth1Data) + } + if err = b.Eth1Data.UnmarshalSSZ(buf[96:168]); err != nil { + return err + } + + // Field (2) 'Graffiti' + if cap(b.Graffiti) == 0 { + b.Graffiti = make([]byte, 0, len(buf[168:200])) + } + b.Graffiti = append(b.Graffiti, buf[168:200]...) + + // Offset (3) 'ProposerSlashings' + if o3 = ssz.ReadOffset(buf[200:204]); o3 > size { + return ssz.ErrOffset + } + + if o3 < 384 { + return ssz.ErrInvalidVariableOffset + } + + // Offset (4) 'AttesterSlashings' + if o4 = ssz.ReadOffset(buf[204:208]); o4 > size || o3 > o4 { + return ssz.ErrOffset + } + + // Offset (5) 'Attestations' + if o5 = ssz.ReadOffset(buf[208:212]); o5 > size || o4 > o5 { + return ssz.ErrOffset + } + + // Offset (6) 'Deposits' + if o6 = ssz.ReadOffset(buf[212:216]); o6 > size || o5 > o6 { + return ssz.ErrOffset + } + + // Offset (7) 'VoluntaryExits' + if o7 = ssz.ReadOffset(buf[216:220]); o7 > size || o6 > o7 { + return ssz.ErrOffset + } + + // Field (8) 'SyncAggregate' + if b.SyncAggregate == nil { + b.SyncAggregate = new(SyncAggregate) + } + if err = b.SyncAggregate.UnmarshalSSZ(buf[220:380]); err != nil { + return err + } + + // Offset (9) 'ExecutionPayload' + if o9 = ssz.ReadOffset(buf[380:384]); o9 > size || o7 > o9 { + return ssz.ErrOffset + } + + // Field (3) 'ProposerSlashings' + { + buf = tail[o3:o4] + num, err := ssz.DivideInt2(len(buf), 416, 16) + if err != nil { + return err + } + b.ProposerSlashings = make([]*Slashing, num) + for ii := 0; ii < num; ii++ { + if b.ProposerSlashings[ii] == nil { + b.ProposerSlashings[ii] = new(Slashing) + } + if err = b.ProposerSlashings[ii].UnmarshalSSZ(buf[ii*416 : (ii+1)*416]); err != nil { + return err + } + } + } + + // Field (4) 'AttesterSlashings' + { + buf = tail[o4:o5] + num, err := ssz.DivideInt2(len(buf), 416, 2) + if err != nil { + return err + } + b.AttesterSlashings = make([]*Slashing, 
num) + for ii := 0; ii < num; ii++ { + if b.AttesterSlashings[ii] == nil { + b.AttesterSlashings[ii] = new(Slashing) + } + if err = b.AttesterSlashings[ii].UnmarshalSSZ(buf[ii*416 : (ii+1)*416]); err != nil { + return err + } + } + } + + // Field (5) 'Attestations' + { + buf = tail[o5:o6] + num, err := ssz.DecodeDynamicLength(buf, 128) + if err != nil { + return err + } + b.Attestations = make([]*Attestation, num) + err = ssz.UnmarshalDynamic(buf, num, func(indx int, buf []byte) (err error) { + if b.Attestations[indx] == nil { + b.Attestations[indx] = new(Attestation) + } + if err = b.Attestations[indx].UnmarshalSSZ(buf); err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + } + + // Field (6) 'Deposits' + { + buf = tail[o6:o7] + num, err := ssz.DivideInt2(len(buf), 1240, 16) + if err != nil { + return err + } + b.Deposits = make([]*Deposit, num) + for ii := 0; ii < num; ii++ { + if b.Deposits[ii] == nil { + b.Deposits[ii] = new(Deposit) + } + if err = b.Deposits[ii].UnmarshalSSZ(buf[ii*1240 : (ii+1)*1240]); err != nil { + return err + } + } + } + + // Field (7) 'VoluntaryExits' + { + buf = tail[o7:o9] + num, err := ssz.DivideInt2(len(buf), 112, 16) + if err != nil { + return err + } + b.VoluntaryExits = make([]*SignedVoluntaryExit, num) + for ii := 0; ii < num; ii++ { + if b.VoluntaryExits[ii] == nil { + b.VoluntaryExits[ii] = new(SignedVoluntaryExit) + } + if err = b.VoluntaryExits[ii].UnmarshalSSZ(buf[ii*112 : (ii+1)*112]); err != nil { + return err + } + } + } + + // Field (9) 'ExecutionPayload' + { + buf = tail[o9:] + if b.ExecutionPayload == nil { + b.ExecutionPayload = new(ExecutionPayload) + } + if err = b.ExecutionPayload.UnmarshalSSZ(buf); err != nil { + return err + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the BeaconBodyBellatrix object +func (b *BeaconBodyBellatrix) SizeSSZ() (size int) { + size = 384 + + // Field (3) 'ProposerSlashings' + size += len(b.ProposerSlashings) * 416 + + // 
Field (4) 'AttesterSlashings' + size += len(b.AttesterSlashings) * 416 + + // Field (5) 'Attestations' + for ii := 0; ii < len(b.Attestations); ii++ { + size += 4 + size += b.Attestations[ii].SizeSSZ() + } + + // Field (6) 'Deposits' + size += len(b.Deposits) * 1240 + + // Field (7) 'VoluntaryExits' + size += len(b.VoluntaryExits) * 112 + + // Field (9) 'ExecutionPayload' + if b.ExecutionPayload == nil { + b.ExecutionPayload = new(ExecutionPayload) + } + size += b.ExecutionPayload.SizeSSZ() + + return +} + +// HashTreeRoot ssz hashes the BeaconBodyBellatrix object +func (b *BeaconBodyBellatrix) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(b) +} + +// HashTreeRootWith ssz hashes the BeaconBodyBellatrix object with a hasher +func (b *BeaconBodyBellatrix) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'RandaoReveal' + if size := len(b.RandaoReveal); size != 96 { + err = ssz.ErrBytesLengthFn("BeaconBodyBellatrix.RandaoReveal", size, 96) + return + } + hh.PutBytes(b.RandaoReveal) + + // Field (1) 'Eth1Data' + if b.Eth1Data == nil { + b.Eth1Data = new(Eth1Data) + } + if err = b.Eth1Data.HashTreeRootWith(hh); err != nil { + return + } + + // Field (2) 'Graffiti' + if size := len(b.Graffiti); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBodyBellatrix.Graffiti", size, 32) + return + } + hh.PutBytes(b.Graffiti) + + // Field (3) 'ProposerSlashings' + { + subIndx := hh.Index() + num := uint64(len(b.ProposerSlashings)) + if num > 16 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.ProposerSlashings { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 16) + } + + // Field (4) 'AttesterSlashings' + { + subIndx := hh.Index() + num := uint64(len(b.AttesterSlashings)) + if num > 2 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.AttesterSlashings { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + 
} + hh.MerkleizeWithMixin(subIndx, num, 2) + } + + // Field (5) 'Attestations' + { + subIndx := hh.Index() + num := uint64(len(b.Attestations)) + if num > 128 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.Attestations { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 128) + } + + // Field (6) 'Deposits' + { + subIndx := hh.Index() + num := uint64(len(b.Deposits)) + if num > 16 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.Deposits { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 16) + } + + // Field (7) 'VoluntaryExits' + { + subIndx := hh.Index() + num := uint64(len(b.VoluntaryExits)) + if num > 16 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.VoluntaryExits { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 16) + } + + // Field (8) 'SyncAggregate' + if b.SyncAggregate == nil { + b.SyncAggregate = new(SyncAggregate) + } + if err = b.SyncAggregate.HashTreeRootWith(hh); err != nil { + return + } + + // Field (9) 'ExecutionPayload' + if err = b.ExecutionPayload.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the BeaconBodyBellatrix object +func (b *BeaconBodyBellatrix) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(b) +} + +// MarshalSSZ ssz marshals the BeaconBlockBellatrix object +func (b *BeaconBlockBellatrix) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(b) +} + +// MarshalSSZTo ssz marshals the BeaconBlockBellatrix object to a target array +func (b *BeaconBlockBellatrix) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(84) + + // Field (0) 'Slot' + dst = ssz.MarshalUint64(dst, b.Slot) + + // Field (1) 'ProposerIndex' + dst = ssz.MarshalUint64(dst, b.ProposerIndex) + + // Field (2) 'ParentRoot' + if size := 
len(b.ParentRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockBellatrix.ParentRoot", size, 32) + return + } + dst = append(dst, b.ParentRoot...) + + // Field (3) 'StateRoot' + if size := len(b.StateRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockBellatrix.StateRoot", size, 32) + return + } + dst = append(dst, b.StateRoot...) + + // Offset (4) 'Body' + dst = ssz.WriteOffset(dst, offset) + if b.Body == nil { + b.Body = new(BeaconBodyBellatrix) + } + offset += b.Body.SizeSSZ() + + // Field (4) 'Body' + if dst, err = b.Body.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the BeaconBlockBellatrix object +func (b *BeaconBlockBellatrix) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 84 { + return ssz.ErrSize + } + + tail := buf + var o4 uint64 + + // Field (0) 'Slot' + b.Slot = ssz.UnmarshallUint64(buf[0:8]) + + // Field (1) 'ProposerIndex' + b.ProposerIndex = ssz.UnmarshallUint64(buf[8:16]) + + // Field (2) 'ParentRoot' + if cap(b.ParentRoot) == 0 { + b.ParentRoot = make([]byte, 0, len(buf[16:48])) + } + b.ParentRoot = append(b.ParentRoot, buf[16:48]...) + + // Field (3) 'StateRoot' + if cap(b.StateRoot) == 0 { + b.StateRoot = make([]byte, 0, len(buf[48:80])) + } + b.StateRoot = append(b.StateRoot, buf[48:80]...) 
+ + // Offset (4) 'Body' + if o4 = ssz.ReadOffset(buf[80:84]); o4 > size { + return ssz.ErrOffset + } + + if o4 < 84 { + return ssz.ErrInvalidVariableOffset + } + + // Field (4) 'Body' + { + buf = tail[o4:] + if b.Body == nil { + b.Body = new(BeaconBodyBellatrix) + } + if err = b.Body.UnmarshalSSZ(buf); err != nil { + return err + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlockBellatrix object +func (b *BeaconBlockBellatrix) SizeSSZ() (size int) { + size = 84 + + // Field (4) 'Body' + if b.Body == nil { + b.Body = new(BeaconBodyBellatrix) + } + size += b.Body.SizeSSZ() + + return +} + +// HashTreeRoot ssz hashes the BeaconBlockBellatrix object +func (b *BeaconBlockBellatrix) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(b) +} + +// HashTreeRootWith ssz hashes the BeaconBlockBellatrix object with a hasher +func (b *BeaconBlockBellatrix) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Slot' + hh.PutUint64(b.Slot) + + // Field (1) 'ProposerIndex' + hh.PutUint64(b.ProposerIndex) + + // Field (2) 'ParentRoot' + if size := len(b.ParentRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockBellatrix.ParentRoot", size, 32) + return + } + hh.PutBytes(b.ParentRoot) + + // Field (3) 'StateRoot' + if size := len(b.StateRoot); size != 32 { + err = ssz.ErrBytesLengthFn("BeaconBlockBellatrix.StateRoot", size, 32) + return + } + hh.PutBytes(b.StateRoot) + + // Field (4) 'Body' + if err = b.Body.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the BeaconBlockBellatrix object +func (b *BeaconBlockBellatrix) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(b) +} + +// MarshalSSZ ssz marshals the SignedBeaconBlockBellatrix object +func (s *SignedBeaconBlockBellatrix) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// MarshalSSZTo ssz marshals the SignedBeaconBlockBellatrix object to a target 
array +func (s *SignedBeaconBlockBellatrix) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(100) + + // Offset (0) 'Block' + dst = ssz.WriteOffset(dst, offset) + if s.Block == nil { + s.Block = new(BeaconBlockBellatrix) + } + offset += s.Block.SizeSSZ() + + // Field (1) 'Signature' + if size := len(s.Signature); size != 96 { + err = ssz.ErrBytesLengthFn("SignedBeaconBlockBellatrix.Signature", size, 96) + return + } + dst = append(dst, s.Signature...) + + // Field (0) 'Block' + if dst, err = s.Block.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the SignedBeaconBlockBellatrix object +func (s *SignedBeaconBlockBellatrix) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 100 { + return ssz.ErrSize + } + + tail := buf + var o0 uint64 + + // Offset (0) 'Block' + if o0 = ssz.ReadOffset(buf[0:4]); o0 > size { + return ssz.ErrOffset + } + + if o0 < 100 { + return ssz.ErrInvalidVariableOffset + } + + // Field (1) 'Signature' + if cap(s.Signature) == 0 { + s.Signature = make([]byte, 0, len(buf[4:100])) + } + s.Signature = append(s.Signature, buf[4:100]...) 
+ + // Field (0) 'Block' + { + buf = tail[o0:] + if s.Block == nil { + s.Block = new(BeaconBlockBellatrix) + } + if err = s.Block.UnmarshalSSZ(buf); err != nil { + return err + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the SignedBeaconBlockBellatrix object +func (s *SignedBeaconBlockBellatrix) SizeSSZ() (size int) { + size = 100 + + // Field (0) 'Block' + if s.Block == nil { + s.Block = new(BeaconBlockBellatrix) + } + size += s.Block.SizeSSZ() + + return +} + +// HashTreeRoot ssz hashes the SignedBeaconBlockBellatrix object +func (s *SignedBeaconBlockBellatrix) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the SignedBeaconBlockBellatrix object with a hasher +func (s *SignedBeaconBlockBellatrix) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Block' + if err = s.Block.HashTreeRootWith(hh); err != nil { + return + } + + // Field (1) 'Signature' + if size := len(s.Signature); size != 96 { + err = ssz.ErrBytesLengthFn("SignedBeaconBlockBellatrix.Signature", size, 96) + return + } + hh.PutBytes(s.Signature) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the SignedBeaconBlockBellatrix object +func (s *SignedBeaconBlockBellatrix) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(s) +} + +// MarshalSSZ ssz marshals the LightClientBootstrap object +func (l *LightClientBootstrap) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(l) +} + +// MarshalSSZTo ssz marshals the LightClientBootstrap object to a target array +func (l *LightClientBootstrap) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Header' + if l.Header == nil { + l.Header = new(BeaconBlockHeader) + } + if dst, err = l.Header.MarshalSSZTo(dst); err != nil { + return + } + + // Field (1) 'CurrentSyncCommittee' + if l.CurrentSyncCommittee == nil { + l.CurrentSyncCommittee = new(SyncCommittee) + } + if dst, err = 
l.CurrentSyncCommittee.MarshalSSZTo(dst); err != nil { + return + } + + // Field (2) 'CurrentSyncCommitteeBranch' + if size := len(l.CurrentSyncCommitteeBranch); size != 5 { + err = ssz.ErrVectorLengthFn("LightClientBootstrap.CurrentSyncCommitteeBranch", size, 5) + return + } + for ii := 0; ii < 5; ii++ { + if size := len(l.CurrentSyncCommitteeBranch[ii]); size != 32 { + err = ssz.ErrBytesLengthFn("LightClientBootstrap.CurrentSyncCommitteeBranch[ii]", size, 32) + return + } + dst = append(dst, l.CurrentSyncCommitteeBranch[ii]...) + } + + return +} + +// UnmarshalSSZ ssz unmarshals the LightClientBootstrap object +func (l *LightClientBootstrap) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 24848 { + return ssz.ErrSize + } + + // Field (0) 'Header' + if l.Header == nil { + l.Header = new(BeaconBlockHeader) + } + if err = l.Header.UnmarshalSSZ(buf[0:112]); err != nil { + return err + } + + // Field (1) 'CurrentSyncCommittee' + if l.CurrentSyncCommittee == nil { + l.CurrentSyncCommittee = new(SyncCommittee) + } + if err = l.CurrentSyncCommittee.UnmarshalSSZ(buf[112:24688]); err != nil { + return err + } + + // Field (2) 'CurrentSyncCommitteeBranch' + l.CurrentSyncCommitteeBranch = make([][]byte, 5) + for ii := 0; ii < 5; ii++ { + if cap(l.CurrentSyncCommitteeBranch[ii]) == 0 { + l.CurrentSyncCommitteeBranch[ii] = make([]byte, 0, len(buf[24688:24848][ii*32:(ii+1)*32])) + } + l.CurrentSyncCommitteeBranch[ii] = append(l.CurrentSyncCommitteeBranch[ii], buf[24688:24848][ii*32:(ii+1)*32]...) 
+ } + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the LightClientBootstrap object +func (l *LightClientBootstrap) SizeSSZ() (size int) { + size = 24848 + return +} + +// HashTreeRoot ssz hashes the LightClientBootstrap object +func (l *LightClientBootstrap) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(l) +} + +// HashTreeRootWith ssz hashes the LightClientBootstrap object with a hasher +func (l *LightClientBootstrap) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Header' + if l.Header == nil { + l.Header = new(BeaconBlockHeader) + } + if err = l.Header.HashTreeRootWith(hh); err != nil { + return + } + + // Field (1) 'CurrentSyncCommittee' + if l.CurrentSyncCommittee == nil { + l.CurrentSyncCommittee = new(SyncCommittee) + } + if err = l.CurrentSyncCommittee.HashTreeRootWith(hh); err != nil { + return + } + + // Field (2) 'CurrentSyncCommitteeBranch' + { + if size := len(l.CurrentSyncCommitteeBranch); size != 5 { + err = ssz.ErrVectorLengthFn("LightClientBootstrap.CurrentSyncCommitteeBranch", size, 5) + return + } + subIndx := hh.Index() + for _, i := range l.CurrentSyncCommitteeBranch { + if len(i) != 32 { + err = ssz.ErrBytesLength + return + } + hh.Append(i) + } + hh.Merkleize(subIndx) + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the LightClientBootstrap object +func (l *LightClientBootstrap) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(l) +} + +// MarshalSSZ ssz marshals the SyncCommittee object +func (s *SyncCommittee) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// MarshalSSZTo ssz marshals the SyncCommittee object to a target array +func (s *SyncCommittee) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'PubKeys' + if size := len(s.PubKeys); size != 512 { + err = ssz.ErrVectorLengthFn("SyncCommittee.PubKeys", size, 512) + return + } + for ii := 0; ii < 512; ii++ { + if size := 
len(s.PubKeys[ii]); size != 48 { + err = ssz.ErrBytesLengthFn("SyncCommittee.PubKeys[ii]", size, 48) + return + } + dst = append(dst, s.PubKeys[ii]...) + } + + return +} + +// UnmarshalSSZ ssz unmarshals the SyncCommittee object +func (s *SyncCommittee) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 24576 { + return ssz.ErrSize + } + + // Field (0) 'PubKeys' + s.PubKeys = make([][]byte, 512) + for ii := 0; ii < 512; ii++ { + if cap(s.PubKeys[ii]) == 0 { + s.PubKeys[ii] = make([]byte, 0, len(buf[0:24576][ii*48:(ii+1)*48])) + } + s.PubKeys[ii] = append(s.PubKeys[ii], buf[0:24576][ii*48:(ii+1)*48]...) + } + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the SyncCommittee object +func (s *SyncCommittee) SizeSSZ() (size int) { + size = 24576 + return +} + +// HashTreeRoot ssz hashes the SyncCommittee object +func (s *SyncCommittee) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the SyncCommittee object with a hasher +func (s *SyncCommittee) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'PubKeys' + { + if size := len(s.PubKeys); size != 512 { + err = ssz.ErrVectorLengthFn("SyncCommittee.PubKeys", size, 512) + return + } + subIndx := hh.Index() + for _, i := range s.PubKeys { + if len(i) != 48 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(i) + } + hh.Merkleize(subIndx) + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the SyncCommittee object +func (s *SyncCommittee) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(s) +} + +// MarshalSSZ ssz marshals the LightClientUpdate object +func (l *LightClientUpdate) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(l) +} + +// MarshalSSZTo ssz marshals the LightClientUpdate object to a target array +func (l *LightClientUpdate) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'AttestedHeader' + if 
l.AttestedHeader == nil { + l.AttestedHeader = new(BeaconBlockHeader) + } + if dst, err = l.AttestedHeader.MarshalSSZTo(dst); err != nil { + return + } + + // Field (1) 'NextSyncCommitee' + if l.NextSyncCommitee == nil { + l.NextSyncCommitee = new(SyncCommittee) + } + if dst, err = l.NextSyncCommitee.MarshalSSZTo(dst); err != nil { + return + } + + // Field (2) 'NextSyncCommitteeBranch' + if size := len(l.NextSyncCommitteeBranch); size != 5 { + err = ssz.ErrVectorLengthFn("LightClientUpdate.NextSyncCommitteeBranch", size, 5) + return + } + for ii := 0; ii < 5; ii++ { + if size := len(l.NextSyncCommitteeBranch[ii]); size != 32 { + err = ssz.ErrBytesLengthFn("LightClientUpdate.NextSyncCommitteeBranch[ii]", size, 32) + return + } + dst = append(dst, l.NextSyncCommitteeBranch[ii]...) + } + + // Field (3) 'FinalizedHeader' + if l.FinalizedHeader == nil { + l.FinalizedHeader = new(BeaconBlockHeader) + } + if dst, err = l.FinalizedHeader.MarshalSSZTo(dst); err != nil { + return + } + + // Field (4) 'FinalityBranch' + if size := len(l.FinalityBranch); size != 6 { + err = ssz.ErrVectorLengthFn("LightClientUpdate.FinalityBranch", size, 6) + return + } + for ii := 0; ii < 6; ii++ { + if size := len(l.FinalityBranch[ii]); size != 32 { + err = ssz.ErrBytesLengthFn("LightClientUpdate.FinalityBranch[ii]", size, 32) + return + } + dst = append(dst, l.FinalityBranch[ii]...) 
+ } + + // Field (5) 'SyncAggregate' + if l.SyncAggregate == nil { + l.SyncAggregate = new(SyncAggregate) + } + if dst, err = l.SyncAggregate.MarshalSSZTo(dst); err != nil { + return + } + + // Field (6) 'SignatureSlot' + dst = ssz.MarshalUint64(dst, l.SignatureSlot) + + return +} + +// UnmarshalSSZ ssz unmarshals the LightClientUpdate object +func (l *LightClientUpdate) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 25320 { + return ssz.ErrSize + } + + // Field (0) 'AttestedHeader' + if l.AttestedHeader == nil { + l.AttestedHeader = new(BeaconBlockHeader) + } + if err = l.AttestedHeader.UnmarshalSSZ(buf[0:112]); err != nil { + return err + } + + // Field (1) 'NextSyncCommitee' + if l.NextSyncCommitee == nil { + l.NextSyncCommitee = new(SyncCommittee) + } + if err = l.NextSyncCommitee.UnmarshalSSZ(buf[112:24688]); err != nil { + return err + } + + // Field (2) 'NextSyncCommitteeBranch' + l.NextSyncCommitteeBranch = make([][]byte, 5) + for ii := 0; ii < 5; ii++ { + if cap(l.NextSyncCommitteeBranch[ii]) == 0 { + l.NextSyncCommitteeBranch[ii] = make([]byte, 0, len(buf[24688:24848][ii*32:(ii+1)*32])) + } + l.NextSyncCommitteeBranch[ii] = append(l.NextSyncCommitteeBranch[ii], buf[24688:24848][ii*32:(ii+1)*32]...) + } + + // Field (3) 'FinalizedHeader' + if l.FinalizedHeader == nil { + l.FinalizedHeader = new(BeaconBlockHeader) + } + if err = l.FinalizedHeader.UnmarshalSSZ(buf[24848:24960]); err != nil { + return err + } + + // Field (4) 'FinalityBranch' + l.FinalityBranch = make([][]byte, 6) + for ii := 0; ii < 6; ii++ { + if cap(l.FinalityBranch[ii]) == 0 { + l.FinalityBranch[ii] = make([]byte, 0, len(buf[24960:25152][ii*32:(ii+1)*32])) + } + l.FinalityBranch[ii] = append(l.FinalityBranch[ii], buf[24960:25152][ii*32:(ii+1)*32]...) 
+ } + + // Field (5) 'SyncAggregate' + if l.SyncAggregate == nil { + l.SyncAggregate = new(SyncAggregate) + } + if err = l.SyncAggregate.UnmarshalSSZ(buf[25152:25312]); err != nil { + return err + } + + // Field (6) 'SignatureSlot' + l.SignatureSlot = ssz.UnmarshallUint64(buf[25312:25320]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the LightClientUpdate object +func (l *LightClientUpdate) SizeSSZ() (size int) { + size = 25320 + return +} + +// HashTreeRoot ssz hashes the LightClientUpdate object +func (l *LightClientUpdate) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(l) +} + +// HashTreeRootWith ssz hashes the LightClientUpdate object with a hasher +func (l *LightClientUpdate) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'AttestedHeader' + if l.AttestedHeader == nil { + l.AttestedHeader = new(BeaconBlockHeader) + } + if err = l.AttestedHeader.HashTreeRootWith(hh); err != nil { + return + } + + // Field (1) 'NextSyncCommitee' + if l.NextSyncCommitee == nil { + l.NextSyncCommitee = new(SyncCommittee) + } + if err = l.NextSyncCommitee.HashTreeRootWith(hh); err != nil { + return + } + + // Field (2) 'NextSyncCommitteeBranch' + { + if size := len(l.NextSyncCommitteeBranch); size != 5 { + err = ssz.ErrVectorLengthFn("LightClientUpdate.NextSyncCommitteeBranch", size, 5) + return + } + subIndx := hh.Index() + for _, i := range l.NextSyncCommitteeBranch { + if len(i) != 32 { + err = ssz.ErrBytesLength + return + } + hh.Append(i) + } + hh.Merkleize(subIndx) + } + + // Field (3) 'FinalizedHeader' + if l.FinalizedHeader == nil { + l.FinalizedHeader = new(BeaconBlockHeader) + } + if err = l.FinalizedHeader.HashTreeRootWith(hh); err != nil { + return + } + + // Field (4) 'FinalityBranch' + { + if size := len(l.FinalityBranch); size != 6 { + err = ssz.ErrVectorLengthFn("LightClientUpdate.FinalityBranch", size, 6) + return + } + subIndx := hh.Index() + for _, i := range 
l.FinalityBranch { + if len(i) != 32 { + err = ssz.ErrBytesLength + return + } + hh.Append(i) + } + hh.Merkleize(subIndx) + } + + // Field (5) 'SyncAggregate' + if l.SyncAggregate == nil { + l.SyncAggregate = new(SyncAggregate) + } + if err = l.SyncAggregate.HashTreeRootWith(hh); err != nil { + return + } + + // Field (6) 'SignatureSlot' + hh.PutUint64(l.SignatureSlot) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the LightClientUpdate object +func (l *LightClientUpdate) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(l) +} + +// MarshalSSZ ssz marshals the LightClientFinalityUpdate object +func (l *LightClientFinalityUpdate) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(l) +} + +// MarshalSSZTo ssz marshals the LightClientFinalityUpdate object to a target array +func (l *LightClientFinalityUpdate) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'AttestedHeader' + if l.AttestedHeader == nil { + l.AttestedHeader = new(BeaconBlockHeader) + } + if dst, err = l.AttestedHeader.MarshalSSZTo(dst); err != nil { + return + } + + // Field (1) 'FinalizedHeader' + if l.FinalizedHeader == nil { + l.FinalizedHeader = new(BeaconBlockHeader) + } + if dst, err = l.FinalizedHeader.MarshalSSZTo(dst); err != nil { + return + } + + // Field (2) 'FinalityBranch' + if size := len(l.FinalityBranch); size != 6 { + err = ssz.ErrVectorLengthFn("LightClientFinalityUpdate.FinalityBranch", size, 6) + return + } + for ii := 0; ii < 6; ii++ { + if size := len(l.FinalityBranch[ii]); size != 32 { + err = ssz.ErrBytesLengthFn("LightClientFinalityUpdate.FinalityBranch[ii]", size, 32) + return + } + dst = append(dst, l.FinalityBranch[ii]...) 
+ } + + // Field (3) 'SyncAggregate' + if l.SyncAggregate == nil { + l.SyncAggregate = new(SyncAggregate) + } + if dst, err = l.SyncAggregate.MarshalSSZTo(dst); err != nil { + return + } + + // Field (4) 'SignatureSlot' + dst = ssz.MarshalUint64(dst, l.SignatureSlot) + + return +} + +// UnmarshalSSZ ssz unmarshals the LightClientFinalityUpdate object +func (l *LightClientFinalityUpdate) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 584 { + return ssz.ErrSize + } + + // Field (0) 'AttestedHeader' + if l.AttestedHeader == nil { + l.AttestedHeader = new(BeaconBlockHeader) + } + if err = l.AttestedHeader.UnmarshalSSZ(buf[0:112]); err != nil { + return err + } + + // Field (1) 'FinalizedHeader' + if l.FinalizedHeader == nil { + l.FinalizedHeader = new(BeaconBlockHeader) + } + if err = l.FinalizedHeader.UnmarshalSSZ(buf[112:224]); err != nil { + return err + } + + // Field (2) 'FinalityBranch' + l.FinalityBranch = make([][]byte, 6) + for ii := 0; ii < 6; ii++ { + if cap(l.FinalityBranch[ii]) == 0 { + l.FinalityBranch[ii] = make([]byte, 0, len(buf[224:416][ii*32:(ii+1)*32])) + } + l.FinalityBranch[ii] = append(l.FinalityBranch[ii], buf[224:416][ii*32:(ii+1)*32]...) 
+ } + + // Field (3) 'SyncAggregate' + if l.SyncAggregate == nil { + l.SyncAggregate = new(SyncAggregate) + } + if err = l.SyncAggregate.UnmarshalSSZ(buf[416:576]); err != nil { + return err + } + + // Field (4) 'SignatureSlot' + l.SignatureSlot = ssz.UnmarshallUint64(buf[576:584]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the LightClientFinalityUpdate object +func (l *LightClientFinalityUpdate) SizeSSZ() (size int) { + size = 584 + return +} + +// HashTreeRoot ssz hashes the LightClientFinalityUpdate object +func (l *LightClientFinalityUpdate) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(l) +} + +// HashTreeRootWith ssz hashes the LightClientFinalityUpdate object with a hasher +func (l *LightClientFinalityUpdate) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'AttestedHeader' + if l.AttestedHeader == nil { + l.AttestedHeader = new(BeaconBlockHeader) + } + if err = l.AttestedHeader.HashTreeRootWith(hh); err != nil { + return + } + + // Field (1) 'FinalizedHeader' + if l.FinalizedHeader == nil { + l.FinalizedHeader = new(BeaconBlockHeader) + } + if err = l.FinalizedHeader.HashTreeRootWith(hh); err != nil { + return + } + + // Field (2) 'FinalityBranch' + { + if size := len(l.FinalityBranch); size != 6 { + err = ssz.ErrVectorLengthFn("LightClientFinalityUpdate.FinalityBranch", size, 6) + return + } + subIndx := hh.Index() + for _, i := range l.FinalityBranch { + if len(i) != 32 { + err = ssz.ErrBytesLength + return + } + hh.Append(i) + } + hh.Merkleize(subIndx) + } + + // Field (3) 'SyncAggregate' + if l.SyncAggregate == nil { + l.SyncAggregate = new(SyncAggregate) + } + if err = l.SyncAggregate.HashTreeRootWith(hh); err != nil { + return + } + + // Field (4) 'SignatureSlot' + hh.PutUint64(l.SignatureSlot) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the LightClientFinalityUpdate object +func (l *LightClientFinalityUpdate) GetTree() (*ssz.Node, error) { + 
return ssz.ProofTree(l) +} + +// MarshalSSZ ssz marshals the LightClientOptimisticUpdate object +func (l *LightClientOptimisticUpdate) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(l) +} + +// MarshalSSZTo ssz marshals the LightClientOptimisticUpdate object to a target array +func (l *LightClientOptimisticUpdate) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'AttestedHeader' + if l.AttestedHeader == nil { + l.AttestedHeader = new(BeaconBlockHeader) + } + if dst, err = l.AttestedHeader.MarshalSSZTo(dst); err != nil { + return + } + + // Field (1) 'SyncAggregate' + if l.SyncAggregate == nil { + l.SyncAggregate = new(SyncAggregate) + } + if dst, err = l.SyncAggregate.MarshalSSZTo(dst); err != nil { + return + } + + // Field (2) 'SignatureSlot' + dst = ssz.MarshalUint64(dst, l.SignatureSlot) + + return +} + +// UnmarshalSSZ ssz unmarshals the LightClientOptimisticUpdate object +func (l *LightClientOptimisticUpdate) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 280 { + return ssz.ErrSize + } + + // Field (0) 'AttestedHeader' + if l.AttestedHeader == nil { + l.AttestedHeader = new(BeaconBlockHeader) + } + if err = l.AttestedHeader.UnmarshalSSZ(buf[0:112]); err != nil { + return err + } + + // Field (1) 'SyncAggregate' + if l.SyncAggregate == nil { + l.SyncAggregate = new(SyncAggregate) + } + if err = l.SyncAggregate.UnmarshalSSZ(buf[112:272]); err != nil { + return err + } + + // Field (2) 'SignatureSlot' + l.SignatureSlot = ssz.UnmarshallUint64(buf[272:280]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the LightClientOptimisticUpdate object +func (l *LightClientOptimisticUpdate) SizeSSZ() (size int) { + size = 280 + return +} + +// HashTreeRoot ssz hashes the LightClientOptimisticUpdate object +func (l *LightClientOptimisticUpdate) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(l) +} + +// HashTreeRootWith ssz hashes the 
LightClientOptimisticUpdate object with a hasher +func (l *LightClientOptimisticUpdate) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'AttestedHeader' + if l.AttestedHeader == nil { + l.AttestedHeader = new(BeaconBlockHeader) + } + if err = l.AttestedHeader.HashTreeRootWith(hh); err != nil { + return + } + + // Field (1) 'SyncAggregate' + if l.SyncAggregate == nil { + l.SyncAggregate = new(SyncAggregate) + } + if err = l.SyncAggregate.HashTreeRootWith(hh); err != nil { + return + } + + // Field (2) 'SignatureSlot' + hh.PutUint64(l.SignatureSlot) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the LightClientOptimisticUpdate object +func (l *LightClientOptimisticUpdate) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(l) +} diff --git a/cmd/lightclient/rpc/lightrpc/clone.go b/cmd/lightclient/rpc/lightrpc/clone.go new file mode 100644 index 00000000000..fdeb2ef5037 --- /dev/null +++ b/cmd/lightclient/rpc/lightrpc/clone.go @@ -0,0 +1,23 @@ +package lightrpc + +import "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication" + +func (*SignedBeaconBlockBellatrix) Clone() communication.Packet { + return &SignedBeaconBlockBellatrix{} +} + +func (*LightClientFinalityUpdate) Clone() communication.Packet { + return &LightClientFinalityUpdate{} +} + +func (*LightClientOptimisticUpdate) Clone() communication.Packet { + return &LightClientOptimisticUpdate{} +} + +func (*MetadataV1) Clone() communication.Packet { + return &MetadataV1{} +} + +func (*MetadataV2) Clone() communication.Packet { + return &MetadataV2{} +} diff --git a/cmd/lightclient/rpc/lightrpc/metadata.pb.go b/cmd/lightclient/rpc/lightrpc/metadata.pb.go new file mode 100644 index 00000000000..23055e84535 --- /dev/null +++ b/cmd/lightclient/rpc/lightrpc/metadata.pb.go @@ -0,0 +1,233 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.14.0 +// source: metadata.proto + +package lightrpc + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type MetadataV1 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SeqNumber uint64 `protobuf:"varint,1,opt,name=SeqNumber,json=seq_number,proto3" json:"SeqNumber,omitempty"` + Attnets uint64 `protobuf:"varint,2,opt,name=Attnets,json=attnets,proto3" json:"Attnets,omitempty"` +} + +func (x *MetadataV1) Reset() { + *x = MetadataV1{} + if protoimpl.UnsafeEnabled { + mi := &file_metadata_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataV1) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataV1) ProtoMessage() {} + +func (x *MetadataV1) ProtoReflect() protoreflect.Message { + mi := &file_metadata_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataV1.ProtoReflect.Descriptor instead. 
+func (*MetadataV1) Descriptor() ([]byte, []int) { + return file_metadata_proto_rawDescGZIP(), []int{0} +} + +func (x *MetadataV1) GetSeqNumber() uint64 { + if x != nil { + return x.SeqNumber + } + return 0 +} + +func (x *MetadataV1) GetAttnets() uint64 { + if x != nil { + return x.Attnets + } + return 0 +} + +type MetadataV2 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SeqNumber uint64 `protobuf:"varint,1,opt,name=SeqNumber,json=seq_number,proto3" json:"SeqNumber,omitempty"` + Attnets uint64 `protobuf:"varint,2,opt,name=Attnets,json=attnets,proto3" json:"Attnets,omitempty"` + Syncnets uint64 `protobuf:"varint,3,opt,name=Syncnets,json=syncnets,proto3" json:"Syncnets,omitempty"` +} + +func (x *MetadataV2) Reset() { + *x = MetadataV2{} + if protoimpl.UnsafeEnabled { + mi := &file_metadata_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataV2) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataV2) ProtoMessage() {} + +func (x *MetadataV2) ProtoReflect() protoreflect.Message { + mi := &file_metadata_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataV2.ProtoReflect.Descriptor instead. 
+func (*MetadataV2) Descriptor() ([]byte, []int) { + return file_metadata_proto_rawDescGZIP(), []int{1} +} + +func (x *MetadataV2) GetSeqNumber() uint64 { + if x != nil { + return x.SeqNumber + } + return 0 +} + +func (x *MetadataV2) GetAttnets() uint64 { + if x != nil { + return x.Attnets + } + return 0 +} + +func (x *MetadataV2) GetSyncnets() uint64 { + if x != nil { + return x.Syncnets + } + return 0 +} + +var File_metadata_proto protoreflect.FileDescriptor + +var file_metadata_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x22, 0x45, 0x0a, 0x0a, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x56, 0x31, 0x12, 0x1d, 0x0a, 0x09, 0x53, 0x65, 0x71, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x65, 0x71, + 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x41, 0x74, 0x74, 0x6e, 0x65, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x61, 0x74, 0x74, 0x6e, 0x65, 0x74, + 0x73, 0x22, 0x61, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x56, 0x32, 0x12, + 0x1d, 0x0a, 0x09, 0x53, 0x65, 0x71, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0a, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x18, + 0x0a, 0x07, 0x41, 0x74, 0x74, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x07, 0x61, 0x74, 0x74, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, + 0x6e, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, + 0x6e, 0x65, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_metadata_proto_rawDescOnce sync.Once + file_metadata_proto_rawDescData = file_metadata_proto_rawDesc +) + +func file_metadata_proto_rawDescGZIP() []byte { + file_metadata_proto_rawDescOnce.Do(func() { + 
file_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_metadata_proto_rawDescData) + }) + return file_metadata_proto_rawDescData +} + +var file_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_metadata_proto_goTypes = []interface{}{ + (*MetadataV1)(nil), // 0: lightrpc.MetadataV1 + (*MetadataV2)(nil), // 1: lightrpc.MetadataV2 +} +var file_metadata_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_metadata_proto_init() } +func file_metadata_proto_init() { + if File_metadata_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataV1); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metadata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataV2); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_metadata_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_metadata_proto_goTypes, + DependencyIndexes: file_metadata_proto_depIdxs, + MessageInfos: file_metadata_proto_msgTypes, + }.Build() + File_metadata_proto = out.File + file_metadata_proto_rawDesc = nil + file_metadata_proto_goTypes = nil + file_metadata_proto_depIdxs = nil +} diff --git a/cmd/lightclient/rpc/lightrpc/metadata.pb_encoding.go b/cmd/lightclient/rpc/lightrpc/metadata.pb_encoding.go new 
file mode 100644 index 00000000000..88a185b2de4 --- /dev/null +++ b/cmd/lightclient/rpc/lightrpc/metadata.pb_encoding.go @@ -0,0 +1,147 @@ +// Code generated by fastssz. DO NOT EDIT. +// Hash: 78616528ff009030b8c5c8324875c6bcfe04bd54faa9ee90c8e216ab17271c73 +// Version: 0.1.2 +package lightrpc + +import ( + ssz "github.com/ferranbt/fastssz" +) + +// MarshalSSZ ssz marshals the MetadataV1 object +func (m *MetadataV1) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(m) +} + +// MarshalSSZTo ssz marshals the MetadataV1 object to a target array +func (m *MetadataV1) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'SeqNumber' + dst = ssz.MarshalUint64(dst, m.SeqNumber) + + // Field (1) 'Attnets' + dst = ssz.MarshalUint64(dst, m.Attnets) + + return +} + +// UnmarshalSSZ ssz unmarshals the MetadataV1 object +func (m *MetadataV1) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 16 { + return ssz.ErrSize + } + + // Field (0) 'SeqNumber' + m.SeqNumber = ssz.UnmarshallUint64(buf[0:8]) + + // Field (1) 'Attnets' + m.Attnets = ssz.UnmarshallUint64(buf[8:16]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the MetadataV1 object +func (m *MetadataV1) SizeSSZ() (size int) { + size = 16 + return +} + +// HashTreeRoot ssz hashes the MetadataV1 object +func (m *MetadataV1) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(m) +} + +// HashTreeRootWith ssz hashes the MetadataV1 object with a hasher +func (m *MetadataV1) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'SeqNumber' + hh.PutUint64(m.SeqNumber) + + // Field (1) 'Attnets' + hh.PutUint64(m.Attnets) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the MetadataV1 object +func (m *MetadataV1) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(m) +} + +// MarshalSSZ ssz marshals the MetadataV2 object +func (m *MetadataV2) MarshalSSZ() ([]byte, error) { + 
return ssz.MarshalSSZ(m) +} + +// MarshalSSZTo ssz marshals the MetadataV2 object to a target array +func (m *MetadataV2) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'SeqNumber' + dst = ssz.MarshalUint64(dst, m.SeqNumber) + + // Field (1) 'Attnets' + dst = ssz.MarshalUint64(dst, m.Attnets) + + // Field (2) 'Syncnets' + dst = ssz.MarshalUint64(dst, m.Syncnets) + + return +} + +// UnmarshalSSZ ssz unmarshals the MetadataV2 object +func (m *MetadataV2) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 24 { + return ssz.ErrSize + } + + // Field (0) 'SeqNumber' + m.SeqNumber = ssz.UnmarshallUint64(buf[0:8]) + + // Field (1) 'Attnets' + m.Attnets = ssz.UnmarshallUint64(buf[8:16]) + + // Field (2) 'Syncnets' + m.Syncnets = ssz.UnmarshallUint64(buf[16:24]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the MetadataV2 object +func (m *MetadataV2) SizeSSZ() (size int) { + size = 24 + return +} + +// HashTreeRoot ssz hashes the MetadataV2 object +func (m *MetadataV2) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(m) +} + +// HashTreeRootWith ssz hashes the MetadataV2 object with a hasher +func (m *MetadataV2) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'SeqNumber' + hh.PutUint64(m.SeqNumber) + + // Field (1) 'Attnets' + hh.PutUint64(m.Attnets) + + // Field (2) 'Syncnets' + hh.PutUint64(m.Syncnets) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the MetadataV2 object +func (m *MetadataV2) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(m) +} diff --git a/cmd/lightclient/rpc/lightrpc/sentinel.pb.go b/cmd/lightclient/rpc/lightrpc/sentinel.pb.go new file mode 100644 index 00000000000..b2448863e45 --- /dev/null +++ b/cmd/lightclient/rpc/lightrpc/sentinel.pb.go @@ -0,0 +1,275 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.25.0-devel +// protoc v3.14.0 +// source: sentinel.proto + +package lightrpc + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GossipType int32 + +const ( + // Lightclient gossip + GossipType_LightClientFinalityUpdateGossipType GossipType = 0 + GossipType_LightClientOptimisticUpdateGossipType GossipType = 1 + // Legacy gossip + GossipType_BeaconBlockGossipType GossipType = 2 +) + +// Enum value maps for GossipType. +var ( + GossipType_name = map[int32]string{ + 0: "LightClientFinalityUpdateGossipType", + 1: "LightClientOptimisticUpdateGossipType", + 2: "BeaconBlockGossipType", + } + GossipType_value = map[string]int32{ + "LightClientFinalityUpdateGossipType": 0, + "LightClientOptimisticUpdateGossipType": 1, + "BeaconBlockGossipType": 2, + } +) + +func (x GossipType) Enum() *GossipType { + p := new(GossipType) + *p = x + return p +} + +func (x GossipType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GossipType) Descriptor() protoreflect.EnumDescriptor { + return file_sentinel_proto_enumTypes[0].Descriptor() +} + +func (GossipType) Type() protoreflect.EnumType { + return &file_sentinel_proto_enumTypes[0] +} + +func (x GossipType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GossipType.Descriptor instead. 
+func (GossipType) EnumDescriptor() ([]byte, []int) { + return file_sentinel_proto_rawDescGZIP(), []int{0} +} + +type GossipRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GossipRequest) Reset() { + *x = GossipRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sentinel_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GossipRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GossipRequest) ProtoMessage() {} + +func (x *GossipRequest) ProtoReflect() protoreflect.Message { + mi := &file_sentinel_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GossipRequest.ProtoReflect.Descriptor instead. +func (*GossipRequest) Descriptor() ([]byte, []int) { + return file_sentinel_proto_rawDescGZIP(), []int{0} +} + +type GossipData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // SSZ encoded data + Type GossipType `protobuf:"varint,2,opt,name=type,proto3,enum=lightrpc.GossipType" json:"type,omitempty"` +} + +func (x *GossipData) Reset() { + *x = GossipData{} + if protoimpl.UnsafeEnabled { + mi := &file_sentinel_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GossipData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GossipData) ProtoMessage() {} + +func (x *GossipData) ProtoReflect() protoreflect.Message { + mi := &file_sentinel_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GossipData.ProtoReflect.Descriptor instead. +func (*GossipData) Descriptor() ([]byte, []int) { + return file_sentinel_proto_rawDescGZIP(), []int{1} +} + +func (x *GossipData) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *GossipData) GetType() GossipType { + if x != nil { + return x.Type + } + return GossipType_LightClientFinalityUpdateGossipType +} + +var File_sentinel_proto protoreflect.FileDescriptor + +var file_sentinel_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x73, 0x65, 0x6e, 0x74, 0x69, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x1a, 0x12, 0x62, 0x65, 0x61, 0x63, + 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x0f, + 0x0a, 0x0d, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x4a, 0x0a, 0x0a, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x14, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, + 0x70, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x2a, 0x7b, 0x0a, 0x0a, 0x47, + 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x27, 0x0a, 0x23, 0x4c, 0x69, 0x67, + 0x68, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, + 0x10, 0x00, 0x12, 0x29, 0x0a, 0x25, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x73, 0x74, 0x69, 0x63, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x47, 
0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x01, 0x12, 0x19, 0x0a, + 0x15, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x47, 0x6f, 0x73, 0x73, + 0x69, 0x70, 0x54, 0x79, 0x70, 0x65, 0x10, 0x02, 0x32, 0x4e, 0x0a, 0x08, 0x53, 0x65, 0x6e, 0x74, + 0x69, 0x6e, 0x65, 0x6c, 0x12, 0x42, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x17, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, + 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x14, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x73, 0x73, + 0x69, 0x70, 0x44, 0x61, 0x74, 0x61, 0x30, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sentinel_proto_rawDescOnce sync.Once + file_sentinel_proto_rawDescData = file_sentinel_proto_rawDesc +) + +func file_sentinel_proto_rawDescGZIP() []byte { + file_sentinel_proto_rawDescOnce.Do(func() { + file_sentinel_proto_rawDescData = protoimpl.X.CompressGZIP(file_sentinel_proto_rawDescData) + }) + return file_sentinel_proto_rawDescData +} + +var file_sentinel_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_sentinel_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_sentinel_proto_goTypes = []interface{}{ + (GossipType)(0), // 0: lightrpc.GossipType + (*GossipRequest)(nil), // 1: lightrpc.GossipRequest + (*GossipData)(nil), // 2: lightrpc.GossipData +} +var file_sentinel_proto_depIdxs = []int32{ + 0, // 0: lightrpc.GossipData.type:type_name -> lightrpc.GossipType + 1, // 1: lightrpc.Sentinel.SubscribeGossip:input_type -> lightrpc.GossipRequest + 2, // 2: lightrpc.Sentinel.SubscribeGossip:output_type -> lightrpc.GossipData + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field 
type_name +} + +func init() { file_sentinel_proto_init() } +func file_sentinel_proto_init() { + if File_sentinel_proto != nil { + return + } + file_beacon_block_proto_init() + if !protoimpl.UnsafeEnabled { + file_sentinel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GossipRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sentinel_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GossipData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sentinel_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_sentinel_proto_goTypes, + DependencyIndexes: file_sentinel_proto_depIdxs, + EnumInfos: file_sentinel_proto_enumTypes, + MessageInfos: file_sentinel_proto_msgTypes, + }.Build() + File_sentinel_proto = out.File + file_sentinel_proto_rawDesc = nil + file_sentinel_proto_goTypes = nil + file_sentinel_proto_depIdxs = nil +} diff --git a/cmd/lightclient/rpc/lightrpc/sentinel_grpc.pb.go b/cmd/lightclient/rpc/lightrpc/sentinel_grpc.pb.go new file mode 100644 index 00000000000..d4ed74db87e --- /dev/null +++ b/cmd/lightclient/rpc/lightrpc/sentinel_grpc.pb.go @@ -0,0 +1,128 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package lightrpc + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +// SentinelClient is the client API for Sentinel service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SentinelClient interface { + SubscribeGossip(ctx context.Context, in *GossipRequest, opts ...grpc.CallOption) (Sentinel_SubscribeGossipClient, error) +} + +type sentinelClient struct { + cc grpc.ClientConnInterface +} + +func NewSentinelClient(cc grpc.ClientConnInterface) SentinelClient { + return &sentinelClient{cc} +} + +func (c *sentinelClient) SubscribeGossip(ctx context.Context, in *GossipRequest, opts ...grpc.CallOption) (Sentinel_SubscribeGossipClient, error) { + stream, err := c.cc.NewStream(ctx, &Sentinel_ServiceDesc.Streams[0], "/lightrpc.Sentinel/SubscribeGossip", opts...) + if err != nil { + return nil, err + } + x := &sentinelSubscribeGossipClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Sentinel_SubscribeGossipClient interface { + Recv() (*GossipData, error) + grpc.ClientStream +} + +type sentinelSubscribeGossipClient struct { + grpc.ClientStream +} + +func (x *sentinelSubscribeGossipClient) Recv() (*GossipData, error) { + m := new(GossipData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// SentinelServer is the server API for Sentinel service. +// All implementations must embed UnimplementedSentinelServer +// for forward compatibility +type SentinelServer interface { + SubscribeGossip(*GossipRequest, Sentinel_SubscribeGossipServer) error + mustEmbedUnimplementedSentinelServer() +} + +// UnimplementedSentinelServer must be embedded to have forward compatible implementations. 
+type UnimplementedSentinelServer struct { +} + +func (UnimplementedSentinelServer) SubscribeGossip(*GossipRequest, Sentinel_SubscribeGossipServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeGossip not implemented") +} +func (UnimplementedSentinelServer) mustEmbedUnimplementedSentinelServer() {} + +// UnsafeSentinelServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SentinelServer will +// result in compilation errors. +type UnsafeSentinelServer interface { + mustEmbedUnimplementedSentinelServer() +} + +func RegisterSentinelServer(s grpc.ServiceRegistrar, srv SentinelServer) { + s.RegisterService(&Sentinel_ServiceDesc, srv) +} + +func _Sentinel_SubscribeGossip_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GossipRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SentinelServer).SubscribeGossip(m, &sentinelSubscribeGossipServer{stream}) +} + +type Sentinel_SubscribeGossipServer interface { + Send(*GossipData) error + grpc.ServerStream +} + +type sentinelSubscribeGossipServer struct { + grpc.ServerStream +} + +func (x *sentinelSubscribeGossipServer) Send(m *GossipData) error { + return x.ServerStream.SendMsg(m) +} + +// Sentinel_ServiceDesc is the grpc.ServiceDesc for Sentinel service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Sentinel_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "lightrpc.Sentinel", + HandlerType: (*SentinelServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SubscribeGossip", + Handler: _Sentinel_SubscribeGossip_Handler, + ServerStreams: true, + }, + }, + Metadata: "sentinel.proto", +} diff --git a/cmd/lightclient/rpc/proto/beacon_block.proto b/cmd/lightclient/rpc/proto/beacon_block.proto new file mode 100644 index 00000000000..818bd818134 --- /dev/null +++ b/cmd/lightclient/rpc/proto/beacon_block.proto @@ -0,0 +1,146 @@ +syntax = "proto3"; + +package lightrpc; + +message Eth1Data { + bytes Root = 1 [json_name = "deposit_root"]; // @gotags: sszsize:"32" + uint64 DepositCount = 2 [json_name = "deposit_count"]; + bytes BlockHash = 3 [json_name = "block_hash"]; // @gotags: sszsize:"32" +} + +message BeaconBlockHeader { + uint64 Slot = 1 [json_name = "slot"]; + uint64 ProposerIndex = 2 [json_name = "proposer_index"]; + bytes ParentRoot = 3 [json_name = "parent_root"]; // @gotags: sszsize:"32" + bytes Root = 4 [json_name = "root"]; // @gotags: sszsize:"32" + bytes BodyRoot = 5 [json_name = "body_root"]; // @gotags: sszsize:"32" +} + +message SignedBeaconBlockHeader { + BeaconBlockHeader Header = 1 [json_name = "message"]; + bytes Signature = 2 [json_name = "signature"]; // @gotags: sszsize:"96" +} + +message Slashing { + SignedBeaconBlockHeader Header1 = 1 [json_name = "signed_header_1"]; + SignedBeaconBlockHeader Header2 = 2 [json_name = "signed_header_2"]; +} + +// TODO(Giulio2002): Finish. 
+message AttestationData { + uint64 Slot = 1 [json_name = "slot"]; + uint64 Index = 2 [json_name = "index"]; + bytes BeaconBlockHash = 3 [json_name = "beacon_block_hash"]; // @gotags: sszsize:"32" +} + +message Attestation { + bytes AggregationBits = 1 [json_name = "aggregation_bits"]; // @gotags: sszmax:"2048" ssz:"bitlist" + AttestationData Data = 2 [json_name = "data"]; + bytes Signature = 3 [json_name = "signature"]; // @gotags: sszsize:"96" +} + +message DepositData { + bytes PubKey = 1 [json_name = "pubkey"]; // @gotags: sszsize:"48" + bytes WithdrawalCredentials = 2 [json_name = "withdrawal_credentials"]; // @gotags: sszsize:"32" + uint64 Amount = 3 [json_name = "amount"]; + bytes Signature = 4 [json_name = "signature"]; // @gotags: sszsize:"96" + bytes Root = 5; // @gotags: ssz:"-" +} + +message Deposit { + repeated bytes Proof = 1 [json_name = "proof"]; // @gotags: sszsize:"33,32" + DepositData Data = 2 [json_name = "data"]; +} + +message SignedVoluntaryExit { + VoluntaryExit VolunaryExit = 1 [json_name = "message"]; + bytes Signature = 2 [json_name = "signature"]; // @gotags: sszsize:"96" +} + +message VoluntaryExit { + uint64 Epoch = 1 [json_name = "epoch"]; + uint64 ValidatorIndex = 2 [json_name = "validator_index"]; +} + +message SyncAggregate { + bytes SyncCommiteeBits = 1 [json_name = "sync_committee_bits"]; // @gotags: sszsize:"64" + bytes SyncCommiteeSignature = 2 [json_name = "sync_committee_signature"]; // @gotags: sszsize:"96" +} + +// Lightclient will sent this to Erigon once validation is done. 
+message ExecutionPayload { + bytes ParentHash = 1 [json_name = "parent_hash"]; // @gotags: sszsize:"32" + bytes FeeRecipient = 2 [json_name = "fee_recipient"]; // @gotags: sszsize:"20" + bytes StateRoot = 3 [json_name = "state_root"]; // @gotags: sszsize:"32" + bytes ReceiptsRoot = 4 [json_name = "receipts_root"]; // @gotags: sszsize:"32" + bytes LogsBloom = 5 [json_name = "logs_bloom"]; // @gotags: sszsize:"256" + bytes PrevRandao = 6 [json_name = "prev_randao"]; // @gotags: sszsize:"32" + uint64 BlockNumber = 7 [json_name = "block_number"]; + uint64 GasLimit = 8 [json_name = "gas_limit"]; + uint64 GasUsed = 9 [json_name = "gas_used"]; + uint64 Timestamp = 10 [json_name = "timestamp"]; + bytes ExtraData = 11 [json_name = "extra_data"]; // @gotags: sszmax:"32" + bytes BaseFeePerGas = 12 [json_name = "base_fee_per_gas"]; // @gotags: sszsize:"32" + bytes BlockHash = 13 [json_name = "block_hash"]; // @gotags: sszsize:"32" + repeated bytes Transactions = 14 [json_name = "transactions"]; // @gotags: sszsize:"?,?" 
sszmax:"1048576,1073741824" +} + +message BeaconBodyBellatrix { + bytes RandaoReveal = 1 [json_name = "randao_reveal"]; // @gotags: sszsize:"96" + Eth1Data Eth1Data = 2 [json_name = "eth1_data"]; + bytes Graffiti = 3 [json_name = "graffiti"]; // @gotags: sszsize:"32" + repeated Slashing ProposerSlashings = 4 [json_name = "proposer_slashings"]; // @gotags: sszmax:"16" + repeated Slashing AttesterSlashings = 5 [json_name = "attester_slashings"]; // @gotags: sszmax:"2" + repeated Attestation Attestations = 6 [json_name = "attestations"]; // @gotags: sszmax:"128" + repeated Deposit Deposits = 7 [json_name = "deposits"]; // @gotags: sszmax:"16" + repeated SignedVoluntaryExit VoluntaryExits = 8 [json_name = "voluntary_exits"]; // @gotags: sszmax:"16" + SyncAggregate SyncAggregate = 9 [json_name = "sync_aggregate"]; + ExecutionPayload ExecutionPayload = 10 [json_name = "execution_payload"]; +} + +message BeaconBlockBellatrix { + uint64 Slot = 1 [json_name = "slot"]; + uint64 ProposerIndex = 2 [json_name = "proposer_index"]; + bytes ParentRoot = 3 [json_name = "parent_root"]; // @gotags: sszsize:"32" + bytes StateRoot = 4 [json_name = "root"]; // @gotags: sszsize:"32" + BeaconBodyBellatrix Body = 5 [json_name = "body"]; +} + +message SignedBeaconBlockBellatrix { + BeaconBlockBellatrix Block = 1 [json_name = "message"]; + bytes Signature = 2 [json_name = "signature"]; // @gotags: sszsize:"96" +} + +message LightClientBootstrap { + BeaconBlockHeader Header = 1 [json_name = "header"]; + SyncCommittee CurrentSyncCommittee = 2 [json_name = "current_sync_committee"]; + repeated bytes CurrentSyncCommitteeBranch = 3 [json_name = "current_sync_committee_branch"]; // @gotags: sszsize:"5,32" +} + +message SyncCommittee { + repeated bytes PubKeys = 1 [json_name = "current_sync_committee"]; // @gotags: sszsize:"512,48" +} + +message LightClientUpdate { + BeaconBlockHeader AttestedHeader = 1 [json_name = "attested_header"]; + SyncCommittee NextSyncCommitee = 2 [json_name = 
"next_sync_committee"]; + repeated bytes NextSyncCommitteeBranch = 3 [json_name = "next_sync_committee_branch"]; // @gotags: sszsize:"5,32" + BeaconBlockHeader FinalizedHeader = 4 [json_name = "finalized_header"]; + repeated bytes FinalityBranch = 5 [json_name = "finality_branch"]; // @gotags: sszsize:"6,32" + SyncAggregate SyncAggregate = 6 [json_name = "sync_aggregate"]; + uint64 SignatureSlot = 7 [json_name = "signature_slot"]; +} + +message LightClientFinalityUpdate { + BeaconBlockHeader AttestedHeader = 1 [json_name = "attested_header"]; + BeaconBlockHeader FinalizedHeader = 2 [json_name = "finalized_header"]; + repeated bytes FinalityBranch = 3 [json_name = "finality_branch"]; // @gotags: sszsize:"6,32" + SyncAggregate SyncAggregate = 4 [json_name = "sync_aggregate"]; + uint64 SignatureSlot = 5 [json_name = "signature_slot"]; +} + +message LightClientOptimisticUpdate { + BeaconBlockHeader AttestedHeader = 1 [json_name = "attested_header"]; + SyncAggregate SyncAggregate = 2 [json_name = "sync_aggregate"]; + uint64 SignatureSlot = 3 [json_name = "signature_slot"]; +} diff --git a/cmd/lightclient/rpc/proto/metadata.proto b/cmd/lightclient/rpc/proto/metadata.proto new file mode 100644 index 00000000000..9b00bf6a5ba --- /dev/null +++ b/cmd/lightclient/rpc/proto/metadata.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package lightrpc; + +message MetadataV1 { + uint64 SeqNumber = 1 [json_name="seq_number"]; + uint64 Attnets = 2 [json_name="attnets"]; +} + +message MetadataV2 { + uint64 SeqNumber = 1 [json_name="seq_number"]; + uint64 Attnets = 2 [json_name="attnets"]; + uint64 Syncnets = 3[json_name="syncnets"]; +} \ No newline at end of file diff --git a/cmd/lightclient/rpc/proto/sentinel.proto b/cmd/lightclient/rpc/proto/sentinel.proto new file mode 100644 index 00000000000..46c41b7e7b9 --- /dev/null +++ b/cmd/lightclient/rpc/proto/sentinel.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package lightrpc; + +import "beacon_block.proto"; + +message GossipRequest {} + 
+enum GossipType { + // Lightclient gossip + LightClientFinalityUpdateGossipType = 0; + LightClientOptimisticUpdateGossipType = 1; + // Legacy gossip + BeaconBlockGossipType = 2; +} + +message GossipData { + bytes data = 1; // SSZ encoded data + GossipType type = 2; +} + +service Sentinel { + rpc SubscribeGossip(GossipRequest) returns (stream GossipData); +} diff --git a/cmd/lightclient/sentinel/communication/p2p/generate.go b/cmd/lightclient/sentinel/communication/p2p/generate.go new file mode 100644 index 00000000000..4d0275686d1 --- /dev/null +++ b/cmd/lightclient/sentinel/communication/p2p/generate.go @@ -0,0 +1,3 @@ +package p2p + +//go:generate go run ./p2pgen -i spec_p2p.yaml -o . diff --git a/cmd/lightclient/sentinel/communication/p2p/generated.go b/cmd/lightclient/sentinel/communication/p2p/generated.go new file mode 100644 index 00000000000..8693747c7b3 --- /dev/null +++ b/cmd/lightclient/sentinel/communication/p2p/generated.go @@ -0,0 +1,87 @@ +package p2p + +//go:generate go run github.com/ferranbt/fastssz/sszgen -path generated.go -exclude-objs Bitvector4,Bitvector64,Bytea,Epoch,Root,Signature,Slot,Ignore + +import ( + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication" +) + +type Bitvector4 uint8 + +type Bitvector64 uint64 + +type Bytea []byte + +type Epoch uint64 + +type Root [32]byte + +type Signature [96]byte + +type Slot uint64 + +type Checkpoint struct { + Epoch uint64 `json:"epoch" ` + + Root Root `json:"root" ssz-size:"32" ` +} + +func (typ *Checkpoint) Clone() communication.Packet { + return &Checkpoint{} +} + +type ENRForkID struct { + CurrentForkDigest Bytea `json:"current_fork_digest,omitempty" ssz-size:"4" ` + + NextForkVersion Bytea `json:"next_fork_version,omitempty" ssz-size:"4" ` + + NextForkEpoch Epoch `json:"next_fork_epoch,omitempty" ` +} + +func (typ *ENRForkID) Clone() communication.Packet { + return &ENRForkID{} +} + +type ForkData struct { + CurrentVersion [4]byte `json:"current_version" ssz-size:"4" ` + + 
GenesisValidatorsRoot Root `json:"genesis_validators_root" ssz-size:"32" ` +} + +func (typ *ForkData) Clone() communication.Packet { + return &ForkData{} +} + +type Ping struct { + Id uint64 `json:"id" ` +} + +func (typ *Ping) Clone() communication.Packet { + return &Ping{} +} + +type SingleRoot struct { + Root Root `json:"root" ssz-size:"32" ` + + BodyRoot Root `json:"body_root" ssz-size:"32" ` +} + +func (typ *SingleRoot) Clone() communication.Packet { + return &SingleRoot{} +} + +type Status struct { + ForkDigest Bytea `json:"fork_digest,omitempty" ssz-size:"4" ` + + FinalizedRoot Bytea `json:"finalized_root,omitempty" ssz-size:"32" ` + + FinalizedEpoch Epoch `json:"finalized_epoch,omitempty" ` + + HeadRoot Bytea `json:"head_root,omitempty" ssz-size:"32" ` + + HeadSlot Slot `json:"head_slot,omitempty" ` +} + +func (typ *Status) Clone() communication.Packet { + return &Status{} +} diff --git a/cmd/lightclient/sentinel/communication/p2p/generated_encoding.go b/cmd/lightclient/sentinel/communication/p2p/generated_encoding.go new file mode 100644 index 00000000000..ff2c0ee0d4d --- /dev/null +++ b/cmd/lightclient/sentinel/communication/p2p/generated_encoding.go @@ -0,0 +1,480 @@ +// Code generated by fastssz. DO NOT EDIT. +// Hash: 6c24bfc6e641d48afee20ff8a60ced263fd1394cb922659d4260b97310525fe8 +// Version: 0.1.2 +package p2p + +import ( + ssz "github.com/ferranbt/fastssz" +) + +// MarshalSSZ ssz marshals the Checkpoint object +func (c *Checkpoint) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(c) +} + +// MarshalSSZTo ssz marshals the Checkpoint object to a target array +func (c *Checkpoint) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Epoch' + dst = ssz.MarshalUint64(dst, c.Epoch) + + // Field (1) 'Root' + dst = append(dst, c.Root[:]...) 
+ + return +} + +// UnmarshalSSZ ssz unmarshals the Checkpoint object +func (c *Checkpoint) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 40 { + return ssz.ErrSize + } + + // Field (0) 'Epoch' + c.Epoch = ssz.UnmarshallUint64(buf[0:8]) + + // Field (1) 'Root' + copy(c.Root[:], buf[8:40]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the Checkpoint object +func (c *Checkpoint) SizeSSZ() (size int) { + size = 40 + return +} + +// HashTreeRoot ssz hashes the Checkpoint object +func (c *Checkpoint) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(c) +} + +// HashTreeRootWith ssz hashes the Checkpoint object with a hasher +func (c *Checkpoint) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Epoch' + hh.PutUint64(c.Epoch) + + // Field (1) 'Root' + hh.PutBytes(c.Root[:]) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the Checkpoint object +func (c *Checkpoint) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(c) +} + +// MarshalSSZ ssz marshals the ENRForkID object +func (e *ENRForkID) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(e) +} + +// MarshalSSZTo ssz marshals the ENRForkID object to a target array +func (e *ENRForkID) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'CurrentForkDigest' + if size := len(e.CurrentForkDigest); size != 4 { + err = ssz.ErrBytesLengthFn("ENRForkID.CurrentForkDigest", size, 4) + return + } + dst = append(dst, e.CurrentForkDigest...) + + // Field (1) 'NextForkVersion' + if size := len(e.NextForkVersion); size != 4 { + err = ssz.ErrBytesLengthFn("ENRForkID.NextForkVersion", size, 4) + return + } + dst = append(dst, e.NextForkVersion...) 
+ + // Field (2) 'NextForkEpoch' + dst = ssz.MarshalUint64(dst, uint64(e.NextForkEpoch)) + + return +} + +// UnmarshalSSZ ssz unmarshals the ENRForkID object +func (e *ENRForkID) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 16 { + return ssz.ErrSize + } + + // Field (0) 'CurrentForkDigest' + if cap(e.CurrentForkDigest) == 0 { + e.CurrentForkDigest = make([]byte, 0, len(buf[0:4])) + } + e.CurrentForkDigest = append(e.CurrentForkDigest, buf[0:4]...) + + // Field (1) 'NextForkVersion' + if cap(e.NextForkVersion) == 0 { + e.NextForkVersion = make([]byte, 0, len(buf[4:8])) + } + e.NextForkVersion = append(e.NextForkVersion, buf[4:8]...) + + // Field (2) 'NextForkEpoch' + e.NextForkEpoch = Epoch(ssz.UnmarshallUint64(buf[8:16])) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the ENRForkID object +func (e *ENRForkID) SizeSSZ() (size int) { + size = 16 + return +} + +// HashTreeRoot ssz hashes the ENRForkID object +func (e *ENRForkID) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(e) +} + +// HashTreeRootWith ssz hashes the ENRForkID object with a hasher +func (e *ENRForkID) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'CurrentForkDigest' + if size := len(e.CurrentForkDigest); size != 4 { + err = ssz.ErrBytesLengthFn("ENRForkID.CurrentForkDigest", size, 4) + return + } + hh.PutBytes(e.CurrentForkDigest) + + // Field (1) 'NextForkVersion' + if size := len(e.NextForkVersion); size != 4 { + err = ssz.ErrBytesLengthFn("ENRForkID.NextForkVersion", size, 4) + return + } + hh.PutBytes(e.NextForkVersion) + + // Field (2) 'NextForkEpoch' + hh.PutUint64(uint64(e.NextForkEpoch)) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the ENRForkID object +func (e *ENRForkID) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(e) +} + +// MarshalSSZ ssz marshals the ForkData object +func (f *ForkData) MarshalSSZ() ([]byte, error) { + return 
ssz.MarshalSSZ(f) +} + +// MarshalSSZTo ssz marshals the ForkData object to a target array +func (f *ForkData) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'CurrentVersion' + dst = append(dst, f.CurrentVersion[:]...) + + // Field (1) 'GenesisValidatorsRoot' + dst = append(dst, f.GenesisValidatorsRoot[:]...) + + return +} + +// UnmarshalSSZ ssz unmarshals the ForkData object +func (f *ForkData) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 36 { + return ssz.ErrSize + } + + // Field (0) 'CurrentVersion' + copy(f.CurrentVersion[:], buf[0:4]) + + // Field (1) 'GenesisValidatorsRoot' + copy(f.GenesisValidatorsRoot[:], buf[4:36]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the ForkData object +func (f *ForkData) SizeSSZ() (size int) { + size = 36 + return +} + +// HashTreeRoot ssz hashes the ForkData object +func (f *ForkData) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(f) +} + +// HashTreeRootWith ssz hashes the ForkData object with a hasher +func (f *ForkData) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'CurrentVersion' + hh.PutBytes(f.CurrentVersion[:]) + + // Field (1) 'GenesisValidatorsRoot' + hh.PutBytes(f.GenesisValidatorsRoot[:]) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the ForkData object +func (f *ForkData) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(f) +} + +// MarshalSSZ ssz marshals the Ping object +func (p *Ping) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(p) +} + +// MarshalSSZTo ssz marshals the Ping object to a target array +func (p *Ping) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Id' + dst = ssz.MarshalUint64(dst, p.Id) + + return +} + +// UnmarshalSSZ ssz unmarshals the Ping object +func (p *Ping) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 8 { + return 
ssz.ErrSize + } + + // Field (0) 'Id' + p.Id = ssz.UnmarshallUint64(buf[0:8]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the Ping object +func (p *Ping) SizeSSZ() (size int) { + size = 8 + return +} + +// HashTreeRoot ssz hashes the Ping object +func (p *Ping) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(p) +} + +// HashTreeRootWith ssz hashes the Ping object with a hasher +func (p *Ping) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Id' + hh.PutUint64(p.Id) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the Ping object +func (p *Ping) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(p) +} + +// MarshalSSZ ssz marshals the SingleRoot object +func (s *SingleRoot) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// MarshalSSZTo ssz marshals the SingleRoot object to a target array +func (s *SingleRoot) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Root' + dst = append(dst, s.Root[:]...) + + // Field (1) 'BodyRoot' + dst = append(dst, s.BodyRoot[:]...) 
+ + return +} + +// UnmarshalSSZ ssz unmarshals the SingleRoot object +func (s *SingleRoot) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 64 { + return ssz.ErrSize + } + + // Field (0) 'Root' + copy(s.Root[:], buf[0:32]) + + // Field (1) 'BodyRoot' + copy(s.BodyRoot[:], buf[32:64]) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the SingleRoot object +func (s *SingleRoot) SizeSSZ() (size int) { + size = 64 + return +} + +// HashTreeRoot ssz hashes the SingleRoot object +func (s *SingleRoot) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the SingleRoot object with a hasher +func (s *SingleRoot) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Root' + hh.PutBytes(s.Root[:]) + + // Field (1) 'BodyRoot' + hh.PutBytes(s.BodyRoot[:]) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the SingleRoot object +func (s *SingleRoot) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(s) +} + +// MarshalSSZ ssz marshals the Status object +func (s *Status) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// MarshalSSZTo ssz marshals the Status object to a target array +func (s *Status) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'ForkDigest' + if size := len(s.ForkDigest); size != 4 { + err = ssz.ErrBytesLengthFn("Status.ForkDigest", size, 4) + return + } + dst = append(dst, s.ForkDigest...) + + // Field (1) 'FinalizedRoot' + if size := len(s.FinalizedRoot); size != 32 { + err = ssz.ErrBytesLengthFn("Status.FinalizedRoot", size, 32) + return + } + dst = append(dst, s.FinalizedRoot...) + + // Field (2) 'FinalizedEpoch' + dst = ssz.MarshalUint64(dst, uint64(s.FinalizedEpoch)) + + // Field (3) 'HeadRoot' + if size := len(s.HeadRoot); size != 32 { + err = ssz.ErrBytesLengthFn("Status.HeadRoot", size, 32) + return + } + dst = append(dst, s.HeadRoot...) 
+ + // Field (4) 'HeadSlot' + dst = ssz.MarshalUint64(dst, uint64(s.HeadSlot)) + + return +} + +// UnmarshalSSZ ssz unmarshals the Status object +func (s *Status) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 84 { + return ssz.ErrSize + } + + // Field (0) 'ForkDigest' + if cap(s.ForkDigest) == 0 { + s.ForkDigest = make([]byte, 0, len(buf[0:4])) + } + s.ForkDigest = append(s.ForkDigest, buf[0:4]...) + + // Field (1) 'FinalizedRoot' + if cap(s.FinalizedRoot) == 0 { + s.FinalizedRoot = make([]byte, 0, len(buf[4:36])) + } + s.FinalizedRoot = append(s.FinalizedRoot, buf[4:36]...) + + // Field (2) 'FinalizedEpoch' + s.FinalizedEpoch = Epoch(ssz.UnmarshallUint64(buf[36:44])) + + // Field (3) 'HeadRoot' + if cap(s.HeadRoot) == 0 { + s.HeadRoot = make([]byte, 0, len(buf[44:76])) + } + s.HeadRoot = append(s.HeadRoot, buf[44:76]...) + + // Field (4) 'HeadSlot' + s.HeadSlot = Slot(ssz.UnmarshallUint64(buf[76:84])) + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the Status object +func (s *Status) SizeSSZ() (size int) { + size = 84 + return +} + +// HashTreeRoot ssz hashes the Status object +func (s *Status) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the Status object with a hasher +func (s *Status) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'ForkDigest' + if size := len(s.ForkDigest); size != 4 { + err = ssz.ErrBytesLengthFn("Status.ForkDigest", size, 4) + return + } + hh.PutBytes(s.ForkDigest) + + // Field (1) 'FinalizedRoot' + if size := len(s.FinalizedRoot); size != 32 { + err = ssz.ErrBytesLengthFn("Status.FinalizedRoot", size, 32) + return + } + hh.PutBytes(s.FinalizedRoot) + + // Field (2) 'FinalizedEpoch' + hh.PutUint64(uint64(s.FinalizedEpoch)) + + // Field (3) 'HeadRoot' + if size := len(s.HeadRoot); size != 32 { + err = ssz.ErrBytesLengthFn("Status.HeadRoot", size, 32) + return + } + 
hh.PutBytes(s.HeadRoot) + + // Field (4) 'HeadSlot' + hh.PutUint64(uint64(s.HeadSlot)) + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the Status object +func (s *Status) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(s) +} diff --git a/cmd/lightclient/sentinel/communication/p2p/p2pgen/main.go b/cmd/lightclient/sentinel/communication/p2p/p2pgen/main.go new file mode 100644 index 00000000000..cccf8fefe32 --- /dev/null +++ b/cmd/lightclient/sentinel/communication/p2p/p2pgen/main.go @@ -0,0 +1,96 @@ +package main + +import ( + "bytes" + "flag" + "go/format" + "os" + "path" + "strings" + "text/template" + + "gopkg.in/yaml.v3" +) + +var input string +var output string + +type Spec struct { + Aliases map[string]string + Structs map[string][]Field +} +type Field struct { + Name string + Type string + Tags TagMap +} + +type TagMap map[string]string + +func (t TagMap) String() string { + sb := new(strings.Builder) + sb.WriteString("`") + for k, v := range t { + sb.WriteString(k) + sb.WriteString(":") + sb.WriteRune('"') + sb.WriteString(v) + sb.WriteString(`" `) + } + sb.WriteString("`") + return sb.String() +} + +func main() { + flag.StringVar(&input, "i", "spec_p2p.yaml", "yaml file to read") + flag.StringVar(&output, "o", ".", "directory to output") + flag.Parse() + b, err := os.ReadFile(input) + if err != nil { + panic(err) + } + tmp := template.Must(template.New("p2pspec").Parse(tmpl)) + s := &Spec{} + err = yaml.Unmarshal(b, s) + if err != nil { + panic(err) + } + buf := new(bytes.Buffer) + err = tmp.Execute(buf, s) + if err != nil { + panic(err) + } + src, err := format.Source(buf.Bytes()) + if err != nil { + panic(err) + } + os.WriteFile(path.Join(output, "generated.go"), src, 0600) +} + +const tmpl = `package p2p + +//go:generate go run github.com/ferranbt/fastssz/sszgen -path generated.go -exclude-objs {{range $key, $val := .Aliases}}{{$key}},{{end}}Ignore + +import ( + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication" +) + 
+{{range $key, $val := .Aliases}} +type {{$key}} {{$val}} +{{end}} + +{{range $key, $val := .Structs}} + +type {{$key}} struct { + +{{range $name, $field := $val}} + {{$field.Name}} {{$field.Type}} {{$field.Tags.String}} +{{end}} + +} + +func (typ *{{$key}}) Clone() communication.Packet { + return &{{$key}}{} +} +{{end}} +` diff --git a/cmd/lightclient/sentinel/communication/p2p/spec_p2p.yaml b/cmd/lightclient/sentinel/communication/p2p/spec_p2p.yaml new file mode 100644 index 00000000000..ac679ccbb78 --- /dev/null +++ b/cmd/lightclient/sentinel/communication/p2p/spec_p2p.yaml @@ -0,0 +1,85 @@ +aliases: + Slot: uint64 + Epoch: uint64 + Bitvector64: uint64 + Bitvector4: uint8 + Root: '[32]byte' + Signature: '[96]byte' + Bytea: '[]byte' +structs: + Status: + - name: ForkDigest + type: Bytea + tags: + json: 'fork_digest,omitempty' + ssz-size: 4 + - name: FinalizedRoot + type: Bytea + tags: + json: 'finalized_root,omitempty' + ssz-size: 32 + - name: FinalizedEpoch + type: Epoch + tags: + json: 'finalized_epoch,omitempty' + - name: HeadRoot + type: Bytea + tags: + json: 'head_root,omitempty' + ssz-size: 32 + - name: HeadSlot + type: Slot + tags: + json: 'head_slot,omitempty' + ForkData: + - name: CurrentVersion + type: '[4]byte' + tags: + json: current_version + ssz-size: 4 + - name: GenesisValidatorsRoot + type: Root + tags: + json: genesis_validators_root + ssz-size: 32 + SingleRoot: + - name: Root + type: Root + tags: + json: root + ssz-size: 32 + - name: BodyRoot + type: Root + tags: + json: body_root + ssz-size: 32 + Checkpoint: + - name: Epoch + type: uint64 + tags: + json: epoch + - name: Root + type: Root + tags: + json: root + ssz-size: 32 + Ping: + - name: Id + type: uint64 + tags: + json: id + ENRForkID: + - name: CurrentForkDigest + type: Bytea + tags: + json: 'current_fork_digest,omitempty' + ssz-size: 4 + - name: NextForkVersion + type: Bytea + tags: + json: 'next_fork_version,omitempty' + ssz-size: 4 + - name: NextForkEpoch + type: Epoch + tags: + json: 
'next_fork_epoch,omitempty' diff --git a/cmd/lightclient/sentinel/communication/packet.go b/cmd/lightclient/sentinel/communication/packet.go new file mode 100644 index 00000000000..eaeb7b1c79f --- /dev/null +++ b/cmd/lightclient/sentinel/communication/packet.go @@ -0,0 +1,101 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package communication + +import ( + "context" + "fmt" + + "github.com/libp2p/go-libp2p-core/protocol" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/network" +) + +// packet simply needs to implement clone so that it can be instantiated within the generic +type Packet interface { + Clone() Packet +} + +// context includes the original stream, the raw decompressed bytes, the codec the context was generated from, and the protocol ID +type StreamContext struct { + Packet Packet + Protocol protocol.ID + Codec StreamCodec + Stream network.Stream + + Raw []byte +} + +func (c *StreamContext) String() string { + return fmt.Sprintf("peer %s | packet %s | len %d", c.Stream.ID(), c.Protocol, c.Packet) +} + +type GossipContext struct { + // the packet + Packet Packet + // the topic of the message + Topic *pubsub.Topic + // the actual message + Msg *pubsub.Message + // the codec used to decode the message + Codec GossipCodec + // the decompressed message in the native encoding of msg + Raw []byte +} + +// PacketCodec describes a wire format. 
+type StreamCodec interface { + Close() error + CloseWriter() error + CloseReader() error + + Write(payload []byte) (n int, err error) + WritePacket(pck Packet, prefix ...byte) (n int, err error) + Decode(Packet) (ctx *StreamContext, err error) + + Read(payload []byte) (n int, err error) + ReadByte() (b byte, err error) +} + +// GossipCodec describes a wire format for pubsub messages +// it is linked to a single topiC +type GossipCodec interface { + WritePacket(ctx context.Context, pck Packet) (err error) + Decode(context.Context, Packet) (*GossipContext, error) +} + +func (c *GossipContext) String() string { + return fmt.Sprintf("peer %s | topic %s | len %d", c.Msg.ReceivedFrom, c.Topic, c.Packet) +} + +// the empty packet doesn't implement any serialization, so it means to skip. +type EmptyPacket struct{} + +func (e *EmptyPacket) Clone() Packet { + return &EmptyPacket{} +} + +// the error message skips decoding but does do the decompression. +type ErrorMessage struct { + Message []byte `json:"message"` +} + +func (typ *ErrorMessage) Clone() Packet { + return &ErrorMessage{} +} + +func (typ *ErrorMessage) UnmarshalSSZ(buf []byte) error { + typ.Message = buf + return nil +} diff --git a/cmd/lightclient/sentinel/communication/ssz_snappy/gossip.go b/cmd/lightclient/sentinel/communication/ssz_snappy/gossip.go new file mode 100644 index 00000000000..99616fa3215 --- /dev/null +++ b/cmd/lightclient/sentinel/communication/ssz_snappy/gossip.go @@ -0,0 +1,84 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ssz_snappy + +import ( + "context" + + ssz "github.com/ferranbt/fastssz" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication" + "github.com/ledgerwatch/erigon/cmd/lightclient/utils" + pubsub "github.com/libp2p/go-libp2p-pubsub" +) + +type GossipCodec struct { + sub *pubsub.Subscription + top *pubsub.Topic +} + +func NewGossipCodec( + sub *pubsub.Subscription, + top *pubsub.Topic, +) communication.GossipCodec { + return &GossipCodec{ + sub: sub, + top: top, + } +} + +// decode into packet p, then return the packet context +func (d *GossipCodec) Decode(ctx context.Context, p communication.Packet) (sctx *communication.GossipContext, err error) { + msg, err := d.sub.Next(ctx) + if err != nil { + return nil, err + } + sctx, err = d.readPacket(msg, p) + return +} + +func (d *GossipCodec) WritePacket(ctx context.Context, p communication.Packet) error { + if val, ok := p.(ssz.Marshaler); ok { + ans, err := utils.EncodeSSZSnappy(val) + if err != nil { + return err + } + + return d.top.Publish(ctx, ans) + } + return nil +} + +func (d *GossipCodec) readPacket(msg *pubsub.Message, p communication.Packet) (*communication.GossipContext, error) { + // read the next message + c := &communication.GossipContext{ + Packet: p, + Codec: d, + } + c.Topic = d.top + c.Msg = msg + if p == nil { + return c, nil + } + + return c, d.decodeData(p, msg.Data) +} + +func (d *GossipCodec) decodeData(p communication.Packet, data []byte) error { + var val ssz.Unmarshaler + var ok bool + if val, ok = p.(ssz.Unmarshaler); !ok { + return nil + } + return utils.DecodeSSZSnappy(val, data) +} diff --git a/cmd/lightclient/sentinel/communication/ssz_snappy/stream.go b/cmd/lightclient/sentinel/communication/ssz_snappy/stream.go new file mode 100644 index 00000000000..6d0f0471a82 --- /dev/null +++ b/cmd/lightclient/sentinel/communication/ssz_snappy/stream.go @@ -0,0 +1,164 
@@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ssz_snappy + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "io" + "reflect" + + ssz "github.com/ferranbt/fastssz" + "github.com/golang/snappy" + "github.com/ledgerwatch/erigon/cmd/lightclient/rpc/lightrpc" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication" + "github.com/libp2p/go-libp2p/core/network" +) + +type StreamCodec struct { + s network.Stream + sr *snappy.Reader +} + +func NewStreamCodec( + s network.Stream, +) communication.StreamCodec { + return &StreamCodec{ + s: s, + sr: snappy.NewReader(s), + } +} + +func (d *StreamCodec) Close() error { + if err := d.s.Close(); err != nil { + return err + } + return nil +} + +func (d *StreamCodec) CloseWriter() error { + if err := d.s.CloseWrite(); err != nil { + return err + } + return nil +} + +func (d *StreamCodec) CloseReader() error { + if err := d.s.CloseRead(); err != nil { + return err + } + return nil +} + +// write packet to stream. 
will add correct header + compression +// will error if packet does not implement ssz.Marshaler interface +func (d *StreamCodec) WritePacket(pkt communication.Packet, prefix ...byte) (n int, err error) { + // if its a metadata request we dont write anything + if reflect.TypeOf(pkt) == reflect.TypeOf(&lightrpc.MetadataV1{}) || reflect.TypeOf(pkt) == reflect.TypeOf(&lightrpc.MetadataV2{}) { + return 0, nil + } + val, ok := pkt.(ssz.Marshaler) + if !ok { + return 0, nil + } + lengthBuf := make([]byte, 10) + vin := binary.PutUvarint(lengthBuf, uint64(val.SizeSSZ())) + wr := bufio.NewWriterSize(d.s, 10+val.SizeSSZ()) + defer wr.Flush() + wr.Write(prefix) // write prefix first (done for responses) + wr.Write(lengthBuf[:vin]) + sw := snappy.NewBufferedWriter(wr) + defer sw.Flush() + xs := make([]byte, 0, val.SizeSSZ()) + enc, err := val.MarshalSSZTo(xs) + if err != nil { + return 0, err + } + return sw.Write(enc) +} + +// write raw bytes to stream +func (d *StreamCodec) Write(payload []byte) (n int, err error) { + return d.s.Write(payload) +} + +// read raw bytes to stream +func (d *StreamCodec) Read(b []byte) (n int, err error) { + return d.s.Read(b) +} + +// read raw bytes to stream +func (d *StreamCodec) ReadByte() (b byte, err error) { + o := [1]byte{} + _, err = io.ReadFull(d.s, o[:]) + if err != nil { + return + } + return o[0], nil +} + +// decode into packet p, then return the packet context +func (d *StreamCodec) Decode(p communication.Packet) (ctx *communication.StreamContext, err error) { + ctx, err = d.readPacket(p) + return +} + +func (d *StreamCodec) readPacket(p communication.Packet) (ctx *communication.StreamContext, err error) { + c := &communication.StreamContext{ + Packet: p, + Stream: d.s, + Codec: d, + Protocol: d.s.Protocol(), + } + if val, ok := p.(ssz.Unmarshaler); ok { + ln, err := readUvarint(d.s) + if err != nil { + return c, err + } + c.Raw = make([]byte, ln) + _, err = io.ReadFull(d.sr, c.Raw) + if err != nil { + return c, 
fmt.Errorf("readPacket: %w", err) + } + err = val.UnmarshalSSZ(c.Raw) + if err != nil { + return c, fmt.Errorf("readPacket: %w", err) + } + } + return c, nil +} + +func readUvarint(r io.Reader) (uint64, error) { + var x uint64 + var s uint + bs := [1]byte{} + for i := 0; i < 10; i++ { + _, err := r.Read(bs[:]) + if err != nil { + return x, err + } + b := bs[0] + if b < 0x80 { + if i == 10-1 && b > 1 { + return x, errors.New("readUvarint: overflow") + } + return x | uint64(b)<Host", "from", ctx.Stream.ID(), "endpoint", ctx.Protocol, "msg", val) + } +} diff --git a/cmd/lightclient/sentinel/handlers/handlers.go b/cmd/lightclient/sentinel/handlers/handlers.go new file mode 100644 index 00000000000..7617586c264 --- /dev/null +++ b/cmd/lightclient/sentinel/handlers/handlers.go @@ -0,0 +1,66 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package handlers + +import ( + "github.com/ledgerwatch/erigon/cmd/lightclient/rpc/lightrpc" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication/ssz_snappy" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/peers" + + "github.com/ledgerwatch/log/v3" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/protocol" +) + +type ConsensusHandlers struct { + handlers map[protocol.ID]network.StreamHandler + host host.Host + peers *peers.Peers + metadataV1 *lightrpc.MetadataV1 +} + +const SuccessfullResponsePrefix = 0x00 + +func NewConsensusHandlers(host host.Host, peers *peers.Peers, metadataV1 *lightrpc.MetadataV1) *ConsensusHandlers { + c := &ConsensusHandlers{ + peers: peers, + host: host, + metadataV1: metadataV1, + } + c.handlers = map[protocol.ID]network.StreamHandler{ + protocol.ID(PingProtocolV1): curryStreamHandler(ssz_snappy.NewStreamCodec, pingHandler), + protocol.ID(GoodbyeProtocolV1): curryStreamHandler(ssz_snappy.NewStreamCodec, pingHandler), + protocol.ID(StatusProtocolV1): curryStreamHandler(ssz_snappy.NewStreamCodec, statusHandler), + protocol.ID(MetadataProtocolV1): curryStreamHandler(ssz_snappy.NewStreamCodec, nilHandler), + protocol.ID(MetadataProtocolV2): curryStreamHandler(ssz_snappy.NewStreamCodec, nilHandler), + protocol.ID(BeaconBlockByRangeProtocolV1): c.blocksByRangeHandler, + protocol.ID(BeaconBlockByRootProtocolV1): c.beaconBlocksByRootHandler, + } + return c +} + +func (c *ConsensusHandlers) blocksByRangeHandler(stream network.Stream) { + log.Info("Got block by range handler call") +} + +func (c *ConsensusHandlers) beaconBlocksByRootHandler(stream network.Stream) { + log.Info("Got beacon block by root handler call") +} + +func (c *ConsensusHandlers) Start() { + for id, handler := range c.handlers { + c.host.SetStreamHandler(id, handler) + } +} diff --git a/cmd/lightclient/sentinel/handlers/heartbeats.go 
b/cmd/lightclient/sentinel/handlers/heartbeats.go new file mode 100644 index 00000000000..1d5113d5d30 --- /dev/null +++ b/cmd/lightclient/sentinel/handlers/heartbeats.go @@ -0,0 +1,53 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package handlers + +import ( + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication/p2p" + "github.com/ledgerwatch/erigon/cmd/lightclient/utils" + "github.com/ledgerwatch/log/v3" +) + +// type safe handlers which all have access to the original stream & decompressed data +// ping handler +func pingHandler(ctx *communication.StreamContext, dat *p2p.Ping) error { + // since packets are just structs, they can be resent with no issue + _, err := ctx.Codec.WritePacket(dat, SuccessfullResponsePrefix) + if err != nil { + return err + } + return nil +} + +// does nothing +func nilHandler(ctx *communication.StreamContext, dat *communication.EmptyPacket) error { + return nil +} + +// TODO: Actually respond with proper status +func statusHandler(ctx *communication.StreamContext, dat *p2p.Status) error { + log.Debug("[ReqResp] Status", + "epoch", dat.FinalizedEpoch, + "final root", utils.BytesToHex(dat.FinalizedRoot), + "head root", utils.BytesToHex(dat.HeadRoot), + "head slot", dat.HeadSlot, + "fork digest", utils.BytesToHex(dat.ForkDigest), + ) + _, err := ctx.Codec.WritePacket(dat, SuccessfullResponsePrefix) + if err 
!= nil { + return err + } + return nil +} diff --git a/cmd/lightclient/sentinel/handlers/heartbeats_test.go b/cmd/lightclient/sentinel/handlers/heartbeats_test.go new file mode 100644 index 00000000000..99274fa790f --- /dev/null +++ b/cmd/lightclient/sentinel/handlers/heartbeats_test.go @@ -0,0 +1,107 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package handlers + +import ( + "context" + "testing" + "time" + + "github.com/ledgerwatch/erigon/cmd/lightclient/rpc/lightrpc" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication/p2p" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication/ssz_snappy" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/peers" + "github.com/ledgerwatch/erigon/common" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/protocol" + basichost "github.com/libp2p/go-libp2p/p2p/host/basic" + swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func initializeNetwork(t *testing.T, ctx context.Context) (*ConsensusHandlers, host.Host, host.Host) { + h1, err := basichost.NewHost(swarmt.GenSwarm(t), nil) + require.NoError(t, err) + + h2, err := basichost.NewHost(swarmt.GenSwarm(t), nil) + require.NoError(t, err) + h2pi := h2.Peerstore().PeerInfo(h2.ID()) + require.NoError(t, h1.Connect(ctx, h2pi)) + + return NewConsensusHandlers(h2, &peers.Peers{}, 
&lightrpc.MetadataV1{}), h1, h2 +} + +func TestPingHandler(t *testing.T) { + ctx := context.TODO() + + handlers, h1, h2 := initializeNetwork(t, ctx) + defer h1.Close() + defer h2.Close() + handlers.Start() + + stream, err := h1.NewStream(ctx, h2.ID(), protocol.ID(PingProtocolV1)) + require.NoError(t, err) + packet := &p2p.Ping{ + Id: 32, + } + codec := ssz_snappy.NewStreamCodec(stream) + _, err = codec.WritePacket(packet) + require.NoError(t, err) + require.NoError(t, codec.CloseWriter()) + time.Sleep(100 * time.Millisecond) + r := &p2p.Ping{} + + code := make([]byte, 1) + stream.Read(code) + assert.Equal(t, code, []byte{SuccessfullResponsePrefix}) + + _, err = codec.Decode(r) + require.NoError(t, err) + + assert.Equal(t, r, packet) +} + +func TestStatusHandler(t *testing.T) { + ctx := context.TODO() + + handlers, h1, h2 := initializeNetwork(t, ctx) + defer h1.Close() + defer h2.Close() + handlers.Start() + + stream, err := h1.NewStream(ctx, h2.ID(), protocol.ID(StatusProtocolV1)) + require.NoError(t, err) + packet := &p2p.Status{ + ForkDigest: common.Hex2Bytes("69696969"), + HeadRoot: make([]byte, 32), + FinalizedRoot: make([]byte, 32), + HeadSlot: 666999, + } + codec := ssz_snappy.NewStreamCodec(stream) + _, err = codec.WritePacket(packet) + require.NoError(t, err) + require.NoError(t, codec.CloseWriter()) + time.Sleep(100 * time.Millisecond) + r := &p2p.Status{} + + code := make([]byte, 1) + stream.Read(code) + assert.Equal(t, code, []byte{SuccessfullResponsePrefix}) + + _, err = codec.Decode(r) + require.NoError(t, err) + + assert.Equal(t, r, packet) +} diff --git a/cmd/lightclient/sentinel/handlers/topics.go b/cmd/lightclient/sentinel/handlers/topics.go new file mode 100644 index 00000000000..e9cdf8399aa --- /dev/null +++ b/cmd/lightclient/sentinel/handlers/topics.go @@ -0,0 +1,43 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the 
License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package handlers + +const ProtocolPrefix = "/eth2/beacon_chain/req" +const EncodingProtocol = "/ssz_snappy" + +// request and response versions +const Schema1 = "/1" +const Schema2 = "/2" + +// Request and Response topics +const MetadataTopic = "/metadata" +const PingTopic = "/ping" +const StatusTopic = "/status" +const GoodbyeTopic = "/goodbye" +const BeaconBlockByRangeTopic = "/beacon_block_by_range" +const BeaconBlockByRootTopic = "/beacon_block_by_root" + +// Request and Response protocol ids +var ( + PingProtocolV1 = ProtocolPrefix + PingTopic + Schema1 + EncodingProtocol + GoodbyeProtocolV1 = ProtocolPrefix + GoodbyeTopic + Schema1 + EncodingProtocol + + MetadataProtocolV1 = ProtocolPrefix + MetadataTopic + Schema1 + EncodingProtocol + MetadataProtocolV2 = ProtocolPrefix + MetadataTopic + Schema2 + EncodingProtocol + + StatusProtocolV1 = ProtocolPrefix + StatusTopic + Schema1 + EncodingProtocol + + BeaconBlockByRangeProtocolV1 = ProtocolPrefix + BeaconBlockByRangeTopic + Schema1 + EncodingProtocol + BeaconBlockByRootProtocolV1 = ProtocolPrefix + BeaconBlockByRootTopic + Schema1 + EncodingProtocol +) diff --git a/cmd/lightclient/sentinel/peers/peers.go b/cmd/lightclient/sentinel/peers/peers.go new file mode 100644 index 00000000000..1bbfcd9508b --- /dev/null +++ b/cmd/lightclient/sentinel/peers/peers.go @@ -0,0 +1,89 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package peers + +import ( + "sync" + + lru "github.com/hashicorp/golang-lru" + "github.com/ledgerwatch/log/v3" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" +) + +const ( + maxBadPeers = 1000 // Always cap memory consumption at 1 MB + DefaultMaxPeers = 200 + MaxBadResponses = 10 +) + +type Peers struct { + badPeers *lru.Cache // Keep track of bad peers + penalties *lru.Cache // Keep track on how many penalties a peer accumulated, PeerId => penalties + host host.Host + mu sync.Mutex +} + +func New(host host.Host) *Peers { + badPeers, err := lru.New(maxBadPeers) + if err != nil { + panic(err) + } + + penalties, err := lru.New(maxBadPeers) + if err != nil { + panic(err) + } + return &Peers{ + badPeers: badPeers, + penalties: penalties, + host: host, + } +} + +func (p *Peers) IsBadPeer(pid peer.ID) bool { + p.mu.Lock() + defer p.mu.Unlock() + return p.badPeers.Contains(pid) +} + +func (p *Peers) Penalize(pid peer.ID) { + p.mu.Lock() + defer p.mu.Unlock() + + penaltyInterface, has := p.penalties.Get(pid) + if !has { + p.penalties.Add(pid, 1) + return + } + penalties := penaltyInterface.(int) + 1 + + p.penalties.Add(pid, penalties) + // Drop peer and delete the map element. 
+ if penalties > MaxBadResponses { + p.banBadPeer(pid) + p.penalties.Remove(pid) + } +} + +func (p *Peers) banBadPeer(pid peer.ID) { + p.DisconnectPeer(pid) + p.badPeers.Add(pid, []byte{0}) + log.Debug("[Peers] bad peers has been banned", "peer-id", pid) +} + +func (p *Peers) DisconnectPeer(pid peer.ID) { + log.Trace("[Peers] disconnecting from peer", "peer-id", pid) + p.host.Peerstore().RemovePeer(pid) +} diff --git a/cmd/lightclient/sentinel/pubsub.go b/cmd/lightclient/sentinel/pubsub.go new file mode 100644 index 00000000000..b4d169e15dd --- /dev/null +++ b/cmd/lightclient/sentinel/pubsub.go @@ -0,0 +1,191 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package sentinel + +import ( + "context" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/ledgerwatch/erigon/cmd/lightclient/fork" + "github.com/ledgerwatch/erigon/cmd/lightclient/rpc/lightrpc" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication/ssz_snappy" + "github.com/ledgerwatch/log/v3" + pubsub "github.com/libp2p/go-libp2p-pubsub" +) + +const ( + // overlay parameters + gossipSubD = 8 // topic stable mesh target count + gossipSubDlo = 6 // topic stable mesh low watermark + gossipSubDhi = 12 // topic stable mesh high watermark + + // gossip parameters + gossipSubMcacheLen = 6 // number of windows to retain full messages in cache for `IWANT` responses + gossipSubMcacheGossip = 3 // number of windows to gossip about + gossipSubSeenTTL = 550 // number of heartbeat intervals to retain message IDs + + // fanout ttl + gossipSubFanoutTTL = 60000000000 // TTL for fanout maps for topics we are not subscribed to but have published to, in nano seconds + + // heartbeat interval + gossipSubHeartbeatInterval = 700 * time.Millisecond // frequency of heartbeat, milliseconds + + // misc + rSubD = 8 // random gossip target +) + +// Specifies the prefix for any pubsub topic. 
+const gossipTopicPrefix = "/eth2/" +const blockSubnetTopicFormat = "/eth2/%x/beacon_block" + +type TopicName string + +const ( + BeaconBlockTopic TopicName = "beacon_block" + LightClientFinalityUpdateTopic TopicName = "light_client_finality_update" + LightClientOptimisticUpdateTopic TopicName = "light_client_optimistic_update" +) + +type GossipTopic struct { + Name TopicName + Codec func(*pubsub.Subscription, *pubsub.Topic) communication.GossipCodec + Typ communication.Packet + CodecStr string +} + +var BeaconBlockSsz = GossipTopic{ + Name: BeaconBlockTopic, + Typ: &lightrpc.SignedBeaconBlockBellatrix{}, + Codec: ssz_snappy.NewGossipCodec, + CodecStr: "ssz_snappy", +} +var LightClientFinalityUpdateSsz = GossipTopic{ + Name: LightClientFinalityUpdateTopic, + Typ: &lightrpc.LightClientFinalityUpdate{}, + Codec: ssz_snappy.NewGossipCodec, + CodecStr: "ssz_snappy", +} +var LightClientOptimisticUpdateSsz = GossipTopic{ + Name: LightClientOptimisticUpdateTopic, + Typ: &lightrpc.LightClientOptimisticUpdate{}, + Codec: ssz_snappy.NewGossipCodec, + CodecStr: "ssz_snappy", +} + +type GossipManager struct { + ch chan *communication.GossipContext + subscriptions map[string]*GossipSubscription + mu sync.RWMutex +} + +// construct a new gossip manager that will handle packets with the given handlerfunc +func NewGossipManager( + ctx context.Context, +) *GossipManager { + g := &GossipManager{ + ch: make(chan *communication.GossipContext, 1), + subscriptions: map[string]*GossipSubscription{}, + } + return g +} + +func (s *GossipManager) Recv() <-chan *communication.GossipContext { + return s.ch +} + +// closes a specific topic +func (s *GossipManager) CloseTopic(topic string) { + s.mu.Lock() + defer s.mu.Unlock() + if val, ok := s.subscriptions[topic]; ok { + val.Close() + delete(s.subscriptions, topic) + } +} + +// get a specific topic +func (s *GossipManager) GetSubscription(topic string) (*GossipSubscription, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + if val, ok := 
s.subscriptions[topic]; ok { + return val, true + } + return nil, false +} + +// starts listening to a specific topic (forwarding its messages to the gossip manager channel) +func (s *GossipManager) ListenTopic(topic string) error { + s.mu.RLock() + defer s.mu.RUnlock() + if val, ok := s.subscriptions[topic]; ok { + return val.Listen() + } + return nil +} + +// closes the gossip manager +func (s *GossipManager) Close() { + s.mu.Lock() + defer s.mu.Unlock() + for _, val := range s.subscriptions { + val.Close() + } + close(s.ch) +} +func (s *GossipManager) String() string { + sb := new(strings.Builder) + s.mu.RLock() + for _, v := range s.subscriptions { + sb.Write([]byte(v.topic.String())) + sb.WriteString("=") + sb.WriteString(strconv.Itoa(len(v.topic.ListPeers()))) + sb.WriteString(" ") + } + s.mu.RUnlock() + return sb.String() +} +func (s *Sentinel) SubscribeGossip(topic GossipTopic, opts ...pubsub.TopicOpt) (sub *GossipSubscription, err error) { + sub = &GossipSubscription{ + gossip_topic: topic, + ch: s.subManager.ch, + host: s.host.ID(), + ctx: s.ctx, + } + path := s.getTopic(topic) + sub.topic, err = s.pubsub.Join(path, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to join topic %s, err=%w", path, err) + } + s.subManager.mu.Lock() + s.subManager.subscriptions[path] = sub + s.subManager.mu.Unlock() + return sub, nil +} + +func (s *Sentinel) LogTopicPeers() { + log.Info("[Gossip] Network Update", "topic peers", s.subManager.String()) +} + +func (s *Sentinel) getTopic(topic GossipTopic) string { + o, err := fork.ComputeForkDigest(s.cfg.BeaconConfig, s.cfg.GenesisConfig) + if err != nil { + log.Error("[Gossip] Failed to calculate fork choice", "err", err) + } + return fmt.Sprintf("/eth2/%x/%s/%s", o, topic.Name, topic.CodecStr) +} diff --git a/cmd/lightclient/sentinel/request.go b/cmd/lightclient/sentinel/request.go new file mode 100644 index 00000000000..995d878ee4a --- /dev/null +++ b/cmd/lightclient/sentinel/request.go @@ -0,0 +1,142 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package sentinel + +import ( + "fmt" + "reflect" + "time" + + "github.com/ledgerwatch/erigon/cmd/lightclient/clparams" + "github.com/ledgerwatch/erigon/cmd/lightclient/rpc/lightrpc" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication/p2p" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication/ssz_snappy" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/handlers" + "github.com/ledgerwatch/log/v3" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" +) + +func (s *Sentinel) SendPingReqV1() (communication.Packet, error) { + requestPacket := &p2p.Ping{ + Id: s.metadataV1.SeqNumber, + } + responsePacket := &p2p.Ping{} + return sendRequest(s, requestPacket, responsePacket, handlers.PingProtocolV1) +} + +func (s *Sentinel) SendMetadataReqV1() (communication.Packet, error) { + requestPacket := &lightrpc.MetadataV1{} + responsePacket := &lightrpc.MetadataV1{} + + return sendRequest(s, requestPacket, responsePacket, handlers.MetadataProtocolV1) +} + +// TODO: add the rest of the request topics + +func sendRequest(s *Sentinel, requestPacket communication.Packet, responsePacket communication.Packet, topic string) (communication.Packet, error) { + _, peerInfo, err := connectToRandomPeer(s) + if err != nil { + return nil, fmt.Errorf("failed to connect to a random peer err=%s", err) + } + + peerId := peerInfo.ID + + reqRetryTimer := time.NewTimer(clparams.ReqTimeout) + defer reqRetryTimer.Stop() + + retryTicker := time.NewTicker(10 * time.Millisecond) + defer retryTicker.Stop() + + sc, err := writeRequest(s, requestPacket, peerId, topic) + for err != nil { + select { + case <-s.ctx.Done(): + log.Warn("[Req] sentinel has been shut down") + return nil, nil + case <-reqRetryTimer.C: + log.Debug("[Req] timeout", "topic", topic, "peer", peerId) + return nil, err + case <-retryTicker.C: + sc, err = writeRequest(s, 
requestPacket, peerId, topic) + } + } + + defer sc.Close() + log.Debug("[Req] sent request", "topic", topic, "peer", peerId) + + respRetryTimer := time.NewTimer(clparams.RespTimeout) + defer respRetryTimer.Stop() + + responsePacket, err = decodeResponse(sc, responsePacket, peerId) + for err != nil { + select { + case <-s.ctx.Done(): + log.Warn("[Resp] sentinel has been shutdown") + return nil, nil + case <-respRetryTimer.C: + log.Debug("[Resp] timeout", "topic", topic, "peer", peerId) + return nil, err + case <-retryTicker.C: + responsePacket, err = decodeResponse(sc, responsePacket, peerId) + } + } + + return responsePacket, nil +} + +func writeRequest(s *Sentinel, requestPacket communication.Packet, peerId peer.ID, topic string) (communication.StreamCodec, error) { + stream, err := s.host.NewStream(s.ctx, peerId, protocol.ID(topic)) + if err != nil { + return nil, fmt.Errorf("failed to begin stream, err=%s", err) + } + + sc := ssz_snappy.NewStreamCodec(stream) + + if _, err := sc.WritePacket(requestPacket); err != nil { + return nil, fmt.Errorf("failed to write packet type=%s, err=%s", reflect.TypeOf(requestPacket), err) + } + + if err := sc.CloseWriter(); err != nil { + return nil, fmt.Errorf("failed to close write stream, err=%s", err) + } + + return sc, nil +} + +func decodeResponse(sc communication.StreamCodec, responsePacket communication.Packet, peerId peer.ID) (communication.Packet, error) { + code, err := sc.ReadByte() + if err != nil { + return nil, fmt.Errorf("failed to read code byte peer=%s, err=%s", peerId, err) + } + + if code != 0 { + errPacket := &communication.ErrorMessage{} + protoCtx, err := sc.Decode(errPacket) + if err != nil { + return nil, fmt.Errorf("failed to decode error packet got=%s, err=%s", string(protoCtx.Raw), err) + } + log.Debug("[Resp] got error packet", "error-message", string(errPacket.Message), "peer", peerId) + return errPacket, nil + } + + protoCtx, err := sc.Decode(responsePacket) + if err != nil { + return nil, 
fmt.Errorf("failed to decode packet got=%s, err=%s", string(protoCtx.Raw), err) + } + log.Debug("[Resp] got response from", "response", responsePacket, "peer", peerId) + + return responsePacket, nil +} diff --git a/cmd/lightclient/sentinel/sentinel.go b/cmd/lightclient/sentinel/sentinel.go new file mode 100644 index 00000000000..e7d821f97bc --- /dev/null +++ b/cmd/lightclient/sentinel/sentinel.go @@ -0,0 +1,250 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sentinel + +import ( + "context" + "crypto/ecdsa" + "fmt" + "net" + "strings" + + "github.com/ledgerwatch/erigon/cmd/lightclient/fork" + "github.com/ledgerwatch/erigon/cmd/lightclient/rpc/lightrpc" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/handlers" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/peers" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/p2p/discover" + "github.com/ledgerwatch/erigon/p2p/enode" + "github.com/ledgerwatch/erigon/p2p/enr" + "github.com/ledgerwatch/log/v3" + "github.com/libp2p/go-libp2p" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/p2p/protocol/identify" + "github.com/pkg/errors" +) + +type Sentinel struct { + started bool + listener *discover.UDPv5 // this is us in the network. 
+ ctx context.Context + host host.Host + cfg *SentinelConfig + peers *peers.Peers + metadataV1 *lightrpc.MetadataV1 + + discoverConfig discover.Config + pubsub *pubsub.PubSub + subManager *GossipManager +} + +func (s *Sentinel) createLocalNode( + privKey *ecdsa.PrivateKey, + ipAddr net.IP, + udpPort, tcpPort int, +) (*enode.LocalNode, error) { + db, err := enode.OpenDB("") + if err != nil { + return nil, errors.Wrap(err, "could not open node's peer database") + } + localNode := enode.NewLocalNode(db, privKey) + + ipEntry := enr.IP(ipAddr) + udpEntry := enr.UDP(udpPort) + tcpEntry := enr.TCP(tcpPort) + + localNode.Set(ipEntry) + localNode.Set(udpEntry) + localNode.Set(tcpEntry) + + localNode.SetFallbackIP(ipAddr) + localNode.SetFallbackUDP(udpPort) + s.setupENR(localNode) + + return localNode, nil +} + +func (s *Sentinel) createListener() (*discover.UDPv5, error) { + var ( + ipAddr = s.cfg.IpAddr + port = s.cfg.Port + discCfg = s.discoverConfig + ) + + ip := net.ParseIP(ipAddr) + if ip.To4() == nil { + return nil, fmt.Errorf("IPV4 address not provided instead %s was provided", ipAddr) + } + + var bindIP net.IP + var networkVersion string + + // check for our network version + switch { + // if we have 16 byte and 4 byte representation then we are in using udp6 + case ip.To16() != nil && ip.To4() == nil: + bindIP = net.IPv6zero + networkVersion = "udp6" + // only 4 bytes then we are using udp4 + case ip.To4() != nil: + bindIP = net.IPv4zero + networkVersion = "udp4" + default: + return nil, fmt.Errorf("bad ip address provided, %s was provided", ipAddr) + } + + udpAddr := &net.UDPAddr{ + IP: bindIP, + Port: port, + } + conn, err := net.ListenUDP(networkVersion, udpAddr) + if err != nil { + return nil, err + } + + localNode, err := s.createLocalNode(discCfg.PrivateKey, ip, port, int(s.cfg.TCPPort)) + if err != nil { + return nil, err + } + + // TODO: Set up proper attestation number + s.metadataV1 = &lightrpc.MetadataV1{ + SeqNumber: localNode.Seq(), + Attnets: 0, + } + 
+ // Start stream handlers + handlers.NewConsensusHandlers(s.host, s.peers, s.metadataV1).Start() + + net, err := discover.ListenV5(s.ctx, conn, localNode, discCfg) + if err != nil { + return nil, err + } + return net, err +} + +func (s *Sentinel) pubsubOptions() []pubsub.Option { + pubsubQueueSize := 600 + gsp := pubsub.DefaultGossipSubParams() + psOpts := []pubsub.Option{ + pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign), + pubsub.WithMessageIdFn(fork.MsgID), + pubsub.WithNoAuthor(), + pubsub.WithSubscriptionFilter(nil), + pubsub.WithPeerOutboundQueueSize(pubsubQueueSize), + pubsub.WithMaxMessageSize(int(s.cfg.NetworkConfig.GossipMaxSize)), + pubsub.WithValidateQueueSize(pubsubQueueSize), + pubsub.WithGossipSubParams(gsp), + } + return psOpts +} + +// This is just one of the examples from the libp2p repository. +func New( + ctx context.Context, + cfg *SentinelConfig, +) (*Sentinel, error) { + s := &Sentinel{ + ctx: ctx, + cfg: cfg, + } + + // Setup discovery + enodes := make([]*enode.Node, len(cfg.NetworkConfig.BootNodes)) + for i, bootnode := range cfg.NetworkConfig.BootNodes { + newNode, err := enode.Parse(enode.ValidSchemes, bootnode) + if err != nil { + return nil, err + } + enodes[i] = newNode + } + privateKey, err := crypto.GenerateKey() + if err != nil { + return nil, err + } + s.discoverConfig = discover.Config{ + PrivateKey: privateKey, + Bootnodes: enodes, + } + + opts, err := buildOptions(cfg, s) + if err != nil { + return nil, err + } + + host, err := libp2p.New(opts...) + if err != nil { + return nil, err + } + + host.RemoveStreamHandler(identify.IDDelta) + s.host = host + s.peers = peers.New(s.host) + + s.pubsub, err = pubsub.NewGossipSub(s.ctx, s.host, s.pubsubOptions()...) 
+ if err != nil { + return nil, fmt.Errorf("[Sentinel] failed to subscribe to gossip err=%w", err) + } + + return s, nil +} + +func (s *Sentinel) RecvGossip() <-chan *communication.GossipContext { + return s.subManager.Recv() +} + +func (s *Sentinel) Start( +// potentially we can put the req/resp handler here as well? +) error { + if s.started { + log.Warn("Sentinel already running") + } + var err error + s.listener, err = s.createListener() + if err != nil { + return fmt.Errorf("failed creating sentinel listener err=%w", err) + } + + if err := s.connectToBootnodes(); err != nil { + return fmt.Errorf("failed to connect to bootnodes err=%w", err) + } + go s.listenForPeers() + s.subManager = NewGossipManager(s.ctx) + return nil +} + +func (s *Sentinel) String() string { + return s.listener.Self().String() +} + +func (s *Sentinel) HasTooManyPeers() bool { + return len(s.host.Network().Peers()) >= peers.DefaultMaxPeers +} + +func (s *Sentinel) GetPeersCount() int { + // Check how many peers are subscribed to beacon block + var sub *GossipSubscription + for topic, currSub := range s.subManager.subscriptions { + if strings.Contains(topic, string(BeaconBlockTopic)) { + sub = currSub + } + } + + if sub == nil { + return len(s.host.Network().Peers()) + } + return len(sub.topic.ListPeers()) +} diff --git a/cmd/lightclient/sentinel/service/notifiers.go b/cmd/lightclient/sentinel/service/notifiers.go new file mode 100644 index 00000000000..a435a1d0424 --- /dev/null +++ b/cmd/lightclient/sentinel/service/notifiers.go @@ -0,0 +1,65 @@ +package service + +import ( + "fmt" + "sync" + + "github.com/ledgerwatch/erigon/cmd/lightclient/rpc/lightrpc" +) + +const ( + maxSubscribers = 100 // only 100 lightclients per sentinel +) + +type gossipObject struct { + data []byte // gossip data + t lightrpc.GossipType // determine which gossip message we are notifying of +} + +type gossipNotifier struct { + notifiers []chan gossipObject + + mu sync.Mutex +} + +func newGossipNotifier() 
*gossipNotifier { + return &gossipNotifier{ + notifiers: make([]chan gossipObject, 0, maxSubscribers), + } +} + +func (g *gossipNotifier) notify(t lightrpc.GossipType, data []byte) { + g.mu.Lock() + defer g.mu.Unlock() + + for _, ch := range g.notifiers { + ch <- gossipObject{ + data: data, + t: t, + } + } +} + +func (g *gossipNotifier) addSubscriber() (chan gossipObject, int, error) { + g.mu.Lock() + defer g.mu.Unlock() + + if len(g.notifiers) >= maxSubscribers { + return nil, -1, fmt.Errorf("too many subsribers, try again later") + } + ch := make(chan gossipObject) + g.notifiers = append(g.notifiers, ch) + return ch, len(g.notifiers) - 1, nil +} + +func (g *gossipNotifier) removeSubscriber(id int) error { + g.mu.Lock() + defer g.mu.Unlock() + + if len(g.notifiers) <= id { + return fmt.Errorf("invalid id, no subscription exist with this id") + } + close(g.notifiers[id]) + g.notifiers = append(g.notifiers[:id], g.notifiers[id+1:]...) + return nil +} diff --git a/cmd/lightclient/sentinel/service/service.go b/cmd/lightclient/sentinel/service/service.go new file mode 100644 index 00000000000..d8fc8557a7f --- /dev/null +++ b/cmd/lightclient/sentinel/service/service.go @@ -0,0 +1,87 @@ +package service + +import ( + "context" + + ssz "github.com/ferranbt/fastssz" + "github.com/ledgerwatch/erigon/cmd/lightclient/rpc/lightrpc" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/communication" + "github.com/ledgerwatch/log/v3" +) + +type SentinelServer struct { + lightrpc.UnimplementedSentinelServer + + ctx context.Context + sentinel *sentinel.Sentinel + gossipNotifier *gossipNotifier +} + +func NewSentinelServer(ctx context.Context, sentinel *sentinel.Sentinel) *SentinelServer { + return &SentinelServer{ + sentinel: sentinel, + ctx: ctx, + gossipNotifier: newGossipNotifier(), + } +} + +func (s *SentinelServer) SubscribeGossip(_ *lightrpc.GossipRequest, stream lightrpc.Sentinel_SubscribeGossipServer) error { 
+ // first of all subscribe + ch, subId, err := s.gossipNotifier.addSubscriber() + if err != nil { + return err + } + defer s.gossipNotifier.removeSubscriber(subId) + + for { + select { + // Exit on stream context done + case <-stream.Context().Done(): + return nil + case packet := <-ch: + if err := stream.Send(&lightrpc.GossipData{ + Data: packet.data, + Type: packet.t, + }); err != nil { + log.Warn("Could not relay gossip packet", "reason", err) + } + } + } +} + +func (s *SentinelServer) ListenToGossip() { + for { + select { + case pkt := <-s.sentinel.RecvGossip(): + s.handleGossipPacket(pkt) + case <-s.ctx.Done(): + return + } + } +} + +func (s *SentinelServer) handleGossipPacket(pkt *communication.GossipContext) error { + log.Trace("[Gossip] Received Packet", "topic", pkt.Topic) + err := pkt.Codec.WritePacket(context.TODO(), pkt.Packet) + if err != nil { + log.Warn("[Gossip] Error Forwarding Packet", "err", err) + } + // Compute data + u := pkt.Packet.(ssz.Marshaler) + var data []byte + // Make data + if data, err = u.MarshalSSZ(); err != nil { + return err + } + switch pkt.Packet.(type) { + case *lightrpc.SignedBeaconBlockBellatrix: + s.gossipNotifier.notify(lightrpc.GossipType_BeaconBlockGossipType, data) + case *lightrpc.LightClientFinalityUpdate: + s.gossipNotifier.notify(lightrpc.GossipType_LightClientFinalityUpdateGossipType, data) + case *lightrpc.LightClientOptimisticUpdate: + s.gossipNotifier.notify(lightrpc.GossipType_LightClientOptimisticUpdateGossipType, data) + default: + } + return nil +} diff --git a/cmd/lightclient/sentinel/service/start.go b/cmd/lightclient/sentinel/service/start.go new file mode 100644 index 00000000000..a22513b5736 --- /dev/null +++ b/cmd/lightclient/sentinel/service/start.go @@ -0,0 +1,74 @@ +package service + +import ( + "context" + "net" + "time" + + "github.com/ledgerwatch/erigon/cmd/lightclient/rpc/lightrpc" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel" + "github.com/ledgerwatch/log/v3" + 
"google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +type ServerConfig struct { + Network string + Addr string +} + +func StartSentinelService(cfg *sentinel.SentinelConfig, srvCfg *ServerConfig) (lightrpc.SentinelClient, error) { + ctx := context.Background() + sent, err := sentinel.New(context.Background(), cfg) + if err != nil { + return nil, err + } + if err := sent.Start(); err != nil { + return nil, err + } + gossip_topics := []sentinel.GossipTopic{ + sentinel.BeaconBlockSsz, + sentinel.LightClientFinalityUpdateSsz, + sentinel.LightClientOptimisticUpdateSsz, + } + for _, v := range gossip_topics { + // now lets separately connect to the gossip topics. this joins the room + subscriber, err := sent.SubscribeGossip(v) + if err != nil { + log.Error("failed to start sentinel", "err", err) + } + // actually start the subscription, ala listening and sending packets to the sentinel recv channel + err = subscriber.Listen() + if err != nil { + log.Error("failed to start sentinel", "err", err) + } + } + log.Info("Sentinel started", "enr", sent.String()) + + server := NewSentinelServer(ctx, sent) + go StartServe(server, srvCfg) + // Wait a bit for the serving (TODO: make it better, this is ugly) + time.Sleep(5 * time.Second) + + conn, err := grpc.DialContext(ctx, srvCfg.Addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, err + } + + return lightrpc.NewSentinelClient(conn), nil +} + +func StartServe(server *SentinelServer, srvCfg *ServerConfig) { + lis, err := net.Listen(srvCfg.Network, srvCfg.Addr) + if err != nil { + log.Warn("[Sentinel] could not serve service", "reason", err) + } + // Create a gRPC server + gRPCserver := grpc.NewServer() + go server.ListenToGossip() + // Regiser our server as a gRPC server + lightrpc.RegisterSentinelServer(gRPCserver, server) + if err := gRPCserver.Serve(lis); err != nil { + log.Warn("[Sentinel] could not serve service", "reason", err) + } +} diff --git 
a/cmd/lightclient/sentinel/utils.go b/cmd/lightclient/sentinel/utils.go new file mode 100644 index 00000000000..c7cc8fb5ab2 --- /dev/null +++ b/cmd/lightclient/sentinel/utils.go @@ -0,0 +1,133 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sentinel + +import ( + "crypto/ecdsa" + "fmt" + "net" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/ledgerwatch/erigon/p2p/enode" + "github.com/ledgerwatch/log/v3" + "github.com/libp2p/go-libp2p-core/crypto" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/multiformats/go-multiaddr" + "github.com/pkg/errors" +) + +func convertToInterfacePubkey(pubkey *ecdsa.PublicKey) (crypto.PubKey, error) { + xVal, yVal := new(btcec.FieldVal), new(btcec.FieldVal) + overflows := xVal.SetByteSlice(pubkey.X.Bytes()) + if overflows { + return nil, errors.Errorf("X value overflows") + } + overflows = yVal.SetByteSlice(pubkey.Y.Bytes()) + if overflows { + return nil, errors.Errorf("Y value overflows") + } + newKey := crypto.PubKey((*crypto.Secp256k1PublicKey)(btcec.NewPublicKey(xVal, yVal))) + // Zero out temporary values. 
+ xVal.Zero() + yVal.Zero() + return newKey, nil +} + +func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, multiaddr.Multiaddr, error) { + multiAddr, err := convertToSingleMultiAddr(node) + if err != nil { + return nil, nil, err + } + info, err := peer.AddrInfoFromP2pAddr(multiAddr) + if err != nil { + return nil, nil, err + } + return info, multiAddr, nil +} + +func convertToSingleMultiAddr(node *enode.Node) (multiaddr.Multiaddr, error) { + pubkey := node.Pubkey() + assertedKey, err := convertToInterfacePubkey(pubkey) + if err != nil { + return nil, errors.Wrap(err, "could not get pubkey") + } + id, err := peer.IDFromPublicKey(assertedKey) + if err != nil { + return nil, errors.Wrap(err, "could not get peer id") + } + return multiAddressBuilderWithID(node.IP().String(), "tcp", uint(node.TCP()), id) +} + +func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (multiaddr.Multiaddr, error) { + parsedIP := net.ParseIP(ipAddr) + if parsedIP.To4() == nil && parsedIP.To16() == nil { + return nil, errors.Errorf("invalid ip address provided: %s", ipAddr) + } + if id.String() == "" { + return nil, errors.New("empty peer id given") + } + if parsedIP.To4() != nil { + return multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String())) + } + return multiaddr.NewMultiaddr(fmt.Sprintf("/ip6/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String())) +} + +func convertToMultiAddr(nodes []*enode.Node) []multiaddr.Multiaddr { + multiAddrs := []multiaddr.Multiaddr{} + for _, node := range nodes { + // ignore nodes with no ip address stored + if node.IP() == nil { + continue + } + multiAddr, err := convertToSingleMultiAddr(node) + if err != nil { + log.Debug("Could not convert to multiAddr", "err", err) + continue + } + multiAddrs = append(multiAddrs, multiAddr) + } + return multiAddrs +} + +// will iterate onto randoms nodes until our sentinel connects to one +func connectToRandomPeer(s *Sentinel) (node *enode.Node, 
peerInfo *peer.AddrInfo, err error) { + iterator := s.listener.RandomNodes() + defer iterator.Close() + + connectedPeer := false + for !connectedPeer { + + if exists := iterator.Next(); !exists { + break + } + + node = iterator.Node() + peerInfo, _, err = convertToAddrInfo(node) + if err != nil { + return nil, nil, fmt.Errorf("error converting to addres info, err=%s", err) + } + + if err := s.connectWithPeer(s.ctx, *peerInfo); err != nil { + log.Debug("couldn't connect to peer", "err", err) + continue + } + connectedPeer = true + } + + if !connectedPeer { + return nil, nil, fmt.Errorf("failed to connect to peer") + } + + return node, peerInfo, nil +} diff --git a/cmd/lightclient/utils/bytes.go b/cmd/lightclient/utils/bytes.go new file mode 100644 index 00000000000..45b9a2f5558 --- /dev/null +++ b/cmd/lightclient/utils/bytes.go @@ -0,0 +1,102 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package utils + +import ( + "encoding/binary" + "encoding/hex" + + ssz "github.com/ferranbt/fastssz" + "github.com/golang/snappy" +) + +func Uint32ToBytes4(n uint32) (ret [4]byte) { + binary.BigEndian.PutUint32(ret[:], n) + return +} + +func BytesToBytes4(b []byte) (ret [4]byte) { + copy(ret[:], b) + return +} + +func BytesToHex(b []byte) string { + return hex.EncodeToString(b) +} + +func DecompressSnappy(data []byte) ([]byte, error) { + // Decode the snappy + lenDecoded, err := snappy.DecodedLen(data) + if err != nil { + return nil, err + } + decodedData := make([]byte, lenDecoded) + + snappy.Decode(decodedData, data) + return decodedData, nil +} + +func CompressSnappy(data []byte) ([]byte, error) { + // Decode the snappy + lenDecoded, err := snappy.DecodedLen(data) + if err != nil { + return nil, err + } + decodedData := make([]byte, lenDecoded) + + snappy.Decode(decodedData, data) + return decodedData, nil +} + +func Uint64ToLE(i uint64) []byte { + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, i) + return buf +} + +func BytesToBytes32(b []byte) (ret [32]byte) { + copy(ret[:], b) + return +} + +func BytesSliceToBytes32Slice(b [][]byte) (ret [][32]byte) { + for _, str := range b { + ret = append(ret, BytesToBytes32(str)) + } + return +} + +func EncodeSSZSnappy(data ssz.Marshaler) ([]byte, error) { + enc := make([]byte, data.SizeSSZ()) + enc, err := data.MarshalSSZTo(enc[:0]) + if err != nil { + return nil, err + } + + return snappy.Encode(nil, enc), nil +} + +func DecodeSSZSnappy(dst ssz.Unmarshaler, src []byte) error { + dec, err := snappy.Decode(nil, src) + if err != nil { + return err + } + + err = dst.UnmarshalSSZ(dec) + if err != nil { + return err + } + + return nil +} diff --git a/cmd/lightclient/utils/crypto.go b/cmd/lightclient/utils/crypto.go new file mode 100644 index 00000000000..99e586131db --- /dev/null +++ b/cmd/lightclient/utils/crypto.go @@ -0,0 +1,42 @@ +/* + Copyright 2022 Erigon-Lightclient contributors + Licensed under the 
// hasherPool recycles SHA-256 state objects to avoid a fresh allocation per
// digest call.
var hasherPool = sync.Pool{
	New: func() interface{} {
		return sha256.New()
	},
}

// Keccak256 returns the 32-byte digest of data.
//
// NOTE(review): despite the name, this computes SHA-256, not Keccak/SHA-3 —
// the pool only ever produces sha256.New(). Beacon-chain hashing is SHA-256,
// so the behavior is presumably intended, but the misleading name should be
// confirmed and renamed upstream.
func Keccak256(data []byte) [32]byte {
	// The pool only ever holds sha256 hashers, so the assertion cannot fail.
	h := hasherPool.Get().(hash.Hash)
	defer hasherPool.Put(h)
	h.Reset()

	var b [32]byte

	h.Write(data)
	// Sum appends the digest into b's backing array without allocating.
	h.Sum(b[:0])

	return b
}
// GetCurrentEpoch returns the epoch in progress at the current wall-clock
// time, or 0 if genesis has not happened yet.
func GetCurrentEpoch(genesisTime uint64, secondsPerSlot uint64, slotsPerEpoch uint64) uint64 {
	now := uint64(time.Now().Unix())
	if now < genesisTime {
		return 0
	}

	// epoch = slot / slotsPerEpoch, where slot = elapsed / secondsPerSlot.
	elapsed := now - genesisTime
	return elapsed / secondsPerSlot / slotsPerEpoch
}
a/cmd/observer/main.go b/cmd/observer/main.go index 7063beb4219..5173d4828e1 100644 --- a/cmd/observer/main.go +++ b/cmd/observer/main.go @@ -4,6 +4,8 @@ import ( "context" "errors" "fmt" + "path/filepath" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/cmd/observer/database" "github.com/ledgerwatch/erigon/cmd/observer/observer" @@ -11,7 +13,6 @@ import ( "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/log/v3" - "path/filepath" ) func mainWithFlags(ctx context.Context, flags observer.CommandFlags) error { diff --git a/cmd/observer/observer/command.go b/cmd/observer/observer/command.go index 24d1395483d..3686987c5ed 100644 --- a/cmd/observer/observer/command.go +++ b/cmd/observer/observer/command.go @@ -3,12 +3,13 @@ package observer import ( "context" "errors" + "runtime" + "time" + "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/internal/debug" "github.com/spf13/cobra" "github.com/urfave/cli" - "runtime" - "time" ) type CommandFlags struct { diff --git a/cmd/observer/observer/diplomacy.go b/cmd/observer/observer/diplomacy.go index 18be20f4fab..836c0e57a26 100644 --- a/cmd/observer/observer/diplomacy.go +++ b/cmd/observer/observer/diplomacy.go @@ -5,13 +5,14 @@ import ( "crypto/ecdsa" "errors" "fmt" + "sync/atomic" + "time" + "github.com/ledgerwatch/erigon/cmd/observer/database" "github.com/ledgerwatch/erigon/cmd/observer/observer/node_utils" "github.com/ledgerwatch/erigon/cmd/observer/utils" "github.com/ledgerwatch/log/v3" "golang.org/x/sync/semaphore" - "sync/atomic" - "time" ) type Diplomacy struct { diff --git a/cmd/observer/observer/diplomat.go b/cmd/observer/observer/diplomat.go index 9812b3bc037..b75757ef304 100644 --- a/cmd/observer/observer/diplomat.go +++ b/cmd/observer/observer/diplomat.go @@ -4,11 +4,12 @@ import ( "context" "crypto/ecdsa" "errors" + "time" + "github.com/ledgerwatch/erigon/cmd/observer/database" 
"github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/log/v3" - "time" ) type Diplomat struct { diff --git a/cmd/observer/observer/handshake_test.go b/cmd/observer/observer/handshake_test.go index 2691cd24006..92dc7c12f08 100644 --- a/cmd/observer/observer/handshake_test.go +++ b/cmd/observer/observer/handshake_test.go @@ -2,13 +2,14 @@ package observer import ( "context" + "testing" + "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/params" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" ) func TestHandshake(t *testing.T) { diff --git a/cmd/observer/observer/interrogator.go b/cmd/observer/observer/interrogator.go index a66bd811600..c1f9e83a6b2 100644 --- a/cmd/observer/observer/interrogator.go +++ b/cmd/observer/observer/interrogator.go @@ -5,14 +5,15 @@ import ( "crypto/ecdsa" "errors" "fmt" + "strings" + "time" + "github.com/ledgerwatch/erigon/cmd/observer/utils" "github.com/ledgerwatch/erigon/core/forkid" "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/log/v3" "golang.org/x/sync/semaphore" - "strings" - "time" ) type DiscV4Transport interface { diff --git a/cmd/observer/observer/keygen.go b/cmd/observer/observer/keygen.go index 5de9bc3f9c0..525a8ad5aaf 100644 --- a/cmd/observer/observer/keygen.go +++ b/cmd/observer/observer/keygen.go @@ -3,10 +3,11 @@ package observer import ( "context" "crypto/ecdsa" + "time" + "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/log/v3" - "time" ) func keygen( diff --git a/cmd/observer/observer/keygen_test.go b/cmd/observer/observer/keygen_test.go index b57e56b0d23..c2d0b0fb528 100644 --- a/cmd/observer/observer/keygen_test.go +++ b/cmd/observer/observer/keygen_test.go @@ -2,12 +2,13 @@ package observer import ( 
"context" - "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/log/v3" - "github.com/stretchr/testify/assert" "runtime" "testing" "time" + + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/assert" ) func TestKeygen(t *testing.T) { diff --git a/cmd/observer/observer/node_utils/node_addr.go b/cmd/observer/observer/node_utils/node_addr.go index 0e09add518c..e59b762c5fa 100644 --- a/cmd/observer/observer/node_utils/node_addr.go +++ b/cmd/observer/observer/node_utils/node_addr.go @@ -2,11 +2,12 @@ package node_utils import ( "fmt" + "net" + "github.com/ledgerwatch/erigon/cmd/observer/database" "github.com/ledgerwatch/erigon/cmd/observer/utils" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/p2p/enr" - "net" ) func MakeNodeAddr(node *enode.Node) database.NodeAddr { diff --git a/cmd/observer/observer/node_utils/node_id.go b/cmd/observer/observer/node_utils/node_id.go index e7c27f0e985..ae9dedb7e8b 100644 --- a/cmd/observer/observer/node_utils/node_id.go +++ b/cmd/observer/observer/node_utils/node_id.go @@ -3,9 +3,10 @@ package node_utils import ( "errors" "fmt" + "net/url" + "github.com/ledgerwatch/erigon/cmd/observer/database" "github.com/ledgerwatch/erigon/p2p/enode" - "net/url" ) func NodeID(node *enode.Node) (database.NodeID, error) { diff --git a/cmd/observer/observer/sentry_candidates/intake.go b/cmd/observer/observer/sentry_candidates/intake.go index d6ba42fe786..05ece15cb5e 100644 --- a/cmd/observer/observer/sentry_candidates/intake.go +++ b/cmd/observer/observer/sentry_candidates/intake.go @@ -3,6 +3,8 @@ package sentry_candidates import ( "context" "fmt" + "time" + "github.com/ledgerwatch/erigon/cmd/observer/database" "github.com/ledgerwatch/erigon/cmd/observer/observer/node_utils" "github.com/ledgerwatch/erigon/cmd/observer/utils" @@ -10,7 +12,6 @@ import ( "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/log/v3" "github.com/nxadm/tail" - "time" ) type 
Intake struct { diff --git a/cmd/observer/observer/sentry_candidates/log.go b/cmd/observer/observer/sentry_candidates/log.go index 71ad2a3f0ce..f8b5129de05 100644 --- a/cmd/observer/observer/sentry_candidates/log.go +++ b/cmd/observer/observer/sentry_candidates/log.go @@ -4,12 +4,13 @@ import ( "bufio" "context" "encoding/json" - "github.com/ledgerwatch/erigon/eth/protocols/eth" - "github.com/nxadm/tail" "io" "strconv" "strings" "time" + + "github.com/ledgerwatch/erigon/eth/protocols/eth" + "github.com/nxadm/tail" ) type Log struct { diff --git a/cmd/observer/observer/sentry_candidates/log_test.go b/cmd/observer/observer/sentry_candidates/log_test.go index e1ac9c391c7..3afba7a0b68 100644 --- a/cmd/observer/observer/sentry_candidates/log_test.go +++ b/cmd/observer/observer/sentry_candidates/log_test.go @@ -2,11 +2,12 @@ package sentry_candidates import ( "context" + "strings" + "testing" + "github.com/nxadm/tail" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "strings" - "testing" ) func TestLogRead(t *testing.T) { diff --git a/cmd/observer/observer/server.go b/cmd/observer/observer/server.go index 907b7c709a6..e89e70f08bd 100644 --- a/cmd/observer/observer/server.go +++ b/cmd/observer/observer/server.go @@ -5,6 +5,9 @@ import ( "crypto/ecdsa" "errors" "fmt" + "net" + "path/filepath" + "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/core/forkid" @@ -17,8 +20,6 @@ import ( "github.com/ledgerwatch/erigon/p2p/netutil" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/log/v3" - "net" - "path/filepath" ) type Server struct { diff --git a/cmd/observer/observer/status_logger.go b/cmd/observer/observer/status_logger.go index 2757ecd7ca3..0a9363d3deb 100644 --- a/cmd/observer/observer/status_logger.go +++ b/cmd/observer/observer/status_logger.go @@ -3,10 +3,11 @@ package observer import ( "context" "errors" + "time" + 
"github.com/ledgerwatch/erigon/cmd/observer/database" "github.com/ledgerwatch/erigon/cmd/observer/utils" "github.com/ledgerwatch/log/v3" - "time" ) func StatusLoggerLoop(ctx context.Context, db database.DB, networkID uint, period time.Duration, logger log.Logger) { diff --git a/cmd/observer/reports/clients_estimate_report.go b/cmd/observer/reports/clients_estimate_report.go index 4ec7d149bf0..9920d1f2da1 100644 --- a/cmd/observer/reports/clients_estimate_report.go +++ b/cmd/observer/reports/clients_estimate_report.go @@ -3,9 +3,10 @@ package reports import ( "context" "fmt" - "github.com/ledgerwatch/erigon/cmd/observer/database" "math" "strings" + + "github.com/ledgerwatch/erigon/cmd/observer/database" ) type ClientsEstimateReportEntry struct { diff --git a/cmd/observer/reports/clients_report.go b/cmd/observer/reports/clients_report.go index e41b82e49dd..3ce39d99eeb 100644 --- a/cmd/observer/reports/clients_report.go +++ b/cmd/observer/reports/clients_report.go @@ -3,9 +3,10 @@ package reports import ( "context" "fmt" + "strings" + "github.com/ledgerwatch/erigon/cmd/observer/database" "github.com/ledgerwatch/erigon/cmd/observer/observer" - "strings" ) type ClientsReportEntry struct { diff --git a/cmd/observer/reports/command.go b/cmd/observer/reports/command.go index bba73cbe363..eace18c0563 100644 --- a/cmd/observer/reports/command.go +++ b/cmd/observer/reports/command.go @@ -2,6 +2,7 @@ package reports import ( "context" + "github.com/ledgerwatch/erigon/cmd/utils" "github.com/spf13/cobra" "github.com/urfave/cli" diff --git a/cmd/observer/reports/sentry_candidates_report.go b/cmd/observer/reports/sentry_candidates_report.go index 29e7b7cfb0b..1c9ecc1b6f9 100644 --- a/cmd/observer/reports/sentry_candidates_report.go +++ b/cmd/observer/reports/sentry_candidates_report.go @@ -3,11 +3,12 @@ package reports import ( "context" "fmt" - "github.com/ledgerwatch/erigon/cmd/observer/database" - "github.com/ledgerwatch/erigon/cmd/observer/observer/sentry_candidates" "net/url" 
"os" "strings" + + "github.com/ledgerwatch/erigon/cmd/observer/database" + "github.com/ledgerwatch/erigon/cmd/observer/observer/sentry_candidates" ) type SentryCandidatesReport struct { diff --git a/cmd/observer/reports/status_report.go b/cmd/observer/reports/status_report.go index c094e9b2036..11c80063982 100644 --- a/cmd/observer/reports/status_report.go +++ b/cmd/observer/reports/status_report.go @@ -3,8 +3,9 @@ package reports import ( "context" "fmt" - "github.com/ledgerwatch/erigon/cmd/observer/database" "strings" + + "github.com/ledgerwatch/erigon/cmd/observer/database" ) type StatusReport struct { diff --git a/cmd/observer/utils/pubkey_hex.go b/cmd/observer/utils/pubkey_hex.go index 0644bba00a8..94a596f1590 100644 --- a/cmd/observer/utils/pubkey_hex.go +++ b/cmd/observer/utils/pubkey_hex.go @@ -4,6 +4,7 @@ import ( "crypto/ecdsa" "encoding/hex" "fmt" + "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/p2p/enode" ) diff --git a/cmd/observer/utils/retry.go b/cmd/observer/utils/retry.go index ab745415a37..14cdebc74a0 100644 --- a/cmd/observer/utils/retry.go +++ b/cmd/observer/utils/retry.go @@ -2,8 +2,9 @@ package utils import ( "context" - "github.com/ledgerwatch/log/v3" "time" + + "github.com/ledgerwatch/log/v3" ) func Retry( diff --git a/cmd/p2psim/main.go b/cmd/p2psim/main.go index 456b0ce0b64..0031437c10f 100644 --- a/cmd/p2psim/main.go +++ b/cmd/p2psim/main.go @@ -19,21 +19,20 @@ // Here is an example of creating a 2 node network with the first node // connected to the second: // -// $ p2psim node create -// Created node01 +// $ p2psim node create +// Created node01 // -// $ p2psim node start node01 -// Started node01 +// $ p2psim node start node01 +// Started node01 // -// $ p2psim node create -// Created node02 +// $ p2psim node create +// Created node02 // -// $ p2psim node start node02 -// Started node02 -// -// $ p2psim node connect node01 node02 -// Connected node01 to node02 +// $ p2psim node start node02 +// Started node02 // 
+// $ p2psim node connect node01 node02 +// Connected node01 to node02 package main import ( diff --git a/cmd/prometheus/dashboards/erigon.json b/cmd/prometheus/dashboards/erigon.json index 7c4fb19ae67..489653a00aa 100644 --- a/cmd/prometheus/dashboards/erigon.json +++ b/cmd/prometheus/dashboards/erigon.json @@ -3,7 +3,10 @@ "list": [ { "builtIn": 1, - "datasource": "-- Grafana --", + "datasource": { + "type": "datasource", + "uid": "grafana" + }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -22,11 +25,15 @@ "fiscalYearStartMonth": 0, "graphTooltip": 0, "id": 1, - "iteration": 1642907202703, + "iteration": 1661832292076, "links": [], "liveNow": false, "panels": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, @@ -34,10 +41,23 @@ "y": 0 }, "id": 171, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "Blocks execution", "type": "row" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -106,12 +126,17 @@ "placement": "bottom" }, "tooltip": { - "mode": "multi" + "mode": "multi", + "sort": "none" } }, "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "sync{instance=~\"$instance\"}", "format": "time_series", @@ -125,6 +150,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -193,12 +222,17 @@ "placement": "bottom" }, "tooltip": { - "mode": "multi" + "mode": "multi", + "sort": "none" } }, "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(sync{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", @@ -212,6 +246,10 @@ "type": "timeseries" 
}, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -281,12 +319,17 @@ "placement": "bottom" }, "tooltip": { - "mode": "multi" + "mode": "multi", + "sort": "none" } }, "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "chain_execution_seconds{quantile=\"$quantile\",instance=~\"$instance\"}", "format": "time_series", @@ -301,6 +344,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, @@ -309,10 +356,23 @@ }, "id": 17, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "Database", "type": "row" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -378,14 +438,19 @@ "placement": "bottom" }, "tooltip": { - "mode": "multi" + "mode": "multi", + "sort": "none" } }, "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, - "expr": "rate(db_commit_seconds_count{phase=\"total\",instance=~\"$instance\"}[$__rate_interval])", + "expr": "rate(db_commit_seconds_count{phase=\"total\",instance=~\"$instance\"}[$rate_interval])", "interval": "", "legendFormat": "commit: {{instance}}", "refId": "A" @@ -395,6 +460,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "description": "", "fieldConfig": { "defaults": { @@ -463,12 +532,17 @@ "placement": "bottom" }, "tooltip": { - "mode": "multi" + "mode": "multi", + "sort": "none" } }, "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": 
"db_commit_seconds{phase=\"total\",quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", @@ -476,6 +550,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "db_commit_seconds{phase=\"gc\",quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, @@ -484,6 +562,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "db_commit_seconds{phase=\"write\",quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, @@ -492,6 +574,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "db_commit_seconds{phase=\"sync\",quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, @@ -504,6 +590,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -571,12 +661,17 @@ "placement": "bottom" }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_size{instance=~\"$instance\"}", "interval": "", "legendFormat": "size: {{instance}}", @@ -587,6 +682,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -654,12 +753,17 @@ "placement": "bottom" }, "tooltip": { - "mode": "multi" + "mode": "multi", + "sort": "none" } }, "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "db_gc_leaf{instance=~\"$instance\"}", "interval": "", @@ -667,6 +771,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "db_gc_overflow{instance=~\"$instance\"}", "hide": 
false, @@ -675,6 +783,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "db_state_leaf{instance=~\"$instance\"}", "hide": false, @@ -683,6 +795,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "db_state_branch{instance=~\"$instance\"}", "hide": false, @@ -695,6 +811,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -762,12 +882,17 @@ "placement": "bottom" }, "tooltip": { - "mode": "multi" + "mode": "multi", + "sort": "none" } }, "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(db_pgops_newly{instance=~\"$instance\"}[$rate_interval])", "hide": true, @@ -776,6 +901,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(db_pgops_cow{instance=~\"$instance\"}[$rate_interval])", "hide": true, @@ -784,6 +913,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(db_pgops_clone{instance=~\"$instance\"}[$rate_interval])", "hide": false, @@ -792,6 +925,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(db_pgops_split{instance=~\"$instance\"}[$rate_interval])", "hide": false, @@ -800,6 +937,10 @@ "refId": "D" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(db_pgops_merge{instance=~\"$instance\"}[$rate_interval])", "hide": true, @@ -808,6 +949,10 @@ "refId": "E" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(db_pgops_spill{instance=~\"$instance\"}[$rate_interval])", 
"hide": false, @@ -816,6 +961,10 @@ "refId": "F" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(db_pgops_wops{instance=~\"$instance\"}[$rate_interval])", "hide": false, @@ -824,6 +973,10 @@ "refId": "G" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(db_pgops_unspill{instance=~\"$instance\"}[$rate_interval])", "hide": false, @@ -836,6 +989,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -903,18 +1060,27 @@ "placement": "bottom" }, "tooltip": { - "mode": "multi" + "mode": "multi", + "sort": "none" } }, "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "tx_limit{instance=~\"$instance\"}", "interval": "", "legendFormat": "limit: {{instance}}", "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "tx_dirty{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -927,6 +1093,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, @@ -935,10 +1105,23 @@ }, "id": 134, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "Process", "type": "row" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -978,15 +1161,23 @@ }, "textMode": "auto" }, - "pluginVersion": "8.3.3", + "pluginVersion": "9.0.3", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "ru_inblock{instance=~\"$instance\"}", "interval": "", "legendFormat": "inblock: {{instance}}", "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + 
}, "expr": "ru_outblock{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -994,6 +1185,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "ru_minflt{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1001,6 +1196,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "ru_majflt{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1008,6 +1207,10 @@ "refId": "D" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "system_disk_readbytes{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1015,6 +1218,10 @@ "refId": "E" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "system_disk_writebytes{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1022,6 +1229,10 @@ "refId": "F" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_pgops_newly{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1029,6 +1240,10 @@ "refId": "H" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_pgops_cow{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1036,6 +1251,10 @@ "refId": "I" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_pgops_clone{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1043,6 +1262,10 @@ "refId": "J" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_pgops_split{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1050,6 +1273,10 @@ "refId": "K" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_pgops_merge{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1057,6 +1284,10 @@ "refId": "L" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": 
"db_pgops_spill{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1064,6 +1295,10 @@ "refId": "G" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_pgops_unspill{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1071,6 +1306,10 @@ "refId": "M" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_pgops_wops{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1155,12 +1394,17 @@ "placement": "bottom" }, "tooltip": { - "mode": "multi" + "mode": "multi", + "sort": "none" } }, "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(process_io_write_syscalls_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", @@ -1171,6 +1415,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(process_io_read_syscalls_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", @@ -1185,6 +1433,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "description": "", "fieldConfig": { "defaults": { @@ -1253,19 +1505,28 @@ "placement": "bottom" }, "tooltip": { - "mode": "multi" + "mode": "multi", + "sort": "none" } }, "pluginVersion": "8.0.6", "targets": [ { - "exemplar": true, - "expr": "rate(process_minor_pagefaults_total{instance=~\"$instance\"}[$rate_interval])", - "interval": "", + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "rate(process_minor_pagefaults_total{instance=~\"$instance\"}[$rate_interval])", + "interval": "", "legendFormat": "soft: {{instance}}", "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": 
"rate(process_major_pagefaults_total{instance=~\"$instance\"}[$rate_interval])", "hide": false, @@ -1320,8 +1581,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1356,6 +1616,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(process_io_storage_read_bytes_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", @@ -1365,6 +1629,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(process_io_storage_written_bytes_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", @@ -1378,6 +1646,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "description": "", "fieldConfig": { "defaults": { @@ -1417,8 +1689,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1452,6 +1723,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(go_cgo_calls_count{instance=~\"$instance\"}[$rate_interval])", "interval": "", @@ -1506,8 +1781,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1540,6 +1814,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "go_memstats_stack_sys_bytes{instance=~\"$instance\"}", "format": "time_series", @@ -1549,6 +1827,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "go_memstats_sys_bytes{instance=~\"$instance\"}", "format": "time_series", @@ -1559,6 +1841,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + 
"uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "go_memstats_stack_inuse_bytes{instance=~\"$instance\"}", "format": "time_series", @@ -1569,6 +1855,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "go_memstats_mspan_sys_bytes{instance=~\"$instance\"}", "format": "time_series", @@ -1579,6 +1869,10 @@ "refId": "D" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "go_memstats_mcache_sys_bytes{instance=~\"$instance\"}", "format": "time_series", @@ -1589,6 +1883,10 @@ "refId": "E" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "go_memstats_heap_alloc_bytes{instance=~\"$instance\"}", "format": "time_series", @@ -1645,8 +1943,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1678,6 +1975,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "go_goroutines{instance=~\"$instance\"}", "instant": false, "interval": "", @@ -1685,6 +1986,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "go_threads{instance=~\"$instance\"}", "instant": false, "interval": "", @@ -1696,6 +2001,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "description": "", "fieldConfig": { "defaults": { @@ -1735,8 +2044,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1770,6 +2078,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "process_virtual_memory_bytes{instance=~\"$instance\"}", "hide": true, @@ -1778,6 +2090,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + 
"uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "process_resident_memory_anon_bytes{instance=~\"$instance\"}", "hide": true, @@ -1786,6 +2102,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "process_resident_memory_bytes{instance=~\"$instance\"}", "hide": false, @@ -1794,6 +2114,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mem_data{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1801,6 +2125,10 @@ "refId": "D" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mem_stack{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1808,6 +2136,10 @@ "refId": "E" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mem_locked{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1815,6 +2147,10 @@ "refId": "F" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mem_swap{instance=~\"$instance\"}", "hide": false, "interval": "", @@ -1868,8 +2204,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1878,33 +2213,7 @@ ] }, "unit": "s" - }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "{instance=\"turbogeth16c.weblogix.it:6062\", job=\"erigon\", quantile=\"0.75\"}" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + } }, "gridPos": { "h": 5, @@ -1926,6 +2235,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": 
"rate(go_gc_duration_seconds{quantile=\"0.75\",instance=~\"$instance\"}[$rate_interval])", "instant": false, @@ -1981,8 +2294,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2015,6 +2327,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(go_memstats_mallocs_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", @@ -2024,6 +2340,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(go_memstats_frees_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", @@ -2080,8 +2400,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2114,6 +2433,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "increase(process_cpu_seconds_system_total{instance=~\"$instance\"}[1m])", "format": "time_series", @@ -2123,6 +2446,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "increase(process_cpu_seconds_user_total{instance=~\"$instance\"}[1m])", "format": "time_series", @@ -2137,6 +2464,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, @@ -2145,6 +2476,15 @@ }, "id": 82, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "System", "type": "row" }, @@ -2192,8 +2532,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2228,6 +2567,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": 
"PBFA97CFB590B2093" + }, "expr": "vmem_total{instance=~\"$instance\"}", "format": "time_series", "interval": "", @@ -2236,6 +2579,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "vmem_available{instance=~\"$instance\"}", "format": "time_series", "hide": false, @@ -2245,6 +2592,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "vmem_used{instance=~\"$instance\"}", "format": "time_series", "hide": false, @@ -2254,6 +2605,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "vmem_buffers{instance=~\"$instance\"}", "format": "time_series", "hide": false, @@ -2263,6 +2618,10 @@ "refId": "D" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "vmem_cached{instance=~\"$instance\"}", "format": "time_series", "hide": false, @@ -2272,6 +2631,10 @@ "refId": "E" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "vmem_writeback{instance=~\"$instance\"}", "format": "time_series", "hide": false, @@ -2281,6 +2644,10 @@ "refId": "F" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "vmem_dirty{instance=~\"$instance\"}", "format": "time_series", "hide": false, @@ -2290,6 +2657,10 @@ "refId": "G" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "vmem_shared{instance=~\"$instance\"}", "format": "time_series", "hide": false, @@ -2299,6 +2670,10 @@ "refId": "H" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "vmem_mapped{instance=~\"$instance\"}", "format": "time_series", "hide": false, @@ -2313,6 +2688,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, @@ -2321,10 +2700,23 @@ }, "id": 173, "panels": [], + "targets": [ + { + "datasource": { 
+ "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "TxPool v2", "type": "row" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -2363,8 +2755,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2373,33 +2764,7 @@ ] }, "unit": "s" - }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "pool_write_to_db: turbogeth16c.weblogix.it:6060" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + } }, "gridPos": { "h": 8, @@ -2423,6 +2788,10 @@ }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "pool_process_remote_txs{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", @@ -2430,6 +2799,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "pool_add_remote_txs{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, @@ -2438,6 +2811,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "pool_new_block{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, @@ -2446,6 +2823,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "pool_write_to_db{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, @@ -2454,6 +2835,10 @@ "refId": "D" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "pool_propagate_to_new_peer{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, @@ -2462,6 +2847,10 
@@ "refId": "E" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "pool_propagate_new_txs{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, @@ -2475,6 +2864,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -2513,8 +2906,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2523,33 +2915,7 @@ ] }, "unit": "reqps" - }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "pool_add_remote_txs_count: turbogeth16c.weblogix.it:6060" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + } }, "gridPos": { "h": 8, @@ -2573,6 +2939,10 @@ }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(pool_process_remote_txs_count{instance=~\"$instance\"}[$rate_interval])", "hide": false, @@ -2581,6 +2951,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(pool_add_remote_txs_count{instance=~\"$instance\"}[$rate_interval])", "hide": false, @@ -2589,6 +2963,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(pool_new_block_count{instance=~\"$instance\"}[$rate_interval])", "hide": false, @@ -2597,6 +2975,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(pool_write_to_db_count{instance=~\"$instance\"}[$rate_interval])", "hide": false, @@ -2610,6 +2992,10 @@ "type": "timeseries" }, { + "datasource": { + "type": 
"prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -2648,8 +3034,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2682,6 +3067,10 @@ }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "sum(delta(cache_total{result=\"hit\",name=\"txpool\",instance=~\"$instance\"}[1m]))/sum(delta(cache_total{name=\"txpool\",instance=~\"$instance\"}[1m])) ", "hide": false, @@ -2694,6 +3083,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -2732,8 +3125,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2741,33 +3133,7 @@ } ] } - }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "hit: turbogeth16c.weblogix.it:6060 " - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + } }, "gridPos": { "h": 6, @@ -2791,6 +3157,10 @@ }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(cache_total{name=\"txpool\",instance=~\"$instance\"}[1m])", "hide": false, @@ -2799,6 +3169,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(cache_timeout_total{name=\"txpool\",instance=~\"$instance\"}[1m])", "hide": false, @@ -2811,6 +3185,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -2849,8 +3227,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": 
"green" }, { "color": "red", @@ -2884,6 +3261,10 @@ }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "cache_keys_total{name=\"txpool\",instance=~\"$instance\"}", "hide": false, @@ -2892,6 +3273,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "cache_list_total{name=\"txpool\",instance=~\"$instance\"}", "hide": false, @@ -2904,6 +3289,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -2942,8 +3331,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2977,6 +3365,10 @@ }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(pool_write_to_db_bytes{instance=~\"$instance\"}[$rate_interval])", "hide": false, @@ -2990,6 +3382,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, @@ -2998,10 +3394,23 @@ }, "id": 183, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "RPC", "type": "row" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -3040,8 +3449,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3050,33 +3458,7 @@ ] }, "unit": "reqps" - }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "success eth_call turbogeth16c.weblogix.it:6062 " - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": 
false, - "viz": true - } - } - ] - } - ] + } }, "gridPos": { "h": 8, @@ -3100,6 +3482,10 @@ }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(rpc_duration_seconds_count{instance=~\"$instance\",success=\"success\"}[1m])", "interval": "", @@ -3107,6 +3493,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(rpc_duration_seconds_count{instance=~\"$instance\",success=\"failure\"}[1m])", "hide": false, @@ -3120,6 +3510,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -3158,8 +3552,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3193,6 +3586,10 @@ }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "db_begin_seconds{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", @@ -3205,6 +3602,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -3243,8 +3644,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3253,34 +3653,7 @@ ] }, "unit": "s" - }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - " eth_call turbogeth16c.weblogix.it:6062 success", - " eth_call turbogeth16c.weblogix.it:6062 failure" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + } }, "gridPos": { "h": 8, @@ -3304,6 +3677,10 @@ }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": 
"PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rpc_duration_seconds{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", @@ -3358,8 +3735,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3391,6 +3767,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "go_goroutines{instance=~\"$instance\"}", "instant": false, "interval": "", @@ -3398,6 +3778,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "go_threads{instance=~\"$instance\"}", "instant": false, "interval": "", @@ -3409,6 +3793,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -3447,8 +3835,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3482,6 +3869,10 @@ }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "cache_keys_total{name=\"rpc\",instance=~\"$instance\"}", "hide": false, @@ -3490,6 +3881,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "cache_list_total{name=\"rpc\",instance=~\"$instance\"}", "hide": false, @@ -3498,6 +3893,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "cache_code_keys_total{name=\"rpc\",instance=~\"$instance\"}", "hide": false, @@ -3506,6 +3905,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "cache_code_list_total{name=\"rpc\",instance=~\"$instance\"}", "hide": false, @@ -3518,6 +3921,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, 
"fieldConfig": { "defaults": { "color": { @@ -3556,8 +3963,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3590,6 +3996,10 @@ }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "sum(delta(cache_total{result=\"hit\",name=\"rpc\",instance=~\"$instance\"}[1m]))/sum(delta(cache_total{name=\"rpc\",instance=~\"$instance\"}[1m])) ", "hide": false, @@ -3598,6 +4008,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "sum(delta(cache_code_total{result=\"hit\",name=\"rpc\",instance=~\"$instance\"}[1m]))/sum(delta(cache_code_total{name=\"rpc\",instance=~\"$instance\"}[1m])) ", "hide": false, @@ -3611,6 +4025,10 @@ }, { "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, @@ -3670,12 +4088,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "trie_subtrieloader_flatdb{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "trie_subtrieloader_flatdb: {{quantile}}, {{instance}}", "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "trie_subtrieloader_witnessdb{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "trie_subtrieloader_witnessdb: {{quantile}}, {{instance}}", @@ -3764,24 +4190,40 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(db_op_set_count{instance=~\"$instance\"}[1m])", "interval": "", "legendFormat": "", "refId": "E" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(db_op_set_range_count{instance=~\"$instance\"}[1m])", "interval": "", "legendFormat": "", "refId": "F" }, { + 
"datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(db_op_get_count{instance=~\"$instance\"}[1m])", "interval": "", "legendFormat": "", "refId": "G" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(db_op_get_both{instance=~\"$instance\"}[1m])", "hide": false, "interval": "", @@ -3789,6 +4231,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(db_op_get_both_range_count{instance=~\"$instance\"}[1m])", "hide": false, "interval": "", @@ -3796,6 +4242,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_op_put{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, "interval": "", @@ -3803,6 +4253,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_op_put_current{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, "interval": "", @@ -3810,6 +4264,10 @@ "refId": "D" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_op_put_no_overwrite{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, "interval": "", @@ -3854,6 +4312,10 @@ "bars": false, "dashLength": 10, "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3890,6 +4352,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_get{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "get: {{quantile}}, {{instance}}", @@ -3977,24 +4443,40 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_op_set{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "E" }, { + "datasource": { + "type": 
"prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_op_set_range{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "F" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_op_get{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "", "refId": "G" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_op_get_both{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, "interval": "", @@ -4002,6 +4484,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_op_get_both_range{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, "interval": "", @@ -4009,6 +4495,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_op_put{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, "interval": "", @@ -4016,6 +4506,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_op_put_current{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, "interval": "", @@ -4023,6 +4517,10 @@ "refId": "D" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "db_op_put_no_overwrite{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, "interval": "", @@ -4114,6 +4612,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_put_direct{quantile=\"$quantile\",instance=~\"$instance\"}", "instant": false, "interval": "", @@ -4121,12 +4623,20 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_put_direct{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "mdbx_put_direct: {{quantile}}, {{instance}}", "refId": "C" }, { + 
"datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_put_both_range{quantile=\"$quantile\",instance=~\"$instance\"}", "instant": false, "interval": "", @@ -4134,12 +4644,20 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_put_both_range{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "mdbx_put_both_range: {{quantile}}, {{instance}}", "refId": "D" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_seek_exact{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, "interval": "", @@ -4147,6 +4665,10 @@ "refId": "I" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_seek_exact{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, "interval": "", @@ -4154,66 +4676,110 @@ "refId": "J" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_put_no_overwrite{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "mdbx_put_no_overwrite: {{quantile}}, {{instance}}", "refId": "F" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_put_no_overwrite{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "mdbx_put_no_overwrite: {{quantile}}, {{instance}}", "refId": "E" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_put_upsert{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "mdbx_put_upsert: {{quantile}}, {{instance}}", "refId": "G" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_put_upsert{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "mdbx_put_upsert: {{quantile}}, {{instance}}", "refId": "H" }, { + "datasource": { + "type": "prometheus", + "uid": 
"PBFA97CFB590B2093" + }, "expr": "mdbx_put_current2{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "mdbx_put_current2: {{quantile}}, {{instance}}", "refId": "K" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_put_current2{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "mdbx_put_current2: {{quantile}}, {{instance}}", "refId": "L" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_put_upsert2{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "mdbx_put_upsert2: {{quantile}}, {{instance}}", "refId": "M" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_put_upsert2{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "mdbx_put_upsert2: {{quantile}}, {{instance}}", "refId": "N" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_del_current{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "mdbx_put_current: {{quantile}}, {{instance}}", "refId": "O" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_del_current{quantile=\"$quantile\",instance=~\"$instance\"}", "interval": "", "legendFormat": "mdbx_del_current: {{quantile}}, {{instance}}", "refId": "P" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_seek_exact2{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, "interval": "", @@ -4221,6 +4787,10 @@ "refId": "Q" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "mdbx_seek_exact2{quantile=\"$quantile\",instance=~\"$instance\"}", "hide": false, "interval": "", @@ -4261,11 +4831,24 @@ } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": 
"A" + } + ], "title": "Hidden", "type": "row" }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, @@ -4274,6 +4857,15 @@ }, "id": 75, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "Network", "type": "row" }, @@ -4320,8 +4912,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4359,6 +4950,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(p2p_ingress{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", @@ -4368,6 +4963,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "rate(p2p_egress{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", @@ -4424,8 +5023,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4463,6 +5061,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "p2p_peers{instance=~\"$instance\"}", "format": "time_series", "interval": "", @@ -4471,6 +5073,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(p2p_dials{instance=~\"$instance\"}[1m])", "format": "time_series", "interval": "", @@ -4479,6 +5085,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(p2p_serves{instance=~\"$instance\"}[1m])", "format": "time_series", "interval": "", @@ -4492,6 +5102,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, @@ -4500,10 +5114,23 @@ }, "id": 4, "panels": [], + "targets": 
[ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "Blockchain", "type": "row" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "mappings": [ @@ -4521,8 +5148,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4566,6 +5192,10 @@ "pluginVersion": "8.3.3", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "sync{instance=~\"$instance\",stage=\"headers\"}", "format": "time_series", "interval": "", @@ -4578,6 +5208,10 @@ "type": "stat" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "mappings": [ @@ -4595,8 +5229,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4640,6 +5273,10 @@ "pluginVersion": "8.3.3", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "exemplar": true, "expr": "stage_headers{instance=~\"$instance\"}", "format": "time_series", @@ -4653,6 +5290,10 @@ "type": "stat" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "mappings": [ @@ -4670,8 +5311,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4715,6 +5355,10 @@ "pluginVersion": "8.3.3", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "txpool_pending{instance=~\"$instance\"}", "format": "time_series", "interval": "", @@ -4727,6 +5371,10 @@ "type": "stat" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "mappings": [ @@ -4744,8 +5392,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ 
-4789,6 +5436,10 @@ "pluginVersion": "8.3.3", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "txpool_queued{instance=~\"$instance\"}", "format": "time_series", "interval": "", @@ -4801,6 +5452,10 @@ "type": "stat" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "mappings": [ @@ -4818,8 +5473,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4863,6 +5517,10 @@ "pluginVersion": "8.3.3", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "txpool_local{instance=~\"$instance\"}", "format": "time_series", "interval": "", @@ -4875,6 +5533,10 @@ "type": "stat" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -4913,8 +5575,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4947,6 +5608,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "chain_head_header{instance=~\"$instance\"}", "format": "time_series", "interval": "", @@ -4955,6 +5620,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "chain_head_receipt{instance=~\"$instance\"}", "format": "time_series", "interval": "", @@ -4963,6 +5632,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "chain_head_block{instance=~\"$instance\"}", "format": "time_series", "interval": "", @@ -4975,6 +5648,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -5013,8 +5690,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": 
"red", @@ -5047,6 +5723,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "txpool_pending{instance=~\"$instance\"}", "format": "time_series", "interval": "", @@ -5055,6 +5735,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "txpool_queued{instance=~\"$instance\"}", "format": "time_series", "interval": "", @@ -5063,6 +5747,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "txpool_local{instance=~\"$instance\"}", "format": "time_series", "interval": "", @@ -5075,6 +5763,10 @@ "type": "timeseries" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "color": { @@ -5113,8 +5805,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5152,6 +5843,10 @@ "pluginVersion": "8.0.6", "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(txpool_valid{instance=~\"$instance\"}[1m])", "format": "time_series", "interval": "", @@ -5160,6 +5855,10 @@ "refId": "K" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(txpool_invalid{instance=~\"$instance\"}[1m])", "format": "time_series", "interval": "", @@ -5168,6 +5867,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(txpool_underpriced{instance=\"$instance\"}[1m])", "format": "time_series", "hide": false, @@ -5177,6 +5880,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(txpool_pending_discard{instance=\"$instance\"}[1m])", "format": "time_series", "hide": false, @@ -5186,6 +5893,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": 
"rate(txpool_pending_replace{instance=\"$instance\"}[1m])", "format": "time_series", "interval": "", @@ -5194,6 +5905,10 @@ "refId": "D" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(txpool_pending_ratelimit{instance=\"$instance\"}[1m])", "format": "time_series", "interval": "", @@ -5202,6 +5917,10 @@ "refId": "E" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(txpool_pending_nofunds{instance=\"$instance\"}[1m])", "format": "time_series", "interval": "", @@ -5210,6 +5929,10 @@ "refId": "F" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(txpool_queued_discard{instance=\"$instance\"}[1m])", "format": "time_series", "hide": false, @@ -5219,6 +5942,10 @@ "refId": "G" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(txpool_queued_replace{instance=\"$instance\"}[1m])", "format": "time_series", "hide": false, @@ -5228,6 +5955,10 @@ "refId": "H" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(txpool_queued_ratelimit{instance=\"$instance\"}[1m])", "format": "time_series", "hide": false, @@ -5237,6 +5968,10 @@ "refId": "I" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "rate(txpool_queued_nofunds{instance=\"$instance\"}[1m])", "format": "time_series", "hide": false, @@ -5251,6 +5986,10 @@ }, { "collapsed": true, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "gridPos": { "h": 1, "w": 24, @@ -5264,6 +6003,10 @@ "bars": false, "dashLength": 10, "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "fieldConfig": { "defaults": { "custom": {} @@ -5306,12 +6049,20 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "sum by (grpc_service, grpc_method, instance) 
(rate(grpc_server_started_total{instance=~\"$instance\"}[1m]))", "interval": "", "legendFormat": "Calls: {{grpc_service}}.{{grpc_method}}, {{instance}}", "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, "expr": "sum by (grpc_service, grpc_method, instance) (rate(grpc_server_handled_total{instance=~\"$instance\",grpc_code!=\"OK\"}[1m])) ", "interval": "", "legendFormat": "Errors: {{grpc_service}}.{{grpc_method}}, {{instance}}", @@ -5349,12 +6100,21 @@ } } ], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "refId": "A" + } + ], "title": "Private api", "type": "row" } ], "refresh": "30s", - "schemaVersion": 34, + "schemaVersion": 36, "style": "dark", "tags": [], "templating": { @@ -5539,6 +6299,6 @@ "timezone": "", "title": "Erigon Prometheus", "uid": "FPpjH6Hik", - "version": 57, + "version": 68, "weekStart": "" } \ No newline at end of file diff --git a/cmd/rpcdaemon/README.md b/cmd/rpcdaemon/README.md index 341bc7c8c14..e3133132740 100644 --- a/cmd/rpcdaemon/README.md +++ b/cmd/rpcdaemon/README.md @@ -236,6 +236,7 @@ The following table shows the current implementation status of Erigon's RPC daem | eth_getTransactionCount | Yes | | | eth_getStorageAt | Yes | | | eth_call | Yes | | +| eth_callMany | Yes | Erigon Method PR#4567 | | eth_callBundle | Yes | | | eth_createAccessList | Yes | | | | | | @@ -263,7 +264,8 @@ The following table shows the current implementation status of Erigon's RPC daem | eth_submitWork | Yes | | | | | | | eth_subscribe | Limited | Websock Only - newHeads, | -| | | newPendingTransactions | +| | | newPendingTransactions, | +| | | newPendingBlock | | eth_unsubscribe | Yes | Websock Only | | | | | | engine_newPayloadV1 | Yes | | @@ -280,6 +282,7 @@ The following table shows the current implementation status of Erigon's RPC daem | debug_traceBlockByNumber | Yes | Streaming (can handle huge results) | | debug_traceTransaction | Yes | Streaming (can 
handle huge results) | | debug_traceCall | Yes | Streaming (can handle huge results) | +| debug_traceCallMany | Yes | Erigon Method PR#4567. | | | | | | trace_call | Yes | | | trace_callMany | Yes | | @@ -310,8 +313,7 @@ The following table shows the current implementation status of Erigon's RPC daem | erigon_forks | Yes | Erigon only | | erigon_issuance | Yes | Erigon only | | erigon_GetBlockByTimestamp | Yes | Erigon only | -| | | | -| starknet_call | Yes | Starknet only | +| erigon_BlockNumber | Yes | Erigon only | | | | | | bor_getSnapshot | Yes | Bor only | | bor_getAuthor | Yes | Bor only | diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index a34b28a10d6..e6c498c2820 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -10,10 +10,12 @@ import ( "net/http" "os" "path/filepath" - "runtime" "strings" "time" + "github.com/ledgerwatch/erigon-lib/common/dir" + libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon-lib/direct" @@ -57,25 +59,21 @@ var rootCmd = &cobra.Command{ Short: "rpcdaemon is JSON RPC server that connects to Erigon node for remote DB access", } -const JwtDefaultFile = "jwt.hex" - func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { utils.CobraFlags(rootCmd, append(debug.Flags, utils.MetricFlags...)) - cfg := &httpcfg.HttpCfg{StateCache: kvcache.DefaultCoherentConfig} + cfg := &httpcfg.HttpCfg{Enabled: true, StateCache: kvcache.DefaultCoherentConfig} rootCmd.PersistentFlags().StringVar(&cfg.PrivateApiAddr, "private.api.addr", "127.0.0.1:9090", "private api network address, for example: 127.0.0.1:9090") rootCmd.PersistentFlags().StringVar(&cfg.DataDir, "datadir", "", "path to Erigon working directory") rootCmd.PersistentFlags().StringVar(&cfg.HttpListenAddress, "http.addr", nodecfg.DefaultHTTPHost, "HTTP-RPC server listening interface") - 
rootCmd.PersistentFlags().StringVar(&cfg.EngineHTTPListenAddress, "engine.addr", nodecfg.DefaultHTTPHost, "HTTP-RPC server listening interface for engineAPI") rootCmd.PersistentFlags().StringVar(&cfg.TLSCertfile, "tls.cert", "", "certificate for client side TLS handshake") rootCmd.PersistentFlags().StringVar(&cfg.TLSKeyFile, "tls.key", "", "key file for client side TLS handshake") rootCmd.PersistentFlags().StringVar(&cfg.TLSCACert, "tls.cacert", "", "CA certificate for client side TLS handshake") rootCmd.PersistentFlags().IntVar(&cfg.HttpPort, "http.port", nodecfg.DefaultHTTPPort, "HTTP-RPC server listening port") - rootCmd.PersistentFlags().IntVar(&cfg.EnginePort, "engine.port", nodecfg.DefaultEngineHTTPPort, "HTTP-RPC server listening port for the engineAPI") rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpCORSDomain, "http.corsdomain", []string{}, "Comma separated list of domains from which to accept cross origin requests (browser enforced)") rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpVirtualHost, "http.vhosts", nodecfg.DefaultConfig.HTTPVirtualHosts, "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.") rootCmd.PersistentFlags().BoolVar(&cfg.HttpCompression, "http.compression", true, "Disable http compression") - rootCmd.PersistentFlags().StringSliceVar(&cfg.API, "http.api", []string{"eth", "erigon", "engine"}, "API's offered over the HTTP-RPC interface: eth,engine,erigon,web3,net,debug,trace,txpool,db,starknet. Supported methods: https://github.com/ledgerwatch/erigon/tree/devel/cmd/rpcdaemon") + rootCmd.PersistentFlags().StringSliceVar(&cfg.API, "http.api", []string{"eth", "erigon"}, "API's offered over the HTTP-RPC interface: eth,erigon,web3,net,debug,trace,txpool,db. 
Supported methods: https://github.com/ledgerwatch/erigon/tree/devel/cmd/rpcdaemon") rootCmd.PersistentFlags().Uint64Var(&cfg.Gascap, "rpc.gascap", 50000000, "Sets a cap on gas that can be used in eth_call/estimateGas") rootCmd.PersistentFlags().Uint64Var(&cfg.MaxTraces, "trace.maxtraces", 200, "Sets a limit on traces that can be returned in trace_filter") rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketEnabled, "ws", false, "Enable Websockets") @@ -83,25 +81,20 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { rootCmd.PersistentFlags().StringVar(&cfg.RpcAllowListFilePath, "rpc.accessList", "", "Specify granular (method-by-method) API allowlist") rootCmd.PersistentFlags().UintVar(&cfg.RpcBatchConcurrency, utils.RpcBatchConcurrencyFlag.Name, 2, utils.RpcBatchConcurrencyFlag.Usage) rootCmd.PersistentFlags().BoolVar(&cfg.RpcStreamingDisable, utils.RpcStreamingDisableFlag.Name, false, utils.RpcStreamingDisableFlag.Usage) - rootCmd.PersistentFlags().IntVar(&cfg.DBReadConcurrency, "db.read.concurrency", runtime.GOMAXPROCS(-1), "Does limit amount of parallel db reads") + rootCmd.PersistentFlags().IntVar(&cfg.DBReadConcurrency, utils.DBReadConcurrencyFlag.Name, utils.DBReadConcurrencyFlag.Value, utils.DBReadConcurrencyFlag.Usage) rootCmd.PersistentFlags().BoolVar(&cfg.TraceCompatibility, "trace.compat", false, "Bug for bug compatibility with OE for trace_ routines") rootCmd.PersistentFlags().StringVar(&cfg.TxPoolApiAddr, "txpool.api.addr", "", "txpool api network address, for example: 127.0.0.1:9090 (default: use value of --private.api.addr)") - rootCmd.PersistentFlags().BoolVar(&cfg.TevmEnabled, utils.TevmFlag.Name, false, utils.TevmFlag.Usage) rootCmd.PersistentFlags().BoolVar(&cfg.Sync.UseSnapshots, "snapshot", true, utils.SnapshotFlag.Usage) rootCmd.PersistentFlags().IntVar(&cfg.StateCache.KeysLimit, "state.cache", kvcache.DefaultCoherentConfig.KeysLimit, "Amount of keys to store in StateCache (enabled if no --datadir set). Set 0 to disable StateCache. 
1_000_000 keys ~ equal to 2Gb RAM (maybe we will add RAM accounting in future versions).") rootCmd.PersistentFlags().BoolVar(&cfg.GRPCServerEnabled, "grpc", false, "Enable GRPC server") rootCmd.PersistentFlags().StringVar(&cfg.GRPCListenAddress, "grpc.addr", nodecfg.DefaultGRPCHost, "GRPC server listening interface") rootCmd.PersistentFlags().IntVar(&cfg.GRPCPort, "grpc.port", nodecfg.DefaultGRPCPort, "GRPC server listening port") rootCmd.PersistentFlags().BoolVar(&cfg.GRPCHealthCheckEnabled, "grpc.healthcheck", false, "Enable GRPC health check") - rootCmd.PersistentFlags().StringVar(&cfg.StarknetGRPCAddress, "starknet.grpc.address", "127.0.0.1:6066", "Starknet GRPC address") - rootCmd.PersistentFlags().StringVar(&cfg.JWTSecretPath, utils.JWTSecretPath.Name, utils.JWTSecretPath.Value, "Token to ensure safe connection between CL and EL") rootCmd.PersistentFlags().BoolVar(&cfg.TraceRequests, utils.HTTPTraceFlag.Name, false, "Trace HTTP requests with INFO level") rootCmd.PersistentFlags().DurationVar(&cfg.HTTPTimeouts.ReadTimeout, "http.timeouts.read", rpccfg.DefaultHTTPTimeouts.ReadTimeout, "Maximum duration for reading the entire request, including the body.") rootCmd.PersistentFlags().DurationVar(&cfg.HTTPTimeouts.WriteTimeout, "http.timeouts.write", rpccfg.DefaultHTTPTimeouts.WriteTimeout, "Maximum duration before timing out writes of the response. It is reset whenever a new request's header is read") rootCmd.PersistentFlags().DurationVar(&cfg.HTTPTimeouts.IdleTimeout, "http.timeouts.idle", rpccfg.DefaultHTTPTimeouts.IdleTimeout, "Maximum amount of time to wait for the next request when keep-alives are enabled. 
If http.timeouts.idle is zero, the value of http.timeouts.read is used") - rootCmd.PersistentFlags().DurationVar(&cfg.EngineTimeouts.ReadTimeout, "engine.timeouts.read", rpccfg.DefaultHTTPTimeouts.ReadTimeout, "Maximum duration for reading the entire request, including the body.") - rootCmd.PersistentFlags().DurationVar(&cfg.EngineTimeouts.WriteTimeout, "engine.timeouts.write", rpccfg.DefaultHTTPTimeouts.WriteTimeout, "Maximum duration before timing out writes of the response. It is reset whenever a new request's header is read.") - rootCmd.PersistentFlags().DurationVar(&cfg.EngineTimeouts.IdleTimeout, "engine.timeouts.idle", rpccfg.DefaultHTTPTimeouts.IdleTimeout, "Maximum amount of time to wait for the next request when keep-alives are enabled. If engine.timeouts.idle is zero, the value of engine.timeouts.read is used.") + rootCmd.PersistentFlags().DurationVar(&cfg.EvmCallTimeout, "rpc.evmtimeout", rpccfg.DefaultEvmCallTimeout, "Maximum amount of time to wait for the answer from EVM call.") if err := rootCmd.MarkPersistentFlagFilename("rpc.accessList", "json"); err != nil { panic(err) @@ -215,17 +208,22 @@ func checkDbCompatibility(ctx context.Context, db kv.RoDB) error { return nil } -func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcache.CoherentConfig, blockReader services.FullBlockReader, snapshots remotedbserver.Snapsthots, ethBackendServer remote.ETHBACKENDServer, - txPoolServer txpool.TxpoolServer, miningServer txpool.MiningServer, -) ( - eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, starknet *rpcservices.StarknetService, stateCache kvcache.Cache, ff *rpchelper.Filters, err error, -) { +func EmbeddedServices(ctx context.Context, + erigonDB kv.RoDB, stateCacheCfg kvcache.CoherentConfig, + blockReader services.FullBlockReader, snapshots *snapshotsync.RoSnapshots, agg *libstate.Aggregator22, + ethBackendServer remote.ETHBACKENDServer, txPoolServer txpool.TxpoolServer, miningServer 
txpool.MiningServer, +) (eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, stateCache kvcache.Cache, ff *rpchelper.Filters, err error) { if stateCacheCfg.KeysLimit > 0 { - stateCache = kvcache.New(stateCacheCfg) + stateCache = kvcache.NewDummy() + // notification about new blocks (state stream) doesn't work now inside erigon - because + // erigon does send this stream to privateAPI (erigon with enabled rpc, still have enabled privateAPI). + // without this state stream kvcache can't work and only slow-down things + // + //stateCache = kvcache.New(stateCacheCfg) } else { stateCache = kvcache.NewDummy() } - kvRPC := remotedbserver.NewKvServer(ctx, erigonDB, snapshots) + kvRPC := remotedbserver.NewKvServer(ctx, erigonDB, snapshots, agg) stateDiffClient := direct.NewStateDiffClientDirect(kvRPC) subscribeToStateChangesLoop(ctx, stateDiffClient, stateCache) @@ -235,6 +233,7 @@ func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcac txPool = direct.NewTxPoolClient(txPoolServer) mining = direct.NewMiningClient(miningServer) ff = rpchelper.New(ctx, eth, txPool, mining, func() {}) + return } @@ -243,12 +242,12 @@ func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcac func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, rootCancel context.CancelFunc) ( db kv.RoDB, borDb kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, - starknet *rpcservices.StarknetService, stateCache kvcache.Cache, blockReader services.FullBlockReader, - ff *rpchelper.Filters, err error) { + ff *rpchelper.Filters, agg *libstate.Aggregator22, err error) { if !cfg.WithDatadir && cfg.PrivateApiAddr == "" { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("either remote db or local db must be specified") + return nil, nil, nil, nil, nil, nil, nil, ff, nil, fmt.Errorf("either remote db or local db must be specified") } + 
dir.MustExist(cfg.Dirs.SnapHistory) // Do not change the order of these checks. Chaindata needs to be checked first, because PrivateApiAddr has default value which is not "" // If PrivateApiAddr is checked first, the Chaindata option will never work @@ -258,10 +257,10 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, limiter := semaphore.NewWeighted(int64(cfg.DBReadConcurrency)) rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Readonly().Open() if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, err + return nil, nil, nil, nil, nil, nil, nil, ff, nil, err } if compatErr := checkDbCompatibility(ctx, rwKv); compatErr != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, compatErr + return nil, nil, nil, nil, nil, nil, nil, ff, nil, compatErr } db = rwKv stateCache = kvcache.NewDummy() @@ -274,20 +273,21 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, // ensure db exist tmpDb, err := kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Open() if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, err + return nil, nil, nil, nil, nil, nil, nil, ff, nil, err } tmpDb.Close() } log.Trace("Creating consensus db", "path", borDbPath) borKv, err = kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Readonly().Open() if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, err + return nil, nil, nil, nil, nil, nil, nil, ff, nil, err } // Skip the compatibility check, until we have a schema in erigon-lib borDb = borKv } else { if cfg.StateCache.KeysLimit > 0 { - stateCache = kvcache.New(cfg.StateCache) + stateCache = kvcache.NewDummy() + //stateCache = kvcache.New(cfg.StateCache) } else { stateCache = kvcache.NewDummy() } @@ -314,50 +314,27 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, } return nil }); err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, err + return 
nil, nil, nil, nil, nil, nil, nil, ff, nil, err } if cc == nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("chain config not found in db. Need start erigon at least once on this db") + return nil, nil, nil, nil, nil, nil, nil, ff, nil, fmt.Errorf("chain config not found in db. Need start erigon at least once on this db") } cfg.Snap.Enabled = cfg.Snap.Enabled || cfg.Sync.UseSnapshots - - // if chain config has terminal total difficulty then rpc must have eth and engine APIs enableds - if cc.TerminalTotalDifficulty != nil { - hasEthApiEnabled := false - hasEngineApiEnabled := false - - for _, api := range cfg.API { - switch api { - case "eth": - hasEthApiEnabled = true - case "engine": - hasEngineApiEnabled = true - } - } - - if !hasEthApiEnabled { - cfg.API = append(cfg.API, "eth") - } - - if !hasEngineApiEnabled { - cfg.API = append(cfg.API, "engine") - } - } } creds, err := grpcutil.TLS(cfg.TLSCACert, cfg.TLSCertfile, cfg.TLSKeyFile) if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("open tls cert: %w", err) + return nil, nil, nil, nil, nil, nil, nil, ff, nil, fmt.Errorf("open tls cert: %w", err) } conn, err := grpcutil.Connect(creds, cfg.PrivateApiAddr) if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("could not connect to execution service privateApi: %w", err) + return nil, nil, nil, nil, nil, nil, nil, ff, nil, fmt.Errorf("could not connect to execution service privateApi: %w", err) } kvClient := remote.NewKVClient(conn) remoteKv, err := remotedb.NewRemote(gointerfaces.VersionFromProto(remotedbserver.KvServiceAPIVersion), logger, kvClient).Open() if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("could not connect to remoteKv: %w", err) + return nil, nil, nil, nil, nil, nil, nil, ff, nil, fmt.Errorf("could not connect to remoteKv: %w", err) } subscribeToStateChangesLoop(ctx, kvClient, stateCache) @@ -365,7 +342,28 @@ func RemoteServices(ctx 
context.Context, cfg httpcfg.HttpCfg, logger log.Logger, onNewSnapshot := func() {} if cfg.WithDatadir { if cfg.Snap.Enabled { + allSnapshots := snapshotsync.NewRoSnapshots(cfg.Snap, cfg.Dirs.Snap) + // To povide good UX - immediatly can read snapshots after RPCDaemon start, even if Erigon is down + // Erigon does store list of snapshots in db: means RPCDaemon can read this list now, but read by `kvClient.Snapshots` after establish grpc connection + allSnapshots.OptimisticReopenWithDB(db) + allSnapshots.LogStat() + + if agg, err = libstate.NewAggregator22(cfg.Dirs.SnapHistory, ethconfig.HistoryV3AggregationStep); err != nil { + return nil, nil, nil, nil, nil, nil, nil, ff, nil, fmt.Errorf("create aggregator: %w", err) + } + if err = agg.ReopenFiles(); err != nil { + return nil, nil, nil, nil, nil, nil, nil, ff, nil, fmt.Errorf("create aggregator: %w", err) + } + + db.View(context.Background(), func(tx kv.Tx) error { + agg.LogStats(tx, func(endTxNumMinimax uint64) uint64 { + _, histBlockNumProgress, _ := rawdb.TxNums.FindBlockNum(tx, endTxNumMinimax) + return histBlockNumProgress + }) + return nil + }) + onNewSnapshot = func() { go func() { // don't block events processing by network communication reply, err := kvClient.Snapshots(ctx, &remote.SnapshotsRequest{}, grpc.WaitForReady(true)) @@ -373,14 +371,28 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, log.Warn("[Snapshots] reopen", "err", err) return } - if err := allSnapshots.ReopenList(reply.Files, true); err != nil { + if err := allSnapshots.ReopenList(reply.BlockFiles, true); err != nil { log.Error("[Snapshots] reopen", "err", err) } else { allSnapshots.LogStat() } + + if err = agg.ReopenFiles(); err != nil { + log.Error("[Snapshots] reopen", "err", err) + } else { + db.View(context.Background(), func(tx kv.Tx) error { + agg.LogStats(tx, func(endTxNumMinimax uint64) uint64 { + _, histBlockNumProgress, _ := rawdb.TxNums.FindBlockNum(tx, endTxNumMinimax) + return 
histBlockNumProgress + }) + return nil + }) + } }() } onNewSnapshot() + // TODO: how to don't block startup on remote RPCDaemon? + // txNums = exec22.TxNumsFromDB(allSnapshots, db) blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) } else { log.Info("Use --snapshots=false") @@ -397,7 +409,7 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, if cfg.TxPoolApiAddr != cfg.PrivateApiAddr { txpoolConn, err = grpcutil.Connect(creds, cfg.TxPoolApiAddr) if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("could not connect to txpool api: %w", err) + return nil, nil, nil, nil, nil, nil, nil, ff, nil, fmt.Errorf("could not connect to txpool api: %w", err) } } @@ -424,28 +436,31 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, } }() - if cfg.StarknetGRPCAddress != "" { - starknetConn, err := grpcutil.Connect(creds, cfg.StarknetGRPCAddress) + ff = rpchelper.New(ctx, eth, txPool, mining, onNewSnapshot) + return db, borDb, eth, txPool, mining, stateCache, blockReader, ff, agg, err +} + +func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API, authAPI []rpc.API) error { + if len(authAPI) > 0 { + engineInfo, err := startAuthenticatedRpcServer(cfg, authAPI) if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("could not connect to starknet api: %w", err) + return err } - starknet = rpcservices.NewStarknetService(starknetConn) + go stopAuthenticatedRpcServer(ctx, engineInfo) } - ff = rpchelper.New(ctx, eth, txPool, mining, onNewSnapshot) + if cfg.Enabled { + return startRegularRpcServer(ctx, cfg, rpcAPI) + } - return db, borDb, eth, txPool, mining, starknet, stateCache, blockReader, ff, err + return nil } -func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) error { - var engineListener *http.Server - var engineSrv *rpc.Server - var engineHttpEndpoint string - +func startRegularRpcServer(ctx 
context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) error { // register apis and create handler stack httpEndpoint := fmt.Sprintf("%s:%d", cfg.HttpListenAddress, cfg.HttpPort) - fmt.Printf("TraceRequests = %t\n", cfg.TraceRequests) + log.Trace("TraceRequests = %t\n", cfg.TraceRequests) srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, cfg.RpcStreamingDisable) allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) @@ -455,22 +470,10 @@ func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) srv.SetAllowList(allowListForRPC) var defaultAPIList []rpc.API - var engineAPI []rpc.API for _, api := range rpcAPI { if api.Namespace != "engine" { defaultAPIList = append(defaultAPIList, api) - } else { - engineAPI = append(engineAPI, api) - } - } - - if len(engineAPI) != 0 { - // eth API should also be exposed on the same port as engine API - for _, api := range rpcAPI { - if api.Namespace == "eth" { - engineAPI = append(engineAPI, api) - } } } @@ -503,13 +506,6 @@ func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) info := []interface{}{"url", httpEndpoint, "ws", cfg.WebsocketEnabled, "ws.compression", cfg.WebsocketCompression, "grpc", cfg.GRPCServerEnabled} - if len(engineAPI) > 0 { - engineListener, engineSrv, engineHttpEndpoint, err = createEngineListener(cfg, engineAPI) - if err != nil { - return fmt.Errorf("could not start RPC api for engine: %w", err) - } - } - var ( healthServer *grpcHealth.Server grpcServer *grpc.Server @@ -534,19 +530,11 @@ func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) defer func() { srv.Stop() - if engineSrv != nil { - engineSrv.Stop() - } shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() _ = listener.Shutdown(shutdownCtx) log.Info("HTTP endpoint closed", "url", httpEndpoint) - if engineListener != nil { - _ = engineListener.Shutdown(shutdownCtx) - log.Info("Engine HTTP endpoint close", 
"url", engineHttpEndpoint) - } - if cfg.GRPCServerEnabled { if cfg.GRPCHealthCheckEnabled { healthServer.Shutdown() @@ -561,9 +549,45 @@ func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) return nil } +type engineInfo struct { + Srv *rpc.Server + EngineSrv *rpc.Server + EngineListener *http.Server + EngineHttpEndpoint string +} + +func startAuthenticatedRpcServer(cfg httpcfg.HttpCfg, rpcAPI []rpc.API) (*engineInfo, error) { + log.Trace("TraceRequests = %t\n", cfg.TraceRequests) + srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, cfg.RpcStreamingDisable) + + engineListener, engineSrv, engineHttpEndpoint, err := createEngineListener(cfg, rpcAPI) + if err != nil { + return nil, fmt.Errorf("could not start RPC api for engine: %w", err) + } + return &engineInfo{Srv: srv, EngineSrv: engineSrv, EngineListener: engineListener, EngineHttpEndpoint: engineHttpEndpoint}, nil +} + +func stopAuthenticatedRpcServer(ctx context.Context, engineInfo *engineInfo) { + defer func() { + engineInfo.Srv.Stop() + if engineInfo.EngineSrv != nil { + engineInfo.EngineSrv.Stop() + } + shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if engineInfo.EngineListener != nil { + _ = engineInfo.EngineListener.Shutdown(shutdownCtx) + log.Info("Engine HTTP endpoint close", "url", engineInfo.EngineHttpEndpoint) + } + }() + <-ctx.Done() + log.Info("Exiting Engine...") +} + // isWebsocket checks the header of a http request for a websocket upgrade request. 
func isWebsocket(r *http.Request) bool { - return strings.ToLower(r.Header.Get("Upgrade")) == "websocket" && + return strings.EqualFold(r.Header.Get("Upgrade"), "websocket") && strings.Contains(strings.ToLower(r.Header.Get("Connection")), "upgrade") } @@ -618,16 +642,10 @@ func createHandler(cfg httpcfg.HttpCfg, apiList []rpc.API, httpHandler http.Hand } func createEngineListener(cfg httpcfg.HttpCfg, engineApi []rpc.API) (*http.Server, *rpc.Server, string, error) { - engineHttpEndpoint := fmt.Sprintf("%s:%d", cfg.EngineHTTPListenAddress, cfg.EnginePort) + engineHttpEndpoint := fmt.Sprintf("%s:%d", cfg.AuthRpcHTTPListenAddress, cfg.AuthRpcPort) engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, true) - allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) - if err != nil { - return nil, nil, "", err - } - engineSrv.SetAllowList(allowListForRPC) - if err := node.RegisterApisFromWhitelist(engineApi, nil, engineSrv, true); err != nil { return nil, nil, "", fmt.Errorf("could not start register RPC engine api: %w", err) } @@ -637,24 +655,21 @@ func createEngineListener(cfg httpcfg.HttpCfg, engineApi []rpc.API) (*http.Serve return nil, nil, "", err } - var wsHandler http.Handler - if cfg.WebsocketEnabled { - wsHandler = engineSrv.WebsocketHandler([]string{"*"}, jwtSecret, cfg.WebsocketCompression) - } + wsHandler := engineSrv.WebsocketHandler([]string{"*"}, jwtSecret, cfg.WebsocketCompression) - engineHttpHandler := node.NewHTTPHandlerStack(engineSrv, cfg.HttpCORSDomain, cfg.HttpVirtualHost, cfg.HttpCompression) + engineHttpHandler := node.NewHTTPHandlerStack(engineSrv, nil /* authCors */, cfg.AuthRpcVirtualHost, cfg.HttpCompression) engineApiHandler, err := createHandler(cfg, engineApi, engineHttpHandler, wsHandler, jwtSecret) if err != nil { return nil, nil, "", err } - engineListener, _, err := node.StartHTTPEndpoint(engineHttpEndpoint, cfg.EngineTimeouts, engineApiHandler) + engineListener, _, err := 
node.StartHTTPEndpoint(engineHttpEndpoint, cfg.AuthRpcTimeouts, engineApiHandler) if err != nil { return nil, nil, "", fmt.Errorf("could not start RPC api: %w", err) } - engineInfo := []interface{}{"url", engineHttpEndpoint, "ws", cfg.WebsocketEnabled} + engineInfo := []interface{}{"url", engineHttpEndpoint, "ws", true, "ws.compression", cfg.WebsocketCompression} log.Info("HTTP endpoint opened for Engine API", engineInfo...) return engineListener, engineSrv, engineHttpEndpoint, nil diff --git a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go index db6cfd2004a..6d490d80e09 100644 --- a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go +++ b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go @@ -5,46 +5,48 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/rpc/rpccfg" + "time" ) type HttpCfg struct { - Enabled bool - PrivateApiAddr string - WithDatadir bool // Erigon's database can be read by separated processes on same machine - in read-only mode - with full support of transactions. It will share same "OS PageCache" with Erigon process. 
- DataDir string - Dirs datadir.Dirs - HttpListenAddress string - EngineHTTPListenAddress string - TLSCertfile string - TLSCACert string - TLSKeyFile string - HttpPort int - EnginePort int - HttpCORSDomain []string - HttpVirtualHost []string - HttpCompression bool - API []string - Gascap uint64 - MaxTraces uint64 - WebsocketEnabled bool - WebsocketCompression bool - RpcAllowListFilePath string - RpcBatchConcurrency uint - RpcStreamingDisable bool - DBReadConcurrency int - TraceCompatibility bool // Bug for bug compatibility for trace_ routines with OpenEthereum - TxPoolApiAddr string - TevmEnabled bool - StateCache kvcache.CoherentConfig - Snap ethconfig.Snapshot - Sync ethconfig.Sync - GRPCServerEnabled bool - GRPCListenAddress string - GRPCPort int - GRPCHealthCheckEnabled bool - StarknetGRPCAddress string - JWTSecretPath string // Engine API Authentication - TraceRequests bool // Always trace requests in INFO level - HTTPTimeouts rpccfg.HTTPTimeouts - EngineTimeouts rpccfg.HTTPTimeouts + Enabled bool + PrivateApiAddr string + WithDatadir bool // Erigon's database can be read by separated processes on same machine - in read-only mode - with full support of transactions. It will share same "OS PageCache" with Erigon process. 
+ DataDir string + Dirs datadir.Dirs + HttpListenAddress string + AuthRpcHTTPListenAddress string + TLSCertfile string + TLSCACert string + TLSKeyFile string + HttpPort int + AuthRpcPort int + HttpCORSDomain []string + HttpVirtualHost []string + AuthRpcVirtualHost []string + HttpCompression bool + API []string + Gascap uint64 + MaxTraces uint64 + WebsocketEnabled bool + WebsocketCompression bool + RpcAllowListFilePath string + RpcBatchConcurrency uint + RpcStreamingDisable bool + DBReadConcurrency int + TraceCompatibility bool // Bug for bug compatibility for trace_ routines with OpenEthereum + TxPoolApiAddr string + StateCache kvcache.CoherentConfig + Snap ethconfig.Snapshot + Sync ethconfig.Sync + GRPCServerEnabled bool + GRPCListenAddress string + GRPCPort int + GRPCHealthCheckEnabled bool + StarknetGRPCAddress string + JWTSecretPath string // Engine API Authentication + TraceRequests bool // Always trace requests in INFO level + HTTPTimeouts rpccfg.HTTPTimeouts + AuthRpcTimeouts rpccfg.HTTPTimeouts + EvmCallTimeout time.Duration } diff --git a/cmd/rpcdaemon/commands/bor_helper.go b/cmd/rpcdaemon/commands/bor_helper.go index ef5cf774b4a..5c9ecf482df 100644 --- a/cmd/rpcdaemon/commands/bor_helper.go +++ b/cmd/rpcdaemon/commands/bor_helper.go @@ -131,7 +131,7 @@ func getUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*bor.Valida v := oldValidatorSet oldVals := v.Validators - var changes []*bor.Validator + changes := make([]*bor.Validator, 0, len(oldVals)) for _, ov := range oldVals { if f, ok := validatorContains(newVals, ov); ok { ov.VotingPower = f.VotingPower diff --git a/cmd/rpcdaemon/commands/bor_snapshot.go b/cmd/rpcdaemon/commands/bor_snapshot.go index 2915afe9513..4e9f13aa31d 100644 --- a/cmd/rpcdaemon/commands/bor_snapshot.go +++ b/cmd/rpcdaemon/commands/bor_snapshot.go @@ -193,7 +193,7 @@ func (api *BorImpl) GetCurrentValidators() ([]*bor.Validator, error) { // GetRootHash returns the merkle root of the start to end block headers func (api 
*BorImpl) GetRootHash(start, end uint64) (string, error) { - length := uint64(end - start + 1) + length := end - start + 1 if length > bor.MaxCheckpointLength { return "", &bor.MaxCheckpointLengthExceededError{Start: start, End: end} } diff --git a/cmd/rpcdaemon/commands/call_traces_test.go b/cmd/rpcdaemon/commands/call_traces_test.go index 1146855aad4..1b2968f0181 100644 --- a/cmd/rpcdaemon/commands/call_traces_test.go +++ b/cmd/rpcdaemon/commands/call_traces_test.go @@ -10,6 +10,7 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli/httpcfg" + "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/valyala/fastjson" @@ -23,6 +24,7 @@ import ( ) func blockNumbersFromTraces(t *testing.T, b []byte) []int { + t.Helper() var err error var p fastjson.Parser response := b @@ -34,7 +36,7 @@ func blockNumbersFromTraces(t *testing.T, b []byte) []int { if elems, err = v.Array(); err != nil { t.Fatalf("expected array in the response: %v", err) } - var numbers []int + numbers := make([]int, 0, len(elems)) for _, elem := range elems { bn := elem.GetInt("blockNumber") numbers = append(numbers, bn) @@ -44,15 +46,16 @@ func blockNumbersFromTraces(t *testing.T, b []byte) []int { func TestCallTraceOneByOne(t *testing.T) { m := stages.Mock(t) - defer m.DB.Close() chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { gen.SetCoinbase(common.Address{1}) }, false /* intermediateHashes */) if err != nil { t.Fatalf("generate chain: %v", err) } + + agg := m.HistoryV3Components() api := NewTraceAPI( - NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), + NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, &httpcfg.HttpCfg{}) // Insert 
blocks 1 by 1, to tirgget possible "off by one" errors for i := 0; i < chain.Length(); i++ { @@ -79,7 +82,6 @@ func TestCallTraceOneByOne(t *testing.T) { func TestCallTraceUnwind(t *testing.T) { m := stages.Mock(t) - defer m.DB.Close() var chainA, chainB *core.ChainPack var err error chainA, err = core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { @@ -98,7 +100,9 @@ func TestCallTraceUnwind(t *testing.T) { if err != nil { t.Fatalf("generate chainB: %v", err) } - api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, &httpcfg.HttpCfg{}) + + agg := m.HistoryV3Components() + api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, &httpcfg.HttpCfg{}) if err = m.InsertChain(chainA); err != nil { t.Fatalf("inserting chainA: %v", err) } @@ -116,8 +120,8 @@ func TestCallTraceUnwind(t *testing.T) { if err = api.Filter(context.Background(), traceReq1, stream); err != nil { t.Fatalf("trace_filter failed: %v", err) } - assert.Equal(t, []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, blockNumbersFromTraces(t, buf.Bytes())) + if err = m.InsertChain(chainB.Slice(0, 12)); err != nil { t.Fatalf("inserting chainB: %v", err) } @@ -132,6 +136,7 @@ func TestCallTraceUnwind(t *testing.T) { t.Fatalf("trace_filter failed: %v", err) } assert.Equal(t, []int{1, 2, 3, 4, 5, 11, 12}, blockNumbersFromTraces(t, buf.Bytes())) + if err = m.InsertChain(chainB.Slice(12, 20)); err != nil { t.Fatalf("inserting chainB: %v", err) } @@ -151,14 +156,17 @@ func TestCallTraceUnwind(t *testing.T) { func TestFilterNoAddresses(t *testing.T) { m := stages.Mock(t) - defer m.DB.Close() + if m.HistoryV3 { + t.Skip() + } chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { gen.SetCoinbase(common.Address{1}) }, false /* intermediateHashes */) if err != 
nil { t.Fatalf("generate chain: %v", err) } - api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, &httpcfg.HttpCfg{}) + agg := m.HistoryV3Components() + api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, &httpcfg.HttpCfg{}) // Insert blocks 1 by 1, to tirgget possible "off by one" errors for i := 0; i < chain.Length(); i++ { if err = m.InsertChain(chain.Slice(i, i+1)); err != nil { @@ -182,9 +190,8 @@ func TestFilterNoAddresses(t *testing.T) { func TestFilterAddressIntersection(t *testing.T) { m := stages.Mock(t) - defer m.DB.Close() - - api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, &httpcfg.HttpCfg{}) + agg := m.HistoryV3Components() + api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, &httpcfg.HttpCfg{}) toAddress1, toAddress2, other := common.Address{1}, common.Address{2}, common.Address{3} diff --git a/cmd/rpcdaemon/commands/corner_cases_support_test.go b/cmd/rpcdaemon/commands/corner_cases_support_test.go index 1ab11b66caa..f2871a51477 100644 --- a/cmd/rpcdaemon/commands/corner_cases_support_test.go +++ b/cmd/rpcdaemon/commands/corner_cases_support_test.go @@ -8,6 +8,7 @@ import ( "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/stretchr/testify/require" ) @@ -16,12 +17,12 @@ import ( // see https://github.com/ledgerwatch/erigon/issues/1645 func TestNotFoundMustReturnNil(t *testing.T) { require := require.New(t) - db := rpcdaemontest.CreateTestKV(t) - defer db.Close() + m, _, _ := 
rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) api := NewEthAPI( - NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), - db, nil, nil, nil, 5000000) + NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), + m.DB, nil, nil, nil, 5000000) ctx := context.Background() a, err := api.GetTransactionByBlockNumberAndIndex(ctx, 10_000, 1) diff --git a/cmd/rpcdaemon/commands/daemon.go b/cmd/rpcdaemon/commands/daemon.go index b77afa729b5..698b758def3 100644 --- a/cmd/rpcdaemon/commands/daemon.go +++ b/cmd/rpcdaemon/commands/daemon.go @@ -1,10 +1,10 @@ package commands import ( - "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" + libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli/httpcfg" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" @@ -13,23 +13,18 @@ import ( // APIList describes the list of available RPC apis func APIList(db kv.RoDB, borDb kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, - starknet starknet.CAIROVMClient, filters *rpchelper.Filters, stateCache kvcache.Cache, - blockReader services.FullBlockReader, cfg httpcfg.HttpCfg) (list []rpc.API) { + filters *rpchelper.Filters, stateCache kvcache.Cache, + blockReader services.FullBlockReader, agg *libstate.Aggregator22, cfg httpcfg.HttpCfg) (list []rpc.API) { - base := NewBaseApi(filters, stateCache, blockReader, cfg.WithDatadir) - if cfg.TevmEnabled { - base.EnableTevmExperiment() - } + base := NewBaseApi(filters, stateCache, blockReader, agg, cfg.WithDatadir, cfg.EvmCallTimeout) ethImpl := NewEthAPI(base, db, eth, txPool, mining, cfg.Gascap) erigonImpl := NewErigonAPI(base, db, eth) - starknetImpl := 
NewStarknetAPI(base, db, starknet, txPool) txpoolImpl := NewTxPoolAPI(base, db, txPool) netImpl := NewNetAPIImpl(eth) debugImpl := NewPrivateDebugAPI(base, db, cfg.Gascap) traceImpl := NewTraceAPI(base, db, &cfg) web3Impl := NewWeb3APIImpl(eth) dbImpl := NewDBAPIImpl() /* deprecated */ - engineImpl := NewEngineAPI(base, db, eth) adminImpl := NewAdminAPI(eth) parityImpl := NewParityAPIImpl(db) borImpl := NewBorAPI(base, db, borDb) // bor (consensus) specific @@ -92,20 +87,6 @@ func APIList(db kv.RoDB, borDb kv.RoDB, eth rpchelper.ApiBackend, txPool txpool. Service: ErigonAPI(erigonImpl), Version: "1.0", }) - case "starknet": - list = append(list, rpc.API{ - Namespace: "starknet", - Public: true, - Service: StarknetAPI(starknetImpl), - Version: "1.0", - }) - case "engine": - list = append(list, rpc.API{ - Namespace: "engine", - Public: true, - Service: EngineAPI(engineImpl), - Version: "1.0", - }) case "bor": list = append(list, rpc.API{ Namespace: "bor", @@ -132,3 +113,27 @@ func APIList(db kv.RoDB, borDb kv.RoDB, eth rpchelper.ApiBackend, txPool txpool. 
return list } + +func AuthAPIList(db kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, + filters *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, + agg *libstate.Aggregator22, + cfg httpcfg.HttpCfg) (list []rpc.API) { + base := NewBaseApi(filters, stateCache, blockReader, agg, cfg.WithDatadir, cfg.EvmCallTimeout) + + ethImpl := NewEthAPI(base, db, eth, txPool, mining, cfg.Gascap) + engineImpl := NewEngineAPI(base, db, eth) + + list = append(list, rpc.API{ + Namespace: "eth", + Public: true, + Service: EthAPI(ethImpl), + Version: "1.0", + }, rpc.API{ + Namespace: "engine", + Public: true, + Service: EngineAPI(engineImpl), + Version: "1.0", + }) + + return list +} diff --git a/cmd/rpcdaemon/commands/debug_api.go b/cmd/rpcdaemon/commands/debug_api.go index f692da238d5..a3c5debbd6c 100644 --- a/cmd/rpcdaemon/commands/debug_api.go +++ b/cmd/rpcdaemon/commands/debug_api.go @@ -15,7 +15,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/eth/tracers" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/transactions" @@ -82,12 +81,7 @@ func (api *PrivateDebugAPIImpl) StorageRangeAt(ctx context.Context, blockHash co return h } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - - _, _, _, _, stateReader, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, blockHash, txIndex) + _, _, _, _, stateReader, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, ethash.NewFaker(), tx, blockHash, txIndex) if err != nil { return StorageRangeResult{}, err } @@ -249,11 +243,7 @@ func (api *PrivateDebugAPIImpl) AccountAt(ctx context.Context, 
blockHash common. getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(tx, hash, number) } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, blockHash, txIndex) + _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, ethash.NewFaker(), tx, blockHash, txIndex) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/debug_api_test.go b/cmd/rpcdaemon/commands/debug_api_test.go index 8cc7bfa0dcf..b35f873582f 100644 --- a/cmd/rpcdaemon/commands/debug_api_test.go +++ b/cmd/rpcdaemon/commands/debug_api_test.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon/turbo/snapshotsync" ) @@ -39,11 +40,12 @@ var debugTraceTransactionNoRefundTests = []struct { } func TestTraceBlockByNumber(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - baseApi := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false) - ethApi := NewEthAPI(baseApi, db, nil, nil, nil, 5000000) - api := NewPrivateDebugAPI(baseApi, db, 0) + baseApi := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout) + ethApi := NewEthAPI(baseApi, m.DB, nil, nil, nil, 5000000) + api := NewPrivateDebugAPI(baseApi, m.DB, 0) for _, tt := range debugTraceTransactionTests { var buf bytes.Buffer stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) @@ -72,7 +74,7 @@ func TestTraceBlockByNumber(t *testing.T) { } var buf bytes.Buffer 
stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) - err := api.TraceBlockByNumber(context.Background(), rpc.BlockNumber(rpc.LatestBlockNumber), &tracers.TraceConfig{}, stream) + err := api.TraceBlockByNumber(context.Background(), rpc.LatestBlockNumber, &tracers.TraceConfig{}, stream) if err != nil { t.Errorf("traceBlock %v: %v", rpc.LatestBlockNumber, err) } @@ -86,11 +88,12 @@ func TestTraceBlockByNumber(t *testing.T) { } func TestTraceBlockByHash(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - baseApi := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false) - ethApi := NewEthAPI(baseApi, db, nil, nil, nil, 5000000) - api := NewPrivateDebugAPI(baseApi, db, 0) + baseApi := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout) + ethApi := NewEthAPI(baseApi, m.DB, nil, nil, nil, 5000000) + api := NewPrivateDebugAPI(baseApi, m.DB, 0) for _, tt := range debugTraceTransactionTests { var buf bytes.Buffer stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) @@ -120,11 +123,12 @@ func TestTraceBlockByHash(t *testing.T) { } func TestTraceTransaction(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) api := NewPrivateDebugAPI( - NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), - db, 0) + NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), + m.DB, 0) for _, tt := range debugTraceTransactionTests { var buf bytes.Buffer stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) @@ -152,11 +156,12 @@ func TestTraceTransaction(t *testing.T) { } func TestTraceTransactionNoRefund(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := 
rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) api := NewPrivateDebugAPI( - NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), - db, 0) + NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), + m.DB, 0) for _, tt := range debugTraceTransactionNoRefundTests { var buf bytes.Buffer stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) diff --git a/cmd/rpcdaemon/commands/engine_api.go b/cmd/rpcdaemon/commands/engine_api.go index 706739813c1..5413620239a 100644 --- a/cmd/rpcdaemon/commands/engine_api.go +++ b/cmd/rpcdaemon/commands/engine_api.go @@ -158,14 +158,14 @@ func (e *EngineImpl) NewPayloadV1(ctx context.Context, payload *ExecutionPayload // Convert slice of hexutil.Bytes to a slice of slice of bytes transactions := make([][]byte, len(payload.Transactions)) for i, transaction := range payload.Transactions { - transactions[i] = ([]byte)(transaction) + transactions[i] = transaction } res, err := e.api.EngineNewPayloadV1(ctx, &types2.ExecutionPayload{ ParentHash: gointerfaces.ConvertHashToH256(payload.ParentHash), Coinbase: gointerfaces.ConvertAddressToH160(payload.FeeRecipient), StateRoot: gointerfaces.ConvertHashToH256(payload.StateRoot), ReceiptRoot: gointerfaces.ConvertHashToH256(payload.ReceiptsRoot), - LogsBloom: gointerfaces.ConvertBytesToH2048(([]byte)(payload.LogsBloom)), + LogsBloom: gointerfaces.ConvertBytesToH2048(payload.LogsBloom), PrevRandao: gointerfaces.ConvertHashToH256(payload.PrevRandao), BlockNumber: uint64(payload.BlockNumber), GasLimit: uint64(payload.GasLimit), @@ -266,23 +266,10 @@ func (e *EngineImpl) ExchangeTransitionConfigurationV1(ctx context.Context, beac return TransitionConfiguration{}, fmt.Errorf("the execution layer has a wrong terminal total difficulty. 
expected %v, but instead got: %d", beaconConfig.TerminalTotalDifficulty, terminalTotalDifficulty) } - if chainConfig.TerminalBlockHash != beaconConfig.TerminalBlockHash { - return TransitionConfiguration{}, fmt.Errorf("the execution layer has a wrong terminal block hash. expected %s, but instead got: %s", beaconConfig.TerminalBlockHash, chainConfig.TerminalBlockHash) - } - - terminalBlockNumber := chainConfig.TerminalBlockNumber - if terminalBlockNumber == nil { - terminalBlockNumber = common.Big0 - } - - if terminalBlockNumber.Cmp((*big.Int)(beaconConfig.TerminalBlockNumber)) != 0 { - return TransitionConfiguration{}, fmt.Errorf("the execution layer has a wrong terminal block number. expected %v, but instead got: %d", beaconConfig.TerminalBlockNumber, terminalBlockNumber) - } - return TransitionConfiguration{ TerminalTotalDifficulty: (*hexutil.Big)(terminalTotalDifficulty), - TerminalBlockHash: chainConfig.TerminalBlockHash, - TerminalBlockNumber: (*hexutil.Big)(terminalBlockNumber), + TerminalBlockHash: common.Hash{}, + TerminalBlockNumber: (*hexutil.Big)(common.Big0), }, nil } diff --git a/cmd/rpcdaemon/commands/erigon_api.go b/cmd/rpcdaemon/commands/erigon_api.go index 87781938642..ec35f04e8a7 100644 --- a/cmd/rpcdaemon/commands/erigon_api.go +++ b/cmd/rpcdaemon/commands/erigon_api.go @@ -3,6 +3,8 @@ package commands import ( "context" + ethFilters "github.com/ledgerwatch/erigon/eth/filters" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" @@ -16,6 +18,7 @@ import ( type ErigonAPI interface { // System related (see ./erigon_system.go) Forks(ctx context.Context) (Forks, error) + BlockNumber(ctx context.Context, rpcBlockNumPtr *rpc.BlockNumber) (hexutil.Uint64, error) // Blocks related (see ./erigon_blocks.go) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) @@ -26,6 +29,7 @@ type ErigonAPI interface { // Receipt related (see ./erigon_receipts.go) 
GetLogsByHash(ctx context.Context, hash common.Hash) ([][]*types.Log, error) //GetLogsByNumber(ctx context.Context, number rpc.BlockNumber) ([][]*types.Log, error) + GetLogs(ctx context.Context, crit ethFilters.FilterCriteria) (types.ErigonLogs, error) // WatchTheBurn / reward related (see ./erigon_issuance.go) WatchTheBurn(ctx context.Context, blockNr rpc.BlockNumber) (Issuance, error) diff --git a/cmd/rpcdaemon/commands/erigon_block.go b/cmd/rpcdaemon/commands/erigon_block.go index e56cba2a1d3..b017e38a556 100644 --- a/cmd/rpcdaemon/commands/erigon_block.go +++ b/cmd/rpcdaemon/commands/erigon_block.go @@ -199,12 +199,15 @@ func (api *ErigonImpl) GetBalanceChangesInBlock(ctx context.Context, blockNrOrHa balancesMapping := make(map[common.Address]*hexutil.Big) - newReader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) + newReader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache, api.historyV3(tx), api._agg) if err != nil { return nil, err } - for dbKey, dbValue, _ := c.Seek(startkey); bytes.Equal(dbKey, startkey) && dbKey != nil; dbKey, dbValue, _ = c.Next() { + for dbKey, dbValue, err := c.Seek(startkey); bytes.Equal(dbKey, startkey) && dbKey != nil; dbKey, dbValue, err = c.Next() { + if err != nil { + return nil, err + } _, addressBytes, v, err := decodeFn(dbKey, dbValue) if err != nil { return nil, err diff --git a/cmd/rpcdaemon/commands/erigon_issuance.go b/cmd/rpcdaemon/commands/erigon_issuance.go index 7aef3595e2a..10a0361524b 100644 --- a/cmd/rpcdaemon/commands/erigon_issuance.go +++ b/cmd/rpcdaemon/commands/erigon_issuance.go @@ -50,16 +50,15 @@ func (api *ErigonImpl) WatchTheBurn(ctx context.Context, blockNr rpc.BlockNumber // Clique for example has no issuance return Issuance{}, nil } - hash, err := rawdb.ReadCanonicalHash(tx, uint64(blockNr)) + header, err := api._blockReader.HeaderByNumber(ctx, tx, uint64(blockNr)) if err != nil { return Issuance{}, err } - header := 
rawdb.ReadHeader(tx, hash, uint64(blockNr)) - if header == nil { - return Issuance{}, fmt.Errorf("could not find block header") - } - body := rawdb.ReadCanonicalBodyWithTransactions(tx, hash, uint64(blockNr)) + body, _, err := api._blockReader.Body(ctx, tx, header.Hash(), uint64(blockNr)) + if err != nil { + return Issuance{}, err + } if body == nil { return Issuance{}, fmt.Errorf("could not find block body") @@ -102,7 +101,7 @@ func (api *ErigonImpl) WatchTheBurn(ctx context.Context, blockNr rpc.BlockNumber tips := big.NewInt(0) if header.BaseFee != nil { - receipts, err := rawdb.ReadReceiptsByHash(tx, hash) + receipts, err := rawdb.ReadReceiptsByHash(tx, header.Hash()) if err != nil { return Issuance{}, err } diff --git a/cmd/rpcdaemon/commands/erigon_receipts.go b/cmd/rpcdaemon/commands/erigon_receipts.go index cc9a6bae69f..8405b0a5ad0 100644 --- a/cmd/rpcdaemon/commands/erigon_receipts.go +++ b/cmd/rpcdaemon/commands/erigon_receipts.go @@ -1,11 +1,22 @@ package commands import ( + "bytes" "context" + "encoding/binary" "fmt" + "github.com/RoaringBitmap/roaring" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/filters" + "github.com/ledgerwatch/erigon/ethdb/bitmapdb" + "github.com/ledgerwatch/erigon/ethdb/cbor" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" ) // GetLogsByHash implements erigon_getLogsByHash. Returns an array of arrays of logs generated by the transactions in the block given by the block's hash. @@ -40,6 +51,161 @@ func (api *ErigonImpl) GetLogsByHash(ctx context.Context, hash common.Hash) ([][ return logs, nil } +// GetLogs implements eth_getLogs. Returns an array of logs matching a given filter object. 
+func (api *ErigonImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) (types.ErigonLogs, error) { + var begin, end uint64 + erigonLogs := types.ErigonLogs{} + + tx, beginErr := api.db.BeginRo(ctx) + if beginErr != nil { + return erigonLogs, beginErr + } + defer tx.Rollback() + + if crit.BlockHash != nil { + number := rawdb.ReadHeaderNumber(tx, *crit.BlockHash) + if number == nil { + return nil, fmt.Errorf("block not found: %x", *crit.BlockHash) + } + begin = *number + end = *number + } else { + // Convert the RPC block numbers into internal representations + latest, err := rpchelper.GetLatestBlockNumber(tx) + if err != nil { + return nil, err + } + + begin = latest + if crit.FromBlock != nil { + if crit.FromBlock.Sign() >= 0 { + begin = crit.FromBlock.Uint64() + } else if !crit.FromBlock.IsInt64() || crit.FromBlock.Int64() != int64(rpc.LatestBlockNumber) { + return nil, fmt.Errorf("negative value for FromBlock: %v", crit.FromBlock) + } + } + end = latest + if crit.ToBlock != nil { + if crit.ToBlock.Sign() >= 0 { + end = crit.ToBlock.Uint64() + } else if !crit.ToBlock.IsInt64() || crit.ToBlock.Int64() != int64(rpc.LatestBlockNumber) { + return nil, fmt.Errorf("negative value for ToBlock: %v", crit.ToBlock) + } + } + } + if end < begin { + return nil, fmt.Errorf("end (%d) < begin (%d)", end, begin) + } + if end > roaring.MaxUint32 { + return nil, fmt.Errorf("end (%d) > MaxUint32", end) + } + blockNumbers := bitmapdb.NewBitmap() + defer bitmapdb.ReturnToPool(blockNumbers) + blockNumbers.AddRange(begin, end+1) // [min,max) + + topicsBitmap, err := getTopicsBitmap(tx, crit.Topics, uint32(begin), uint32(end)) + if err != nil { + return nil, err + } + if topicsBitmap != nil { + blockNumbers.And(topicsBitmap) + } + + rx := make([]*roaring.Bitmap, len(crit.Addresses)) + for idx, addr := range crit.Addresses { + m, err := bitmapdb.Get(tx, kv.LogAddressIndex, addr[:], uint32(begin), uint32(end)) + if err != nil { + return nil, err + } + rx[idx] = m + } + 
addrBitmap := roaring.FastOr(rx...) + + if len(rx) > 0 { + blockNumbers.And(addrBitmap) + } + + if blockNumbers.GetCardinality() == 0 { + return erigonLogs, nil + } + + iter := blockNumbers.Iterator() + for iter.HasNext() { + if err = ctx.Err(); err != nil { + return nil, err + } + + blockNumber := uint64(iter.Next()) + var logIndex uint + var txIndex uint + var blockLogs []*types.Log + err := tx.ForPrefix(kv.Log, dbutils.EncodeBlockNumber(blockNumber), func(k, v []byte) error { + var logs types.Logs + if err := cbor.Unmarshal(&logs, bytes.NewReader(v)); err != nil { + return fmt.Errorf("receipt unmarshal failed: %w", err) + } + for _, log := range logs { + log.Index = logIndex + logIndex++ + } + filtered := filterLogs(logs, crit.Addresses, crit.Topics) + if len(filtered) == 0 { + return nil + } + txIndex = uint(binary.BigEndian.Uint32(k[8:])) + for _, log := range filtered { + log.TxIndex = txIndex + } + blockLogs = append(blockLogs, filtered...) + + return nil + }) + if err != nil { + return erigonLogs, err + } + if len(blockLogs) == 0 { + continue + } + + header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNumber) + if err != nil { + return nil, err + } + if header == nil { + return nil, fmt.Errorf("block header not found: %d", blockNumber) + } + timestamp := header.Time + + blockHash, err := rawdb.ReadCanonicalHash(tx, blockNumber) + if err != nil { + return nil, err + } + + body, err := api._blockReader.BodyWithTransactions(ctx, tx, blockHash, blockNumber) + if err != nil { + return nil, err + } + if body == nil { + return nil, fmt.Errorf("block not found %d", blockNumber) + } + for _, log := range blockLogs { + erigonLog := &types.ErigonLog{} + erigonLog.BlockNumber = blockNumber + erigonLog.BlockHash = blockHash + erigonLog.TxHash = body.Transactions[log.TxIndex].Hash() + erigonLog.Timestamp = timestamp + erigonLog.Address = log.Address + erigonLog.Topics = log.Topics + erigonLog.Data = log.Data + erigonLog.Index = log.Index + erigonLog.Removed = 
log.Removed + erigonLogs = append(erigonLogs, erigonLog) + } + } + + return erigonLogs, nil +} + // GetLogsByNumber implements erigon_getLogsByHash. Returns all the logs that appear in a block given the block's hash. // func (api *ErigonImpl) GetLogsByNumber(ctx context.Context, number rpc.BlockNumber) ([][]*types.Log, error) { // tx, err := api.db.Begin(ctx, false) diff --git a/cmd/rpcdaemon/commands/erigon_system.go b/cmd/rpcdaemon/commands/erigon_system.go index 67f4190fc3d..495055ec4b1 100644 --- a/cmd/rpcdaemon/commands/erigon_system.go +++ b/cmd/rpcdaemon/commands/erigon_system.go @@ -4,7 +4,10 @@ import ( "context" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/forkid" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" ) // Forks is a data type to record a list of forks passed by this node @@ -29,3 +32,46 @@ func (api *ErigonImpl) Forks(ctx context.Context) (Forks, error) { return Forks{genesis.Hash(), forksBlocks}, nil } + +// Post the merge eth_blockNumber will return latest forkChoiceHead block number +// erigon_blockNumber will return latest executed block number or any block number requested +func (api *ErigonImpl) BlockNumber(ctx context.Context, rpcBlockNumPtr *rpc.BlockNumber) (hexutil.Uint64, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return 0, err + } + defer tx.Rollback() + + var rpcBlockNum rpc.BlockNumber + if rpcBlockNumPtr == nil { + rpcBlockNum = rpc.LatestExecutedBlockNumber + } + + var blockNum uint64 + switch rpcBlockNum { + case rpc.LatestBlockNumber: + blockNum, err = rpchelper.GetLatestBlockNumber(tx) + if err != nil { + return 0, err + } + case rpc.EarliestBlockNumber: + blockNum = 0 + case rpc.SafeBlockNumber: + blockNum, err = rpchelper.GetSafeBlockNumber(tx) + if err != nil { + return 0, err + } + case rpc.FinalizedBlockNumber: + blockNum, err = rpchelper.GetFinalizedBlockNumber(tx) + if err != nil { + 
return 0, err + } + default: + blockNum, err = rpchelper.GetLatestExecutedBlockNumber(tx) + if err != nil { + return 0, err + } + } + + return hexutil.Uint64(blockNum), nil +} diff --git a/cmd/rpcdaemon/commands/eth_accounts.go b/cmd/rpcdaemon/commands/eth_accounts.go index c55869d3999..62e8ad419a7 100644 --- a/cmd/rpcdaemon/commands/eth_accounts.go +++ b/cmd/rpcdaemon/commands/eth_accounts.go @@ -22,7 +22,7 @@ func (api *APIImpl) GetBalance(ctx context.Context, address common.Address, bloc return nil, fmt.Errorf("getBalance cannot open tx: %w", err1) } defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache, api.historyV3(tx), api._agg) if err != nil { return nil, err } @@ -58,7 +58,7 @@ func (api *APIImpl) GetTransactionCount(ctx context.Context, address common.Addr return nil, fmt.Errorf("getTransactionCount cannot open tx: %w", err1) } defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache, api.historyV3(tx), api._agg) if err != nil { return nil, err } @@ -77,7 +77,7 @@ func (api *APIImpl) GetCode(ctx context.Context, address common.Address, blockNr return nil, fmt.Errorf("getCode cannot open tx: %w", err1) } defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache, api.historyV3(tx), api._agg) if err != nil { return nil, err } @@ -103,7 +103,7 @@ func (api *APIImpl) GetStorageAt(ctx context.Context, address common.Address, in } defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) + reader, err := rpchelper.CreateStateReader(ctx, 
tx, blockNrOrHash, api.filters, api.stateCache, api.historyV3(tx), api._agg) if err != nil { return hexutil.Encode(common.LeftPadBytes(empty, 32)), err } diff --git a/cmd/rpcdaemon/commands/eth_api.go b/cmd/rpcdaemon/commands/eth_api.go index e84ec4c1295..aa05c310c12 100644 --- a/cmd/rpcdaemon/commands/eth_api.go +++ b/cmd/rpcdaemon/commands/eth_api.go @@ -5,12 +5,14 @@ import ( "context" "math/big" "sync" + "time" lru "github.com/hashicorp/golang-lru" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" + libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/common/math" @@ -23,6 +25,7 @@ import ( "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/log/v3" ) // EthAPI is a collection of functions that are exposed in the @@ -43,7 +46,7 @@ type EthAPI interface { // Receipt related (see ./eth_receipts.go) GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) - GetLogs(ctx context.Context, crit ethFilters.FilterCriteria) ([]*types.Log, error) + GetLogs(ctx context.Context, crit ethFilters.FilterCriteria) (types.Logs, error) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber) ([]map[string]interface{}, error) // Uncle related (see ./eth_uncles.go) @@ -100,12 +103,17 @@ type BaseAPI struct { _genesis *types.Block _genesisLock sync.RWMutex + _historyV3 *bool + _historyV3Lock sync.RWMutex + _blockReader services.FullBlockReader _txnReader services.TxnReader - TevmEnabled bool // experiment + _agg *libstate.Aggregator22 + + evmCallTimeout time.Duration } -func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, singleNodeMode bool) *BaseAPI { +func 
NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, agg *libstate.Aggregator22, singleNodeMode bool, evmCallTimeout time.Duration) *BaseAPI { blocksLRUSize := 128 // ~32Mb if !singleNodeMode { blocksLRUSize = 512 @@ -115,7 +123,7 @@ func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader serv panic(err) } - return &BaseAPI{filters: f, stateCache: stateCache, blocksLRU: blocksLRU, _blockReader: blockReader, _txnReader: blockReader} + return &BaseAPI{filters: f, stateCache: stateCache, blocksLRU: blocksLRU, _blockReader: blockReader, _txnReader: blockReader, _agg: agg, evmCallTimeout: evmCallTimeout} } func (api *BaseAPI) chainConfig(tx kv.Tx) (*params.ChainConfig, error) { @@ -123,8 +131,6 @@ func (api *BaseAPI) chainConfig(tx kv.Tx) (*params.ChainConfig, error) { return cfg, err } -func (api *BaseAPI) EnableTevmExperiment() { api.TevmEnabled = true } - // nolint:unused func (api *BaseAPI) genesis(tx kv.Tx) (*types.Block, error) { _, genesis, err := api.chainConfigWithGenesis(tx) @@ -152,6 +158,7 @@ func (api *BaseAPI) blockByHashWithSenders(tx kv.Tx, hash common.Hash) (*types.B if number == nil { return nil, nil } + return api.blockWithSenders(tx, hash, *number) } func (api *BaseAPI) blockWithSenders(tx kv.Tx, hash common.Hash, number uint64) (*types.Block, error) { @@ -183,6 +190,25 @@ func (api *BaseAPI) blockWithSenders(tx kv.Tx, hash common.Hash, number uint64) return block, nil } +func (api *BaseAPI) historyV3(tx kv.Tx) bool { + api._historyV3Lock.RLock() + historyV3 := api._historyV3 + api._historyV3Lock.RUnlock() + + if historyV3 != nil { + return *historyV3 + } + enabled, err := rawdb.HistoryV3.Enabled(tx) + if err != nil { + log.Warn("HisoryV2Enabled: read", "err", err) + return false + } + api._historyV3Lock.Lock() + api._historyV3 = &enabled + api._historyV3Lock.Unlock() + return enabled +} + func (api *BaseAPI) chainConfigWithGenesis(tx kv.Tx) (*params.ChainConfig, *types.Block, error) { 
api._genesisLock.RLock() cc, genesisBlock := api._chainConfig, api._genesis diff --git a/cmd/rpcdaemon/commands/eth_api_test.go b/cmd/rpcdaemon/commands/eth_api_test.go index 043f620db7b..ad7a4831508 100644 --- a/cmd/rpcdaemon/commands/eth_api_test.go +++ b/cmd/rpcdaemon/commands/eth_api_test.go @@ -10,6 +10,7 @@ import ( "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/stretchr/testify/assert" @@ -21,10 +22,11 @@ import ( func TestGetBalanceChangesInBlock(t *testing.T) { assert := assert.New(t) myBlockNum := rpc.BlockNumberOrHashWithNumber(0) - - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + db := m.DB + agg := m.HistoryV3Components() + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), db, nil) balances, err := api.GetBalanceChangesInBlock(context.Background(), myBlockNum) if err != nil { t.Errorf("calling GetBalanceChangesInBlock resulted in an error: %v", err) @@ -42,9 +44,11 @@ func TestGetBalanceChangesInBlock(t *testing.T) { } func TestGetTransactionReceipt(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + db := m.DB + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), db, nil, nil, nil, 5000000) // Call GetTransactionReceipt for transaction which is not in the database if _, err := 
api.GetTransactionReceipt(context.Background(), common.Hash{}); err != nil { t.Errorf("calling GetTransactionReceipt with empty hash: %v", err) @@ -52,9 +56,10 @@ func TestGetTransactionReceipt(t *testing.T) { } func TestGetTransactionReceiptUnprotected(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) // Call GetTransactionReceipt for un-protected transaction if _, err := api.GetTransactionReceipt(context.Background(), common.HexToHash("0x3f3cb8a0e13ed2481f97f53f7095b9cbc78b6ffb779f2d3e565146371a8830ea")); err != nil { t.Errorf("calling GetTransactionReceipt for unprotected tx: %v", err) @@ -65,9 +70,10 @@ func TestGetTransactionReceiptUnprotected(t *testing.T) { func TestGetStorageAt_ByBlockNumber_WithRequireCanonicalDefault(t *testing.T) { assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithNumber(0)) @@ -81,9 +87,9 @@ func TestGetStorageAt_ByBlockNumber_WithRequireCanonicalDefault(t *testing.T) { func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault(t *testing.T) { assert := assert.New(t) 
m, _, _ := rpcdaemontest.CreateTestSentry(t) - db := m.DB + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(m.Genesis.Hash(), false)) @@ -97,9 +103,9 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault(t *testing.T) { func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue(t *testing.T) { assert := assert.New(t) m, _, _ := rpcdaemontest.CreateTestSentry(t) - db := m.DB + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(m.Genesis.Hash(), true)) @@ -112,9 +118,9 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue(t *testing.T) { func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_BlockNotFoundError(t *testing.T) { m, _, _ := rpcdaemontest.CreateTestSentry(t) - db := m.DB + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, 
nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") offChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, block *core.BlockGen) { @@ -135,9 +141,9 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_BlockNotFoundError func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_BlockNotFoundError(t *testing.T) { m, _, _ := rpcdaemontest.CreateTestSentry(t) - db := m.DB + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") offChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, block *core.BlockGen) { @@ -159,9 +165,9 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_BlockNotFoundError(t func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(t *testing.T) { assert := assert.New(t) m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) - db := m.DB + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") orphanedBlock := orphanedChain[0].Blocks[0] @@ -180,9 +186,9 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock( func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *testing.T) { m, _, orphanedChain := 
rpcdaemontest.CreateTestSentry(t) - db := m.DB + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") orphanedBlock := orphanedChain[0].Blocks[0] @@ -198,9 +204,9 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t * func TestCall_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(t *testing.T) { m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) - db := m.DB + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") to := common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") @@ -223,9 +229,9 @@ func TestCall_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(t *testi func TestCall_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *testing.T) { m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) - db := m.DB + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") to := 
common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") diff --git a/cmd/rpcdaemon/commands/eth_block.go b/cmd/rpcdaemon/commands/eth_block.go index a7610ede2c5..5efe9d38ebd 100644 --- a/cmd/rpcdaemon/commands/eth_block.go +++ b/cmd/rpcdaemon/commands/eth_block.go @@ -6,6 +6,7 @@ import ( "math/big" "time" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/common/math" @@ -14,7 +15,6 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" "github.com/ledgerwatch/erigon/turbo/rpchelper" @@ -114,12 +114,7 @@ func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stat return nil, err } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - - blockCtx, txCtx := transactions.GetEvmContext(firstMsg, header, stateBlockNumberOrHash.RequireCanonical, tx, contractHasTEVM, api._blockReader) + blockCtx, txCtx := transactions.GetEvmContext(firstMsg, header, stateBlockNumberOrHash.RequireCanonical, tx, api._blockReader) evm := vm.NewEVM(blockCtx, txCtx, st, chainConfig, vm.Config{Debug: false}) timeoutMilliSeconds := int64(5000) @@ -196,7 +191,7 @@ func (api *APIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber return nil, err } defer tx.Rollback() - b, err := api.blockByRPCNumber(number, tx) + b, err := api.blockByNumber(ctx, number, tx) if err != nil { return nil, err } @@ -314,19 +309,22 @@ func (api *APIImpl) GetBlockTransactionCountByNumber(ctx context.Context, blockN n := hexutil.Uint(len(b.Transactions())) return &n, nil } - blockNum, _, _, err := 
rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) + blockNum, blockHash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) if err != nil { return nil, err } - body, _, txAmount, err := rawdb.ReadBodyByNumber(tx, blockNum) + _, txAmount, err := api._blockReader.Body(ctx, tx, blockHash, blockNum) if err != nil { return nil, err } - if body == nil { + + if txAmount == 0 { return nil, nil } - n := hexutil.Uint(txAmount) - return &n, nil + + numOfTx := hexutil.Uint(txAmount) + + return &numOfTx, nil } // GetBlockTransactionCountByHash implements eth_getBlockTransactionCountByHash. Returns the number of transactions in a block given the block's block hash. @@ -336,15 +334,42 @@ func (api *APIImpl) GetBlockTransactionCountByHash(ctx context.Context, blockHas return nil, err } defer tx.Rollback() - - num := rawdb.ReadHeaderNumber(tx, blockHash) - if num == nil { + blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHash{BlockHash: &blockHash}, tx, nil) + if err != nil { + // (Compatibility) Every other node just return `null` for when the block does not exist. 
+ log.Debug("eth_getBlockTransactionCountByHash GetBlockNumber failed", "err", err) return nil, nil } - body, _, txAmount := rawdb.ReadBody(tx, blockHash, *num) - if body == nil { + _, txAmount, err := api._blockReader.Body(ctx, tx, blockHash, blockNum) + if err != nil { + return nil, err + } + + if txAmount == 0 { return nil, nil } - n := hexutil.Uint(txAmount) - return &n, nil + + numOfTx := hexutil.Uint(txAmount) + + return &numOfTx, nil +} + +func (api *APIImpl) blockByNumber(ctx context.Context, number rpc.BlockNumber, tx kv.Tx) (*types.Block, error) { + if number != rpc.PendingBlockNumber { + return api.blockByRPCNumber(number, tx) + } + + if block := api.pendingBlock(); block != nil { + return block, nil + } + + block, err := api.ethBackend.PendingBlock(ctx) + if err != nil { + return nil, err + } + if block != nil { + return block, nil + } + + return api.blockByRPCNumber(number, tx) } diff --git a/cmd/rpcdaemon/commands/eth_block_test.go b/cmd/rpcdaemon/commands/eth_block_test.go index e335d44cc0b..4253cdb1c54 100644 --- a/cmd/rpcdaemon/commands/eth_block_test.go +++ b/cmd/rpcdaemon/commands/eth_block_test.go @@ -14,6 +14,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages" @@ -22,9 +23,10 @@ import ( // Gets the latest block number with the latest tag func TestGetBlockByNumberWithLatestTag(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, 
rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) b, err := api.GetBlockByNumber(context.Background(), rpc.LatestBlockNumber, false) expected := common.HexToHash("0x6804117de2f3e6ee32953e78ced1db7b20214e0d8c745a03b8fecf7cc8ee76ef") if err != nil { @@ -34,10 +36,11 @@ func TestGetBlockByNumberWithLatestTag(t *testing.T) { } func TestGetBlockByNumberWithLatestTag_WithHeadHashInDb(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() ctx := context.Background() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - tx, err := db.BeginRw(ctx) + tx, err := m.DB.BeginRw(ctx) if err != nil { t.Errorf("could not begin read write transaction: %s", err) } @@ -55,7 +58,7 @@ func TestGetBlockByNumberWithLatestTag_WithHeadHashInDb(t *testing.T) { } tx.Commit() - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) block, err := api.GetBlockByNumber(ctx, rpc.LatestBlockNumber, false) if err != nil { t.Errorf("error retrieving block by number: %s", err) @@ -65,8 +68,8 @@ func TestGetBlockByNumberWithLatestTag_WithHeadHashInDb(t *testing.T) { } func TestGetBlockByNumberWithPendingTag(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) m := stages.MockWithTxPool(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) @@ -86,7 +89,7 @@ func TestGetBlockByNumberWithPendingTag(t *testing.T) { RplBlock: rlpBlock, }) - api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) b, err := 
api.GetBlockByNumber(context.Background(), rpc.PendingBlockNumber, false) if err != nil { t.Errorf("error getting block number with pending tag: %s", err) @@ -95,21 +98,22 @@ func TestGetBlockByNumberWithPendingTag(t *testing.T) { } func TestGetBlockByNumber_WithFinalizedTag_NoFinalizedBlockInDb(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() ctx := context.Background() - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) if _, err := api.GetBlockByNumber(ctx, rpc.FinalizedBlockNumber, false); err != nil { assert.ErrorIs(t, rpchelper.UnknownBlockError, err) } } func TestGetBlockByNumber_WithFinalizedTag_WithFinalizedBlockInDb(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() ctx := context.Background() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - tx, err := db.BeginRw(ctx) + tx, err := m.DB.BeginRw(ctx) if err != nil { t.Errorf("could not begin read write transaction: %s", err) } @@ -127,7 +131,7 @@ func TestGetBlockByNumber_WithFinalizedTag_WithFinalizedBlockInDb(t *testing.T) } tx.Commit() - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) block, err := api.GetBlockByNumber(ctx, rpc.FinalizedBlockNumber, false) if err != nil { t.Errorf("error retrieving block by number: %s", err) @@ -137,21 +141,22 @@ func TestGetBlockByNumber_WithFinalizedTag_WithFinalizedBlockInDb(t *testing.T) } func 
TestGetBlockByNumber_WithSafeTag_NoSafeBlockInDb(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() ctx := context.Background() - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) if _, err := api.GetBlockByNumber(ctx, rpc.SafeBlockNumber, false); err != nil { assert.ErrorIs(t, rpchelper.UnknownBlockError, err) } } func TestGetBlockByNumber_WithSafeTag_WithSafeBlockInDb(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() ctx := context.Background() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - tx, err := db.BeginRw(ctx) + tx, err := m.DB.BeginRw(ctx) if err != nil { t.Errorf("could not begin read write transaction: %s", err) } @@ -169,7 +174,7 @@ func TestGetBlockByNumber_WithSafeTag_WithSafeBlockInDb(t *testing.T) { } tx.Commit() - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) block, err := api.GetBlockByNumber(ctx, rpc.SafeBlockNumber, false) if err != nil { t.Errorf("error retrieving block by number: %s", err) @@ -177,3 +182,72 @@ func TestGetBlockByNumber_WithSafeTag_WithSafeBlockInDb(t *testing.T) { expectedHash := common.HexToHash("0x71b89b6ca7b65debfd2fbb01e4f07de7bba343e6617559fa81df19b605f84662") assert.Equal(t, expectedHash, block["hash"]) } + +func TestGetBlockTransactionCountByHash(t *testing.T) { + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() + ctx := 
context.Background() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) + blockHash := common.HexToHash("0x6804117de2f3e6ee32953e78ced1db7b20214e0d8c745a03b8fecf7cc8ee76ef") + + tx, err := m.DB.BeginRw(ctx) + if err != nil { + t.Errorf("could not begin read write transaction: %s", err) + } + header, err := rawdb.ReadHeaderByHash(tx, blockHash) + if err != nil { + tx.Rollback() + t.Errorf("failed reading block by hash: %s", err) + } + bodyWithTx, err := rawdb.ReadBodyWithTransactions(tx, blockHash, header.Number.Uint64()) + if err != nil { + tx.Rollback() + t.Errorf("failed getting body with transactions: %s", err) + } + tx.Rollback() + + expectedAmount := hexutil.Uint(len(bodyWithTx.Transactions)) + + txAmount, err := api.GetBlockTransactionCountByHash(ctx, blockHash) + if err != nil { + t.Errorf("failed getting the transaction count, err=%s", err) + } + + assert.Equal(t, expectedAmount, *txAmount) +} + +func TestGetBlockTransactionCountByNumber(t *testing.T) { + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() + ctx := context.Background() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) + blockHash := common.HexToHash("0x6804117de2f3e6ee32953e78ced1db7b20214e0d8c745a03b8fecf7cc8ee76ef") + + tx, err := m.DB.BeginRw(ctx) + if err != nil { + t.Errorf("could not begin read write transaction: %s", err) + } + header, err := rawdb.ReadHeaderByHash(tx, blockHash) + if err != nil { + tx.Rollback() + t.Errorf("failed reading block by hash: %s", err) + } + bodyWithTx, err := rawdb.ReadBodyWithTransactions(tx, blockHash, header.Number.Uint64()) + if err != nil { + tx.Rollback() + t.Errorf("failed getting body with transactions: 
%s", err) + } + tx.Rollback() + + expectedAmount := hexutil.Uint(len(bodyWithTx.Transactions)) + + txAmount, err := api.GetBlockTransactionCountByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + if err != nil { + t.Errorf("failed getting the transaction count, err=%s", err) + } + + assert.Equal(t, expectedAmount, *txAmount) +} diff --git a/cmd/rpcdaemon/commands/eth_call.go b/cmd/rpcdaemon/commands/eth_call.go index e32d7d814bb..d78b56f2ce3 100644 --- a/cmd/rpcdaemon/commands/eth_call.go +++ b/cmd/rpcdaemon/commands/eth_call.go @@ -10,6 +10,9 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces" txpool_proto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/log/v3" + "google.golang.org/grpc" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core" @@ -18,14 +21,11 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/tracers/logger" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/transactions" - "github.com/ledgerwatch/log/v3" - "google.golang.org/grpc" ) // Call implements eth_call. Executes a new message call immediately without creating a transaction on the block chain. 
@@ -45,11 +45,6 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHas args.Gas = (*hexutil.Uint64)(&api.GasCap) } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber(blockNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return nil, err @@ -62,7 +57,11 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHas return nil, nil } - result, err := transactions.DoCall(ctx, args, tx, blockNrOrHash, block, overrides, api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM, api._blockReader) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache, api.historyV3(tx), api._agg) + if err != nil { + return nil, err + } + result, err := transactions.DoCall(ctx, args, tx, blockNrOrHash, block, overrides, api.GasCap, chainConfig, stateReader, api._blockReader, api.evmCallTimeout) if err != nil { return nil, err } @@ -129,7 +128,20 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi.CallArgs, return 0, err } if h == nil { - return 0, nil + // if a block number was supplied and there is no header return 0 + if blockNrOrHash != nil { + return 0, nil + } + + // block number not supplied, so we haven't found a pending block, read the latest block instead + bNrOrHash = rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) + h, err = headerByNumberOrHash(ctx, dbtx, bNrOrHash, api) + if err != nil { + return 0, err + } + if h == nil { + return 0, nil + } } hi = h.GasLimit } @@ -191,11 +203,6 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi.CallArgs, return 0, err } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(dbtx) 
- } - // Create a helper to check if a gas allowance results in an executable transaction executable := func(gas uint64) (bool, *core.ExecutionResult, error) { args.Gas = (*hexutil.Uint64)(&gas) @@ -213,8 +220,12 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi.CallArgs, return false, nil, nil } + stateReader, err := rpchelper.CreateStateReader(ctx, dbtx, numOrHash, api.filters, api.stateCache, api.historyV3(dbtx), api._agg) + if err != nil { + return false, nil, err + } result, err := transactions.DoCall(ctx, args, dbtx, numOrHash, block, nil, - api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM, api._blockReader) + api.GasCap, chainConfig, stateReader, api._blockReader, api.evmCallTimeout) if err != nil { if errors.Is(err, core.ErrIntrinsicGas) { // Special case, raise gas limit @@ -297,10 +308,6 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi.CallArgs, if err != nil { return nil, err } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } blockNumber, hash, latest, err := rpchelper.GetCanonicalBlockNumber(bNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return nil, err @@ -371,8 +378,16 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi.CallArgs, } // Set the accesslist to the last al args.AccessList = &accessList - baseFee, _ := uint256.FromBig(header.BaseFee) - msg, err := args.ToMessage(api.GasCap, baseFee) + + var msg types.Message + + var baseFee *uint256.Int = nil + // check if EIP-1559 + if header.BaseFee != nil { + baseFee, _ = uint256.FromBig(header.BaseFee) + } + + msg, err = args.ToMessage(api.GasCap, baseFee) if err != nil { return nil, err } @@ -380,7 +395,7 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi.CallArgs, // Apply the transaction with the access list tracer tracer := 
logger.NewAccessListTracer(accessList, *args.From, to, precompiles) config := vm.Config{Tracer: tracer, Debug: true, NoBaseFee: true} - blockCtx, txCtx := transactions.GetEvmContext(msg, header, bNrOrHash.RequireCanonical, tx, contractHasTEVM, api._blockReader) + blockCtx, txCtx := transactions.GetEvmContext(msg, header, bNrOrHash.RequireCanonical, tx, api._blockReader) evm := vm.NewEVM(blockCtx, txCtx, state, chainConfig, config) gp := new(core.GasPool).AddGas(msg.Gas()) diff --git a/cmd/rpcdaemon/commands/eth_callMany.go b/cmd/rpcdaemon/commands/eth_callMany.go index 9de19308369..b445eb200e7 100644 --- a/cmd/rpcdaemon/commands/eth_callMany.go +++ b/cmd/rpcdaemon/commands/eth_callMany.go @@ -16,7 +16,6 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb" rpcapi "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" @@ -131,7 +130,7 @@ func (api *APIImpl) CallMany(ctx context.Context, bundles []Bundle, simulateCont replayTransactions = block.Transactions()[:transactionIndex] - stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), api.filters, api.stateCache) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), api.filters, api.stateCache, api.historyV3(tx), api._agg) if err != nil { return nil, err @@ -149,12 +148,6 @@ func (api *APIImpl) CallMany(ctx context.Context, bundles []Bundle, simulateCont signer := types.MakeSigner(chainConfig, blockNum) rules := chainConfig.Rules(blockNum) - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - getHash := func(i uint64) common.Hash { if hash, ok := overrideBlockHash[i]; ok { return 
hash @@ -171,16 +164,15 @@ func (api *APIImpl) CallMany(ctx context.Context, bundles []Bundle, simulateCont } blockCtx = vm.BlockContext{ - CanTransfer: core.CanTransfer, - Transfer: core.Transfer, - GetHash: getHash, - ContractHasTEVM: contractHasTEVM, - Coinbase: parent.Coinbase, - BlockNumber: parent.Number.Uint64(), - Time: parent.Time, - Difficulty: new(big.Int).Set(parent.Difficulty), - GasLimit: parent.GasLimit, - BaseFee: &baseFee, + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + GetHash: getHash, + Coinbase: parent.Coinbase, + BlockNumber: parent.Number.Uint64(), + Time: parent.Time, + Difficulty: new(big.Int).Set(parent.Difficulty), + GasLimit: parent.GasLimit, + BaseFee: &baseFee, } evm = vm.NewEVM(blockCtx, txCtx, st, chainConfig, vm.Config{Debug: false}) diff --git a/cmd/rpcdaemon/commands/eth_callMany_test.go b/cmd/rpcdaemon/commands/eth_callMany_test.go index 1dfa837ba72..eeb1b0be2f4 100644 --- a/cmd/rpcdaemon/commands/eth_callMany_test.go +++ b/cmd/rpcdaemon/commands/eth_callMany_test.go @@ -18,6 +18,7 @@ import ( "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon/turbo/snapshotsync" ) @@ -78,7 +79,7 @@ func TestCallMany(t *testing.T) { var secondNonce hexutil.Uint64 = 2 db := contractBackend.DB() - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), contractBackend.Agg(), false, rpccfg.DefaultEvmCallTimeout), db, nil, nil, nil, 5000000) callArgAddr1 := ethapi.CallArgs{From: &address, To: &tokenAddr, Nonce: &nonce, MaxPriorityFeePerGas: (*hexutil.Big)(big.NewInt(1e9)), diff --git a/cmd/rpcdaemon/commands/eth_call_test.go b/cmd/rpcdaemon/commands/eth_call_test.go index 05478190a20..66fb96ef88c 100644 --- a/cmd/rpcdaemon/commands/eth_call_test.go +++ 
b/cmd/rpcdaemon/commands/eth_call_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/ledgerwatch/erigon/rpc/rpccfg" + "github.com/holiman/uint256" "github.com/stretchr/testify/assert" @@ -31,12 +33,13 @@ import ( ) func TestEstimateGas(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) mining := txpool.NewMiningClient(conn) ff := rpchelper.New(ctx, nil, nil, mining, func() {}) - api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) var from = common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") var to = common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") if _, err := api.EstimateGas(context.Background(), ðapi.CallArgs{ @@ -48,9 +51,10 @@ func TestEstimateGas(t *testing.T) { } func TestEthCallNonCanonical(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) var from = common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") var to = common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") if _, err := api.Call(context.Background(), ethapi.CallArgs{ @@ -67,12 +71,14 @@ func TestEthCallToPrunedBlock(t *testing.T) { pruneTo := uint64(3) ethCallBlockNumber := rpc.BlockNumber(2) - db, bankAddress, contractAddress 
:= chainWithDeployedContract(t) + m, bankAddress, contractAddress := chainWithDeployedContract(t) + + prune(t, m.DB, pruneTo) - prune(t, db, pruneTo) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) callData := hexutil.MustDecode("0x2e64cec1") callDataBytes := hexutil.Bytes(callData) @@ -88,16 +94,16 @@ func TestEthCallToPrunedBlock(t *testing.T) { func TestGetBlockByTimestampLatestTime(t *testing.T) { ctx := context.Background() - db := rpcdaemontest.CreateTestKV(t) - - tx, err := db.BeginRo(ctx) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() + tx, err := m.DB.BeginRo(ctx) if err != nil { t.Errorf("fail at beginning tx") } defer tx.Rollback() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil) latestBlock := rawdb.ReadCurrentBlock(tx) response, err := ethapi.RPCMarshalBlock(latestBlock, true, false) @@ -125,16 +131,16 @@ func TestGetBlockByTimestampLatestTime(t *testing.T) { func TestGetBlockByTimestampOldestTime(t *testing.T) { ctx := context.Background() - db := rpcdaemontest.CreateTestKV(t) - - tx, err := db.BeginRo(ctx) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() + tx, err := m.DB.BeginRo(ctx) if err != nil { t.Errorf("failed at beginning tx") } defer tx.Rollback() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + api := NewErigonAPI(NewBaseApi(nil, 
stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil) oldestBlock, err := rawdb.ReadBlockByNumber(tx, 0) if err != nil { @@ -166,16 +172,16 @@ func TestGetBlockByTimestampOldestTime(t *testing.T) { func TestGetBlockByTimeHigherThanLatestBlock(t *testing.T) { ctx := context.Background() - db := rpcdaemontest.CreateTestKV(t) - - tx, err := db.BeginRo(ctx) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() + tx, err := m.DB.BeginRo(ctx) if err != nil { t.Errorf("fail at beginning tx") } defer tx.Rollback() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil) latestBlock := rawdb.ReadCurrentBlock(tx) @@ -204,21 +210,21 @@ func TestGetBlockByTimeHigherThanLatestBlock(t *testing.T) { func TestGetBlockByTimeMiddle(t *testing.T) { ctx := context.Background() - db := rpcdaemontest.CreateTestKV(t) - - tx, err := db.BeginRo(ctx) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() + tx, err := m.DB.BeginRo(ctx) if err != nil { t.Errorf("fail at beginning tx") } defer tx.Rollback() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil) currentHeader := rawdb.ReadCurrentHeader(tx) oldestHeader, err := api._blockReader.HeaderByNumber(ctx, tx, 0) if err != nil { - t.Errorf("error getting oldest header %s", err) + t.Errorf("error getting the oldest header %s", err) } if oldestHeader == nil { t.Error("couldn't find oldest header") @@ -255,16 +261,16 @@ func TestGetBlockByTimeMiddle(t *testing.T) { func 
TestGetBlockByTimestamp(t *testing.T) { ctx := context.Background() - db := rpcdaemontest.CreateTestKV(t) - - tx, err := db.BeginRo(ctx) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() + tx, err := m.DB.BeginRo(ctx) if err != nil { t.Errorf("fail at beginning tx") } defer tx.Rollback() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil) highestBlockNumber := rawdb.ReadCurrentHeader(tx).Number pickedBlock, err := rawdb.ReadBlockByNumber(tx, highestBlockNumber.Uint64()/3) @@ -298,7 +304,7 @@ func TestGetBlockByTimestamp(t *testing.T) { } } -func chainWithDeployedContract(t *testing.T) (kv.RwDB, common.Address, common.Address) { +func chainWithDeployedContract(t *testing.T) (*stages.MockSentry, common.Address, common.Address) { var ( signer = types.LatestSignerForChainID(nil) bankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -350,7 +356,7 @@ func chainWithDeployedContract(t *testing.T) (kv.RwDB, common.Address, common.Ad assert.NoError(t, err) assert.True(t, st.Exist(contractAddr), "Contract should exist at block #2") - return db, bankAddress, contractAddr + return m, bankAddress, contractAddr } func prune(t *testing.T, db kv.RwDB, pruneTo uint64) { diff --git a/cmd/rpcdaemon/commands/eth_filters.go b/cmd/rpcdaemon/commands/eth_filters.go index 9ec2a36c3d5..9d87411b0f8 100644 --- a/cmd/rpcdaemon/commands/eth_filters.go +++ b/cmd/rpcdaemon/commands/eth_filters.go @@ -20,14 +20,8 @@ func (api *APIImpl) NewPendingTransactionFilter(_ context.Context) (string, erro txsCh := make(chan []types.Transaction, 1) id := api.filters.SubscribePendingTxs(txsCh) go func() { - for { - select { - case txs, ok := <-txsCh: - if !ok { - return - } - 
api.filters.AddPendingTxs(id, txs) - } + for txs := range txsCh { + api.filters.AddPendingTxs(id, txs) } }() return "0x" + string(id), nil @@ -41,14 +35,8 @@ func (api *APIImpl) NewBlockFilter(_ context.Context) (string, error) { ch := make(chan *types.Header, 1) id := api.filters.SubscribeNewHeads(ch) go func() { - for { - select { - case block, ok := <-ch: - if !ok { - return - } - api.filters.AddPendingBlock(id, block) - } + for block := range ch { + api.filters.AddPendingBlock(id, block) } }() return "0x" + string(id), nil @@ -62,14 +50,8 @@ func (api *APIImpl) NewFilter(_ context.Context, crit filters.FilterCriteria) (s logs := make(chan *types.Log, 1) id := api.filters.SubscribeLogs(logs, crit) go func() { - for { - select { - case lg, ok := <-logs: - if !ok { - return - } - api.filters.AddLogs(id, lg) - } + for lg := range logs { + api.filters.AddLogs(id, lg) } }() return hexutil.EncodeUint64(uint64(id)), nil @@ -115,8 +97,8 @@ func (api *APIImpl) GetFilterChanges(_ context.Context, index string) ([]interfa return stub, nil } if txs, ok := api.filters.ReadPendingTxs(rpchelper.PendingTxsSubID(cutIndex)); ok { - for _, v := range txs { - for _, tx := range v { + if len(txs) > 0 { + for _, tx := range txs[0] { stub = append(stub, tx.Hash()) } return stub, nil diff --git a/cmd/rpcdaemon/commands/eth_filters_test.go b/cmd/rpcdaemon/commands/eth_filters_test.go index ca4366bb99f..49b25ccc792 100644 --- a/cmd/rpcdaemon/commands/eth_filters_test.go +++ b/cmd/rpcdaemon/commands/eth_filters_test.go @@ -1,26 +1,35 @@ package commands import ( + "math/rand" + "sync" "testing" + "time" + + "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/stretchr/testify/assert" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/filters" 
"github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/stretchr/testify/assert" ) func TestNewFilters(t *testing.T) { assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) mining := txpool.NewMiningClient(conn) ff := rpchelper.New(ctx, nil, nil, mining, func() {}) - api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, nil, nil, 5000000) ptf, err := api.NewPendingTransactionFilter(ctx) assert.Nil(err) @@ -43,3 +52,50 @@ func TestNewFilters(t *testing.T) { assert.Nil(err) assert.Equal(ok, true) } + +func TestLogsSubscribeAndUnsubscribe_WithoutConcurrentMapIssue(t *testing.T) { + ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) + mining := txpool.NewMiningClient(conn) + ff := rpchelper.New(ctx, nil, nil, mining, func() {}) + + // generate some random topics + topics := make([][]common.Hash, 0) + for i := 0; i < 10; i++ { + bytes := make([]byte, common.HashLength) + rand.Read(bytes) + toAdd := []common.Hash{common.BytesToHash(bytes)} + topics = append(topics, toAdd) + } + + // generate some addresses + addresses := make([]common.Address, 0) + for i := 0; i < 10; i++ { + bytes := make([]byte, common.AddressLength) + rand.Read(bytes) + addresses = append(addresses, common.BytesToAddress(bytes)) + } + + crit := filters.FilterCriteria{ + Topics: topics, + Addresses: addresses, + } + + ids := make([]rpchelper.LogsSubID, 1000) + + // make a lot of subscriptions + wg := sync.WaitGroup{} + for i := 0; i < 1000; i++ { + wg.Add(1) + go func(idx int) { + out := make(chan 
*types.Log, 1) + id := ff.SubscribeLogs(out, crit) + defer func() { + time.Sleep(100 * time.Nanosecond) + ff.UnsubscribeLogs(id) + wg.Done() + }() + ids[idx] = id + }(i) + } + wg.Wait() +} diff --git a/cmd/rpcdaemon/commands/eth_ming_test.go b/cmd/rpcdaemon/commands/eth_ming_test.go index 6987ee4ca1b..f36d4515294 100644 --- a/cmd/rpcdaemon/commands/eth_ming_test.go +++ b/cmd/rpcdaemon/commands/eth_ming_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/ledgerwatch/erigon/rpc/rpccfg" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" @@ -21,7 +23,7 @@ func TestPendingBlock(t *testing.T) { mining := txpool.NewMiningClient(conn) ff := rpchelper.New(ctx, nil, nil, mining, func() {}) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), nil, nil, nil, mining, 5000000) + api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), nil, false, rpccfg.DefaultEvmCallTimeout), nil, nil, nil, mining, 5000000) expect := uint64(12345) b, err := rlp.EncodeToBytes(types.NewBlockWithHeader(&types.Header{Number: big.NewInt(int64(expect))})) require.NoError(t, err) diff --git a/cmd/rpcdaemon/commands/eth_mining.go b/cmd/rpcdaemon/commands/eth_mining.go index 9f4cf4982e9..425692f500e 100644 --- a/cmd/rpcdaemon/commands/eth_mining.go +++ b/cmd/rpcdaemon/commands/eth_mining.go @@ -43,10 +43,11 @@ func (api *APIImpl) Mining(ctx context.Context) (bool, error) { // GetWork returns a work package for external miner. 
// // The work package consists of 3 strings: -// result[0] - 32 bytes hex encoded current block header pow-hash -// result[1] - 32 bytes hex encoded seed hash used for DAG -// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty -// result[3] - hex encoded block number +// +// result[0] - 32 bytes hex encoded current block header pow-hash +// result[1] - 32 bytes hex encoded seed hash used for DAG +// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty +// result[3] - hex encoded block number func (api *APIImpl) GetWork(ctx context.Context) ([4]string, error) { var res [4]string repl, err := api.mining.GetWork(ctx, &txpool.GetWorkRequest{}) diff --git a/cmd/rpcdaemon/commands/eth_receipts.go b/cmd/rpcdaemon/commands/eth_receipts.go index ca43379f2ee..2f7bfbd7ca3 100644 --- a/cmd/rpcdaemon/commands/eth_receipts.go +++ b/cmd/rpcdaemon/commands/eth_receipts.go @@ -7,8 +7,10 @@ import ( "fmt" "math/big" + "github.com/RoaringBitmap/roaring/roaring64" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" + libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/log/v3" "github.com/RoaringBitmap/roaring" @@ -22,7 +24,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/filters" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" "github.com/ledgerwatch/erigon/ethdb/cbor" "github.com/ledgerwatch/erigon/params" @@ -43,8 +44,7 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *para } return h } - contractHasTEVM := ethdb.GetHasTEVM(tx) - _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, block.Hash(), 0) + _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, ethash.NewFaker(), tx, block.Hash(), 0) if err != nil { return nil, err } @@ -60,7 
+60,7 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *para for i, txn := range block.Transactions() { ibs.Prepare(txn.Hash(), block.Hash(), i) header := block.Header() - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), ethashFaker, nil, gp, ibs, noopWriter, header, txn, usedGas, vm.Config{}, contractHasTEVM) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), ethashFaker, nil, gp, ibs, noopWriter, header, txn, usedGas, vm.Config{}) if err != nil { return nil, err } @@ -72,9 +72,9 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *para } // GetLogs implements eth_getLogs. Returns an array of logs matching a given filter object. -func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([]*types.Log, error) { +func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) (types.Logs, error) { var begin, end uint64 - logs := []*types.Log{} + logs := types.Logs{} tx, beginErr := api.db.BeginRo(ctx) if beginErr != nil { @@ -83,15 +83,18 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ defer tx.Rollback() if crit.BlockHash != nil { - number := rawdb.ReadHeaderNumber(tx, *crit.BlockHash) - if number == nil { + header, err := api._blockReader.HeaderByHash(ctx, tx, *crit.BlockHash) + if err != nil { + return nil, err + } + if header == nil { return nil, fmt.Errorf("block not found: %x", *crit.BlockHash) } - begin = *number - end = *number + begin = header.Number.Uint64() + end = header.Number.Uint64() } else { // Convert the RPC block numbers into internal representations - latest, err := rpchelper.GetLatestBlockNumber(tx) + latest, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(rpc.LatestExecutedBlockNumber), tx, nil) if err != nil { return nil, err } @@ -116,32 +119,45 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit 
filters.FilterCriteria) ([ if end < begin { return nil, fmt.Errorf("end (%d) < begin (%d)", end, begin) } + if end > roaring.MaxUint32 { + latest, err := rpchelper.GetLatestBlockNumber(tx) + if err != nil { + return nil, err + } + if begin > latest { + return nil, fmt.Errorf("begin (%d) > latest (%d)", begin, latest) + } + end = latest + } - blockNumbers := roaring.New() - blockNumbers.AddRange(begin, end+1) // [min,max) + if api.historyV3(tx) { + return api.getLogsV3(ctx, tx, begin, end, crit) + } + blockNumbers := bitmapdb.NewBitmap() + defer bitmapdb.ReturnToPool(blockNumbers) + blockNumbers.AddRange(begin, end+1) // [min,max) topicsBitmap, err := getTopicsBitmap(tx, crit.Topics, uint32(begin), uint32(end)) if err != nil { return nil, err } + if topicsBitmap != nil { blockNumbers.And(topicsBitmap) } - var addrBitmap *roaring.Bitmap - for _, addr := range crit.Addresses { + rx := make([]*roaring.Bitmap, len(crit.Addresses)) + for idx, addr := range crit.Addresses { m, err := bitmapdb.Get(tx, kv.LogAddressIndex, addr[:], uint32(begin), uint32(end)) if err != nil { return nil, err } - if addrBitmap == nil { - addrBitmap = m - continue - } - addrBitmap = roaring.Or(addrBitmap, m) + rx[idx] = m } - if addrBitmap != nil { + addrBitmap := roaring.FastOr(rx...) + + if len(rx) > 0 { blockNumbers.And(addrBitmap) } @@ -205,14 +221,6 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ log.TxHash = body.Transactions[log.TxIndex].Hash() } logs = append(logs, blockLogs...) - - borLogs := rawdb.ReadBorReceiptLogs(tx, blockHash, blockNumber, txIndex+1, logIndex) - if borLogs != nil { - borLogs = filterLogs(borLogs, crit.Addresses, crit.Topics) - if len(borLogs) > 0 { - logs = append(logs, borLogs...) 
- } - } } return logs, nil @@ -252,11 +260,185 @@ func getTopicsBitmap(c kv.Tx, topics [][]common.Hash, from, to uint32) (*roaring result = bitmapForORing continue } + result = roaring.And(bitmapForORing, result) } return result, nil } +func (api *APIImpl) getLogsV3(ctx context.Context, tx kv.Tx, begin, end uint64, crit filters.FilterCriteria) ([]*types.Log, error) { + logs := []*types.Log{} + + var fromTxNum, toTxNum uint64 + var err error + if begin > 0 { + fromTxNum, err = rawdb.TxNums.Min(tx, begin) + if err != nil { + return nil, err + } + } + toTxNum, err = rawdb.TxNums.Max(tx, end) // end is an inclusive bound + if err != nil { + return nil, err + } + + txNumbers := roaring64.New() + txNumbers.AddRange(fromTxNum, toTxNum) // [min,max) + + ac := api._agg.MakeContext() + ac.SetTx(tx) + + topicsBitmap, err := getTopicsBitmapV3(ac, tx, crit.Topics, fromTxNum, toTxNum) + if err != nil { + return nil, err + } + + if topicsBitmap != nil { + txNumbers.And(topicsBitmap) + } + + var addrBitmap *roaring64.Bitmap + for _, addr := range crit.Addresses { + var bitmapForORing roaring64.Bitmap + it := ac.LogAddrIterator(addr.Bytes(), fromTxNum, toTxNum, tx) + for it.HasNext() { + bitmapForORing.Add(it.Next()) + } + if addrBitmap == nil { + addrBitmap = &bitmapForORing + continue + } + addrBitmap = roaring64.Or(addrBitmap, &bitmapForORing) + } + + if addrBitmap != nil { + txNumbers.And(addrBitmap) + } + + if txNumbers.GetCardinality() == 0 { + return logs, nil + } + var lastBlockNum uint64 + var lastBlockHash common.Hash + var lastHeader *types.Header + var lastSigner *types.Signer + var lastRules *params.Rules + stateReader := state.NewHistoryReader22(ac) + stateReader.SetTx(tx) + //stateReader.SetTrace(true) + iter := txNumbers.Iterator() + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + for iter.HasNext() { + txNum := iter.Next() + // Find block number + ok, blockNum, err := rawdb.TxNums.FindBlockNum(tx, txNum) + if err != nil { + 
return nil, err + } + if !ok { + return nil, nil + } + if blockNum > lastBlockNum { + if lastHeader, err = api._blockReader.HeaderByNumber(ctx, tx, blockNum); err != nil { + return nil, err + } + lastBlockNum = blockNum + lastBlockHash = lastHeader.Hash() + lastSigner = types.MakeSigner(chainConfig, blockNum) + lastRules = chainConfig.Rules(blockNum) + } + var startTxNum uint64 + if blockNum > 0 { + startTxNum, err = rawdb.TxNums.Min(tx, blockNum) // end is an inclusive bound + if err != nil { + return nil, err + } + } + + txIndex := int(txNum) - int(startTxNum) - 1 + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txNum, blockNum, txIndex) + txn, err := api._txnReader.TxnByIdxInBlock(ctx, tx, blockNum, txIndex) + if err != nil { + return nil, err + } + if txn == nil { + continue + } + txHash := txn.Hash() + msg, err := txn.AsMessage(*lastSigner, lastHeader.BaseFee, lastRules) + if err != nil { + return nil, err + } + blockCtx, txCtx := transactions.GetEvmContext(msg, lastHeader, true /* requireCanonical */, tx, api._blockReader) + stateReader.SetTxNum(txNum - 1) + vmConfig := vm.Config{} + vmConfig.SkipAnalysis = core.SkipAnalysis(chainConfig, blockNum) + ibs := state.New(stateReader) + evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vmConfig) + + gp := new(core.GasPool).AddGas(msg.Gas()) + ibs.Prepare(txHash, lastBlockHash, txIndex) + _, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) + if err != nil { + return nil, fmt.Errorf("%w: blockNum=%d, txNum=%d", err, blockNum, txNum) + } + rawLogs := ibs.GetLogs(txHash) + var logIndex uint + for _, log := range rawLogs { + log.Index = logIndex + logIndex++ + } + filtered := filterLogs(rawLogs, crit.Addresses, crit.Topics) + for _, log := range filtered { + log.BlockNumber = blockNum + log.BlockHash = lastBlockHash + log.TxHash = txHash + } + logs = append(logs, filtered...) 
+ } + //stats := api._agg.GetAndResetStats() + //log.Info("Finished", "duration", time.Since(start), "history queries", stats.HistoryQueries, "ef search duration", stats.EfSearchTime) + return logs, nil +} + +// The Topic list restricts matches to particular event topics. Each event has a list +// of topics. Topics matches a prefix of that list. An empty element slice matches any +// topic. Non-empty elements represent an alternative that matches any of the +// contained topics. +// +// Examples: +// {} or nil matches any topic list +// {{A}} matches topic A in first position +// {{}, {B}} matches any topic in first position AND B in second position +// {{A}, {B}} matches topic A in first position AND B in second position +// {{A, B}, {C, D}} matches topic (A OR B) in first position AND (C OR D) in second position +func getTopicsBitmapV3(ac *libstate.Aggregator22Context, tx kv.Tx, topics [][]common.Hash, from, to uint64) (*roaring64.Bitmap, error) { + var result *roaring64.Bitmap + for _, sub := range topics { + var bitmapForORing roaring64.Bitmap + for _, topic := range sub { + it := ac.LogTopicIterator(topic.Bytes(), from, to, tx) + for it.HasNext() { + bitmapForORing.Add(it.Next()) + } + } + + if bitmapForORing.GetCardinality() == 0 { + continue + } + if result == nil { + result = &bitmapForORing + continue + } + result = roaring64.And(&bitmapForORing, result) + } + return result, nil +} + // GetTransactionReceipt implements eth_getTransactionReceipt. Returns the receipt of a transaction given the transaction's hash. 
func (api *APIImpl) GetTransactionReceipt(ctx context.Context, txnHash common.Hash) (map[string]interface{}, error) { tx, err := api.db.BeginRo(ctx) @@ -269,15 +451,32 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, txnHash common.Ha var ok bool blockNum, ok, err = api.txnLookup(ctx, tx, txnHash) - if !ok || blockNum == 0 { - // It is not an ideal solution (ideal solution requires extending TxnLookupReply proto type to include bool flag indicating absense of result), - // but 0 block number is used here to mean that the transaction is not found - return nil, nil + if err != nil { + return nil, err } + + cc, err := api.chainConfig(tx) if err != nil { return nil, err } + if !ok && cc.Bor == nil { + return nil, nil + } + + // if not ok and cc.Bor != nil then we might have a bor transaction + if !ok { + blockNumPtr, err := rawdb.ReadBorTxLookupEntry(tx, txnHash) + if err != nil { + return nil, err + } + if blockNumPtr == nil { + return nil, nil + } + + blockNum = *blockNumPtr + } + block, err := api.blockByNumberWithSenders(tx, blockNum) if err != nil { return nil, err @@ -286,10 +485,6 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, txnHash common.Ha return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 } - cc, err := api.chainConfig(tx) - if err != nil { - return nil, err - } var txnIndex uint64 var txn types.Transaction for idx, transaction := range block.Transactions() { @@ -301,18 +496,17 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, txnHash common.Ha } if txn == nil { - if cc.Bor == nil { - return nil, nil - } - - borTx, blockHash, _, _, err := rawdb.ReadBorTransactionForBlockNumber(tx, blockNum) + borTx, _, _, _, err := rawdb.ReadBorTransactionForBlockNumber(tx, blockNum) if err != nil { return nil, err } if borTx == nil { return nil, nil } - borReceipt := rawdb.ReadBorReceipt(tx, blockHash, blockNum) + borReceipt, err := rawdb.ReadBorReceipt(tx, blockNum) + if err != nil { + 
return nil, err + } if borReceipt == nil { return nil, nil } @@ -366,7 +560,10 @@ func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber if chainConfig.Bor != nil { borTx, _, _, _ := rawdb.ReadBorTransactionForBlock(tx, block) if borTx != nil { - borReceipt := rawdb.ReadBorReceipt(tx, block.Hash(), blockNum) + borReceipt, err := rawdb.ReadBorReceipt(tx, blockNum) + if err != nil { + return nil, err + } if borReceipt != nil { result = append(result, marshalReceipt(borReceipt, borTx, chainConfig, block, borReceipt.TxHash, false)) } diff --git a/cmd/rpcdaemon/commands/eth_subscribe_test.go b/cmd/rpcdaemon/commands/eth_subscribe_test.go index 24f38220c45..eb743ac3b47 100644 --- a/cmd/rpcdaemon/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon/commands/eth_subscribe_test.go @@ -51,7 +51,7 @@ func TestEthSubscribe(t *testing.T) { initialCycle := true highestSeenHeader := chain.TopBlock.NumberU64() - if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if _, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } diff --git a/cmd/rpcdaemon/commands/eth_system.go b/cmd/rpcdaemon/commands/eth_system.go index 20c1e5ef315..194acea2c13 100644 --- a/cmd/rpcdaemon/commands/eth_system.go +++ b/cmd/rpcdaemon/commands/eth_system.go @@ -2,6 +2,7 @@ package commands import ( "context" + "math/big" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" @@ -115,13 +116,16 @@ func (api *APIImpl) GasPrice(ctx context.Context) (*hexutil.Big, error) { } oracle := gasprice.NewOracle(NewGasPriceOracleBackend(tx, cc, api.BaseAPI), ethconfig.Defaults.GPO) tipcap, err := oracle.SuggestTipCap(ctx) + gasResult := big.NewInt(0) + + gasResult.Set(tipcap) if err != nil { return nil, err } if head := rawdb.ReadCurrentHeader(tx); head != nil && head.BaseFee != 
nil { - tipcap.Add(tipcap, head.BaseFee) + gasResult.Add(tipcap, head.BaseFee) } - return (*hexutil.Big)(tipcap), err + return (*hexutil.Big)(gasResult), err } // MaxPriorityFeePerGas returns a suggestion for a gas tip cap for dynamic fee transactions. diff --git a/cmd/rpcdaemon/commands/eth_system_test.go b/cmd/rpcdaemon/commands/eth_system_test.go index 394b97a198b..f33e7613328 100644 --- a/cmd/rpcdaemon/commands/eth_system_test.go +++ b/cmd/rpcdaemon/commands/eth_system_test.go @@ -9,6 +9,7 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core" @@ -43,7 +44,7 @@ func TestGasPrice(t *testing.T) { db := createGasPriceTestKV(t, testCase.chainSize) defer db.Close() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - base := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false) + base := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, false, rpccfg.DefaultEvmCallTimeout) eth := NewEthAPI(base, db, nil, nil, nil, 5000000) ctx := context.Background() diff --git a/cmd/rpcdaemon/commands/parity_api_test.go b/cmd/rpcdaemon/commands/parity_api_test.go index 70a607a8f4b..cb6e571e28d 100644 --- a/cmd/rpcdaemon/commands/parity_api_test.go +++ b/cmd/rpcdaemon/commands/parity_api_test.go @@ -16,8 +16,8 @@ var latestBlock = rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) func TestParityAPIImpl_ListStorageKeys_NoOffset(t *testing.T) { assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) - api := NewParityAPIImpl(db) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + api := NewParityAPIImpl(m.DB) answers := []string{ "0000000000000000000000000000000000000000000000000000000000000000", "0000000000000000000000000000000000000000000000000000000000000002", @@ -38,8 +38,8 @@ func TestParityAPIImpl_ListStorageKeys_NoOffset(t *testing.T) { func 
TestParityAPIImpl_ListStorageKeys_WithOffset_ExistingPrefix(t *testing.T) { assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) - api := NewParityAPIImpl(db) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + api := NewParityAPIImpl(m.DB) answers := []string{ "29d05770ca9ee7088a64e18c8e5160fc62c3c2179dc8ef9b4dbc970c9e51b4d8", "29edc84535d98b29835079d685b97b41ee8e831e343cc80793057e462353a26d", @@ -62,8 +62,8 @@ func TestParityAPIImpl_ListStorageKeys_WithOffset_ExistingPrefix(t *testing.T) { func TestParityAPIImpl_ListStorageKeys_WithOffset_NonExistingPrefix(t *testing.T) { assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) - api := NewParityAPIImpl(db) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + api := NewParityAPIImpl(m.DB) answers := []string{ "4644be453c81744b6842ddf615d7fca0e14a23b09734be63d44c23452de95631", "4974416255391052161ba8184fe652f3bf8c915592c65f7de127af8e637dce5d", @@ -83,8 +83,8 @@ func TestParityAPIImpl_ListStorageKeys_WithOffset_NonExistingPrefix(t *testing.T func TestParityAPIImpl_ListStorageKeys_WithOffset_EmptyResponse(t *testing.T) { assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) - api := NewParityAPIImpl(db) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + api := NewParityAPIImpl(m.DB) addr := common.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcae5") offset := common.Hex2Bytes("ff") b := hexutil.Bytes(offset) @@ -97,8 +97,8 @@ func TestParityAPIImpl_ListStorageKeys_WithOffset_EmptyResponse(t *testing.T) { func TestParityAPIImpl_ListStorageKeys_AccNotFound(t *testing.T) { assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) - api := NewParityAPIImpl(db) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + api := NewParityAPIImpl(m.DB) addr := common.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcaef") _, err := api.ListStorageKeys(context.Background(), addr, 2, nil, latestBlock) assert.Error(err, fmt.Errorf("acc not found")) diff --git a/cmd/rpcdaemon/commands/send_transaction.go 
b/cmd/rpcdaemon/commands/send_transaction.go index dd14c861b05..a41c81695b2 100644 --- a/cmd/rpcdaemon/commands/send_transaction.go +++ b/cmd/rpcdaemon/commands/send_transaction.go @@ -26,13 +26,6 @@ func (api *APIImpl) SendRawTransaction(ctx context.Context, encodedTx hexutil.By return common.Hash{}, err } - txnChainId := txn.GetChainID() - chainId := api._chainConfig.ChainID - - if chainId.Cmp(txnChainId.ToBig()) != 0 { - return common.Hash{}, fmt.Errorf("invalid chain id, expected: %d got: %d", chainId, *txnChainId) - } - // If the transaction fee cap is already specified, ensure the // fee of the given transaction is _reasonable_. if err := checkTxFee(txn.GetPrice().ToBig(), txn.GetGas(), ethconfig.Defaults.RPCTxFeeCap); err != nil { @@ -66,6 +59,14 @@ func (api *APIImpl) SendRawTransaction(ctx context.Context, encodedTx hexutil.By if err != nil { return common.Hash{}, err } + + txnChainId := txn.GetChainID() + chainId := cc.ChainID + + if chainId.Cmp(txnChainId.ToBig()) != 0 { + return common.Hash{}, fmt.Errorf("invalid chain id, expected: %d got: %d", chainId, *txnChainId) + } + signer := types.MakeSigner(cc, *blockNum) from, err := txn.Sender(*signer) if err != nil { diff --git a/cmd/rpcdaemon/commands/send_transaction_test.go b/cmd/rpcdaemon/commands/send_transaction_test.go index 0e848258126..0148ae29e46 100644 --- a/cmd/rpcdaemon/commands/send_transaction_test.go +++ b/cmd/rpcdaemon/commands/send_transaction_test.go @@ -19,6 +19,7 @@ import ( "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages" @@ -59,7 +60,7 @@ func TestSendRawTransaction(t *testing.T) { initialCycle := true highestSeenHeader := chain.TopBlock.NumberU64() - if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, 
m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if _, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } @@ -72,7 +73,7 @@ func TestSendRawTransaction(t *testing.T) { txPool := txpool.NewTxpoolClient(conn) ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := commands.NewEthAPI(commands.NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), m.DB, nil, txPool, nil, 5000000) + api := commands.NewEthAPI(commands.NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), nil, false, rpccfg.DefaultEvmCallTimeout), m.DB, nil, txPool, nil, 5000000) buf := bytes.NewBuffer(nil) err = txn.MarshalBinary(buf) diff --git a/cmd/rpcdaemon/commands/starknet_accounts.go b/cmd/rpcdaemon/commands/starknet_accounts.go deleted file mode 100644 index abe0e5c7903..00000000000 --- a/cmd/rpcdaemon/commands/starknet_accounts.go +++ /dev/null @@ -1,39 +0,0 @@ -package commands - -import ( - "context" - "fmt" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/adapter" - "github.com/ledgerwatch/erigon/turbo/rpchelper" -) - -// GetCode implements starknet_getCode. Returns the byte code at a given address (if it's a smart contract). 
-func (api *StarknetImpl) GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { - tx, err1 := api.db.BeginRo(ctx) - if err1 != nil { - return nil, fmt.Errorf("getCode cannot open tx: %w", err1) - } - defer tx.Rollback() - blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) - if err != nil { - return nil, err - } - - reader := adapter.NewStateReader(tx, blockNumber) - acc, err := reader.ReadAccountData(address) - if acc == nil || err != nil { - return hexutil.Bytes(""), nil - } - res, err := reader.ReadAccountCode(address, acc.Incarnation, acc.CodeHash) - if res == nil || err != nil { - return hexutil.Bytes(""), nil - } - if res == nil { - return hexutil.Bytes(""), nil - } - return res, nil -} diff --git a/cmd/rpcdaemon/commands/starknet_api.go b/cmd/rpcdaemon/commands/starknet_api.go deleted file mode 100644 index 0423e31e725..00000000000 --- a/cmd/rpcdaemon/commands/starknet_api.go +++ /dev/null @@ -1,34 +0,0 @@ -package commands - -import ( - "context" - "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/rpc" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" -) - -type StarknetAPI interface { - SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) - GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) - Call(ctx context.Context, request StarknetCallRequest, blockNrOrHash rpc.BlockNumberOrHash) ([]string, error) -} - -type StarknetImpl struct { - *BaseAPI - db kv.RoDB - client starknet.CAIROVMClient - txPool txpool.TxpoolClient -} - -func NewStarknetAPI(base *BaseAPI, db kv.RoDB, client starknet.CAIROVMClient, txPool txpool.TxpoolClient) *StarknetImpl { - return &StarknetImpl{ - BaseAPI: base, - db: db, - 
client: client, - txPool: txPool, - } -} diff --git a/cmd/rpcdaemon/commands/starknet_call.go b/cmd/rpcdaemon/commands/starknet_call.go deleted file mode 100644 index 4b68eac9c39..00000000000 --- a/cmd/rpcdaemon/commands/starknet_call.go +++ /dev/null @@ -1,96 +0,0 @@ -package commands - -import ( - "context" - "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/rpc" - "google.golang.org/protobuf/types/known/anypb" - "google.golang.org/protobuf/types/known/wrapperspb" - "reflect" - "strings" -) - -type StarknetGrpcCallArgs struct { - Inputs string - Address string - Function string - Code string - BlockHash string - BlockNumber int64 - Network string -} - -type StarknetCallRequest struct { - ContractAddress common.Address32 - EntryPointSelector string - CallData []string -} - -func (s StarknetGrpcCallArgs) ToMapAny() (result map[string]*anypb.Any) { - result = make(map[string]*anypb.Any) - - v := reflect.ValueOf(s) - typeOfS := v.Type() - - for i := 0; i < v.NumField(); i++ { - fieldName := strings.ToLower(typeOfS.Field(i).Name) - switch v.Field(i).Kind() { - case reflect.Int64: - result[fieldName], _ = anypb.New(wrapperspb.Int64(v.Field(i).Interface().(int64))) - default: - result[fieldName], _ = anypb.New(wrapperspb.String(v.Field(i).Interface().(string))) - } - } - return result -} - -// Call implements starknet_call. 
-func (api *StarknetImpl) Call(ctx context.Context, request StarknetCallRequest, blockNrOrHash rpc.BlockNumberOrHash) ([]string, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - code, err := api.GetCode(ctx, request.ContractAddress.ToCommonAddress(), blockNrOrHash) - if err != nil { - return nil, err - } - - requestParams := &StarknetGrpcCallArgs{ - Inputs: strings.Join(request.CallData, ","), - Address: request.ContractAddress.String(), - Function: request.EntryPointSelector, - Code: code.String(), - } - - if blockNrOrHash.BlockHash != nil { - requestParams.BlockHash = blockNrOrHash.BlockHash.String() - } - - if blockNrOrHash.BlockNumber != nil { - requestParams.BlockNumber = blockNrOrHash.BlockNumber.Int64() - } - - requestParamsMap := requestParams.ToMapAny() - - grpcRequest := &starknet.CallRequest{ - Method: "starknet_call", - Params: requestParamsMap, - } - - response, err := api.client.Call(ctx, grpcRequest) - if err != nil { - return nil, err - } - - var result []string - for _, v := range response.Result { - s := wrapperspb.String("") - v.UnmarshalTo(s) - result = append(result, s.GetValue()) - } - - return result, nil -} diff --git a/cmd/rpcdaemon/commands/starknet_send_transaction.go b/cmd/rpcdaemon/commands/starknet_send_transaction.go deleted file mode 100644 index 7bb90ea3bf0..00000000000 --- a/cmd/rpcdaemon/commands/starknet_send_transaction.go +++ /dev/null @@ -1,50 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "errors" - "fmt" - txPoolProto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/log/v3" -) - -var ( - ErrOnlyStarknetTx = errors.New("only support starknet transactions") - ErrOnlyContractDeploy = errors.New("only support contract creation") -) - -// 
SendRawTransaction deploy new cairo contract -func (api *StarknetImpl) SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) { - txn, err := types.DecodeTransaction(rlp.NewStream(bytes.NewReader(encodedTx), uint64(len(encodedTx)))) - - if err != nil { - return common.Hash{}, err - } - - if !txn.IsStarkNet() { - return common.Hash{}, ErrOnlyStarknetTx - } - - if !txn.IsContractDeploy() { - return common.Hash{}, ErrOnlyContractDeploy - } - - hash := txn.Hash() - res, err := api.txPool.Add(ctx, &txPoolProto.AddRequest{RlpTxs: [][]byte{encodedTx}}) - if err != nil { - return common.Hash{}, err - } - - if res.Imported[0] != txPoolProto.ImportResult_SUCCESS { - return hash, fmt.Errorf("%s: %s", txPoolProto.ImportResult_name[int32(res.Imported[0])], res.Errors[0]) - } - - log.Info("Submitted contract creation", "hash", txn.Hash().Hex(), "nonce", txn.GetNonce(), "value", txn.GetValue()) - - return txn.Hash(), nil -} diff --git a/cmd/rpcdaemon/commands/starknet_send_transaction_test.go b/cmd/rpcdaemon/commands/starknet_send_transaction_test.go deleted file mode 100644 index 8ab1a19497a..00000000000 --- a/cmd/rpcdaemon/commands/starknet_send_transaction_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package commands_test - -import ( - "bytes" - "testing" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/stretchr/testify/require" -) - -func TestErrorStarknetSendRawTransaction(t *testing.T) { - 
var cases = []struct { - name string - tx string - error error - }{ - {name: "wrong tx type", tx: generateDynamicFeeTransaction(), error: commands.ErrOnlyStarknetTx}, - {name: "not contract creation", tx: generateStarknetTransaction(), error: commands.ErrOnlyContractDeploy}, - } - - m, require := stages.MockWithTxPool(t), require.New(t) - ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) - txPool := txpool.NewTxpoolClient(conn) - starknetClient := starknet.NewCAIROVMClient(conn) - ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - - for _, tt := range cases { - api := commands.NewStarknetAPI(commands.NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), m.DB, starknetClient, txPool) - - t.Run(tt.name, func(t *testing.T) { - hex, _ := hexutil.Decode(tt.tx) - - _, err := api.SendRawTransaction(ctx, hex) - - require.ErrorIs(err, tt.error) - }) - } -} - -func generateDynamicFeeTransaction() string { - buf := bytes.NewBuffer(nil) - types.DynamicFeeTransaction{ - CommonTx: types.CommonTx{ - ChainID: new(uint256.Int), - Nonce: 1, - Value: uint256.NewInt(1), - Gas: 1, - }, - Tip: new(uint256.Int), - FeeCap: new(uint256.Int), - }.MarshalBinary(buf) - - return hexutil.Encode(buf.Bytes()) -} - -func generateStarknetTransaction() string { - buf := bytes.NewBuffer(nil) - types.StarknetTransaction{ - CommonTx: types.CommonTx{ - ChainID: new(uint256.Int), - Nonce: 1, - Value: uint256.NewInt(1), - Gas: 1, - To: &common.Address{}, - }, - Tip: new(uint256.Int), - FeeCap: new(uint256.Int), - }.MarshalBinary(buf) - - return hexutil.Encode(buf.Bytes()) -} diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go b/cmd/rpcdaemon/commands/trace_adhoc.go index acb42089017..ce137112592 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc.go +++ b/cmd/rpcdaemon/commands/trace_adhoc.go @@ -21,7 +21,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" 
"github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/shards" @@ -29,8 +28,6 @@ import ( "github.com/ledgerwatch/log/v3" ) -const callTimeout = 5 * time.Minute - const ( CALL = "call" CALLCODE = "callcode" @@ -895,8 +892,8 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp // Setup context so it may be cancelled the call has completed // or, in case of unmetered gas, setup a context with a timeout. var cancel context.CancelFunc - if callTimeout > 0 { - ctx, cancel = context.WithTimeout(ctx, callTimeout) + if api.evmCallTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, api.evmCallTimeout) } else { ctx, cancel = context.WithCancel(ctx) } @@ -943,11 +940,7 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp return nil, err } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, tx, contractHasTEVM, api._blockReader) + blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, tx, api._blockReader) blockCtx.GasLimit = math.MaxUint64 blockCtx.MaxGasLimit = true @@ -982,7 +975,7 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp // If the timer caused an abort, return an appropriate error message if evm.Cancelled() { - return nil, fmt.Errorf("execution aborted (timeout = %v)", callTimeout) + return nil, fmt.Errorf("execution aborted (timeout = %v)", api.evmCallTimeout) } return traceResult, nil @@ -1115,8 +1108,8 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type // Setup context so it may be cancelled the call has 
completed // or, in case of unmetered gas, setup a context with a timeout. var cancel context.CancelFunc - if callTimeout > 0 { - ctx, cancel = context.WithTimeout(ctx, callTimeout) + if api.evmCallTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, api.evmCallTimeout) } else { ctx, cancel = context.WithCancel(ctx) } @@ -1132,11 +1125,6 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type useParent = true } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(dbtx) - } - for txIndex, msg := range msgs { if err := libcommon.Stopped(ctx.Done()); err != nil { return nil, err @@ -1173,7 +1161,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type } // Get a new instance of the EVM. - blockCtx, txCtx := transactions.GetEvmContext(msg, header, parentNrOrHash.RequireCanonical, dbtx, contractHasTEVM, api._blockReader) + blockCtx, txCtx := transactions.GetEvmContext(msg, header, parentNrOrHash.RequireCanonical, dbtx, api._blockReader) if useParent { blockCtx.GasLimit = math.MaxUint64 blockCtx.MaxGasLimit = true @@ -1225,6 +1213,11 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type traceResult.Trace = []*ParityTrace{} } results = append(results, traceResult) + // When txIndexNeeded is not -1, we are tracing specific transaction in the block and not the entire block, so we stop after we've traced + // the required transaction + if txIndexNeeded != -1 && txIndex == txIndexNeeded { + break + } } return results, nil } diff --git a/cmd/rpcdaemon/commands/trace_adhoc_test.go b/cmd/rpcdaemon/commands/trace_adhoc_test.go index b3b2ea5f843..a4b570d9efc 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc_test.go +++ b/cmd/rpcdaemon/commands/trace_adhoc_test.go @@ -13,14 +13,16 @@ import ( "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/rawdb" 
"github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/stretchr/testify/require" ) func TestEmptyQuery(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{}) + api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, &httpcfg.HttpCfg{}) // Call GetTransactionReceipt for transaction which is not in the database var latest = rpc.LatestBlockNumber results, err := api.CallMany(context.Background(), json.RawMessage("[]"), &rpc.BlockNumberOrHash{BlockNumber: &latest}) @@ -35,9 +37,10 @@ func TestEmptyQuery(t *testing.T) { } } func TestCoinbaseBalance(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{}) + api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, &httpcfg.HttpCfg{}) // Call GetTransactionReceipt for transaction which is not in the database var latest = rpc.LatestBlockNumber results, err := api.CallMany(context.Background(), json.RawMessage(` @@ -62,11 +65,12 @@ func TestCoinbaseBalance(t *testing.T) { } func TestReplayTransaction(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{}) + api := 
NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, &httpcfg.HttpCfg{}) var txnHash common.Hash - if err := db.View(context.Background(), func(tx kv.Tx) error { + if err := m.DB.View(context.Background(), func(tx kv.Tx) error { b, err := rawdb.ReadBlockByNumber(tx, 6) if err != nil { return err @@ -90,9 +94,10 @@ func TestReplayTransaction(t *testing.T) { } func TestReplayBlockTransactions(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + agg := m.HistoryV3Components() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{}) + api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, &httpcfg.HttpCfg{}) // Call GetTransactionReceipt for transaction which is not in the database n := rpc.BlockNumber(6) diff --git a/cmd/rpcdaemon/commands/trace_filtering.go b/cmd/rpcdaemon/commands/trace_filtering.go index 74cec78e1d3..193ba8c4c32 100644 --- a/cmd/rpcdaemon/commands/trace_filtering.go +++ b/cmd/rpcdaemon/commands/trace_filtering.go @@ -4,21 +4,26 @@ import ( "context" "errors" "fmt" + "math/big" "github.com/RoaringBitmap/roaring/roaring64" jsoniter "github.com/json-iterator/go" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/consensus/ethash" + "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" + 
"github.com/ledgerwatch/erigon/turbo/shards" + "github.com/ledgerwatch/erigon/turbo/transactions" ) // Transaction implements trace_transaction @@ -171,12 +176,18 @@ func (api *TraceAPIImpl) Block(ctx context.Context, blockNr rpc.BlockNumber) (Pa out = append(out, *pt) } } + + difficulty := block.Difficulty() + minerReward, uncleRewards := ethash.AccumulateRewards(chainConfig, block.Header(), block.Uncles()) var tr ParityTrace var rewardAction = &RewardTraceAction{} rewardAction.Author = block.Coinbase() rewardAction.RewardType = "block" // nolint: goconst - rewardAction.Value.ToInt().Set(minerReward.ToBig()) + if difficulty.Cmp(big.NewInt(0)) != 0 { + // block reward is not returned in POS + rewardAction.Value.ToInt().Set(minerReward.ToBig()) + } tr.Action = rewardAction tr.BlockHash = &common.Hash{} copy(tr.BlockHash[:], block.Hash().Bytes()) @@ -185,21 +196,25 @@ func (api *TraceAPIImpl) Block(ctx context.Context, blockNr rpc.BlockNumber) (Pa tr.Type = "reward" // nolint: goconst tr.TraceAddress = []int{} out = append(out, tr) - for i, uncle := range block.Uncles() { - if i < len(uncleRewards) { - var tr ParityTrace - rewardAction = &RewardTraceAction{} - rewardAction.Author = uncle.Coinbase - rewardAction.RewardType = "uncle" // nolint: goconst - rewardAction.Value.ToInt().Set(uncleRewards[i].ToBig()) - tr.Action = rewardAction - tr.BlockHash = &common.Hash{} - copy(tr.BlockHash[:], block.Hash().Bytes()) - tr.BlockNumber = new(uint64) - *tr.BlockNumber = block.NumberU64() - tr.Type = "reward" // nolint: goconst - tr.TraceAddress = []int{} - out = append(out, tr) + + // Uncles are not returned in POS + if difficulty.Cmp(big.NewInt(0)) != 0 { + for i, uncle := range block.Uncles() { + if i < len(uncleRewards) { + var tr ParityTrace + rewardAction = &RewardTraceAction{} + rewardAction.Author = uncle.Coinbase + rewardAction.RewardType = "uncle" // nolint: goconst + rewardAction.Value.ToInt().Set(uncleRewards[i].ToBig()) + tr.Action = rewardAction + tr.BlockHash = 
&common.Hash{} + copy(tr.BlockHash[:], block.Hash().Bytes()) + tr.BlockNumber = new(uint64) + *tr.BlockNumber = block.NumberU64() + tr.Type = "reward" // nolint: goconst + tr.TraceAddress = []int{} + out = append(out, tr) + } } } @@ -212,7 +227,6 @@ func (api *TraceAPIImpl) Block(ctx context.Context, blockNr rpc.BlockNumber) (Pa func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, stream *jsoniter.Stream) error { dbtx, err1 := api.kv.BeginRo(ctx) if err1 != nil { - stream.WriteNil() return fmt.Errorf("traceFilter cannot open tx: %w", err1) } defer dbtx.Rollback() @@ -233,10 +247,13 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str } if fromBlock > toBlock { - stream.WriteNil() return fmt.Errorf("invalid parameters: fromBlock cannot be greater than toBlock") } + if api.historyV3(dbtx) { + return api.filterV3(ctx, dbtx, fromBlock, toBlock, req, stream) + } + fromAddresses := make(map[common.Address]struct{}, len(req.FromAddress)) toAddresses := make(map[common.Address]struct{}, len(req.ToAddress)) @@ -252,7 +269,6 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str if errors.Is(err, ethdb.ErrKeyNotFound) { continue } - stream.WriteNil() return err } allBlocks.Or(b) @@ -267,7 +283,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str if errors.Is(err, ethdb.ErrKeyNotFound) { continue } - stream.WriteNil() + return err } blocksTo.Or(b) @@ -294,7 +310,6 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str chainConfig, err := api.chainConfig(dbtx) if err != nil { - stream.WriteNil() return err } @@ -315,32 +330,65 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str nExported := uint64(0) it := allBlocks.Iterator() + isPos := false for it.HasNext() { - b := uint64(it.Next()) + b := it.Next() // Extract transactions from block hash, hashErr := rawdb.ReadCanonicalHash(dbtx, b) if hashErr != 
nil { - stream.WriteNil() - return hashErr + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(hashErr, stream) + stream.WriteObjectEnd() + continue } block, bErr := api.blockWithSenders(dbtx, hash, b) if bErr != nil { - stream.WriteNil() - return bErr + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(bErr, stream) + stream.WriteObjectEnd() + continue } if block == nil { - stream.WriteNil() - return fmt.Errorf("could not find block %x %d", hash, b) + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(fmt.Errorf("could not find block %x %d", hash, b), stream) + stream.WriteObjectEnd() + continue } blockHash := block.Hash() blockNumber := block.NumberU64() + if !isPos && api._chainConfig.TerminalTotalDifficulty != nil { + header := block.Header() + isPos = header.Difficulty.Cmp(common.Big0) == 0 || header.Difficulty.Cmp(api._chainConfig.TerminalTotalDifficulty) >= 0 + } txs := block.Transactions() t, tErr := api.callManyTransactions(ctx, dbtx, txs, []string{TraceTypeTrace}, block.ParentHash(), rpc.BlockNumber(block.NumberU64()-1), block.Header(), -1 /* all tx indices */, types.MakeSigner(chainConfig, b), chainConfig.Rules(b)) if tErr != nil { - stream.WriteNil() - return tErr + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(tErr, stream) + stream.WriteObjectEnd() + continue } includeAll := len(fromAddresses) == 0 && len(toAddresses) == 0 for i, trace := range t { @@ -356,8 +404,15 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str pt.TransactionPosition = &txPosition b, err := json.Marshal(pt) if err != nil { - stream.WriteNil() - return err + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue 
} if nSeen > after && nExported < count { if first { @@ -371,6 +426,13 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str } } } + + // if we are in POS + // we dont check for uncles or block rewards + if isPos { + continue + } + minerReward, uncleRewards := ethash.AccumulateRewards(chainConfig, block.Header(), block.Uncles()) if _, ok := toAddresses[block.Coinbase()]; ok || includeAll { nSeen++ @@ -388,8 +450,15 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str tr.TraceAddress = []int{} b, err := json.Marshal(tr) if err != nil { - stream.WriteNil() - return err + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue } if nSeen > after && nExported < count { if first { @@ -419,8 +488,15 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str tr.TraceAddress = []int{} b, err := json.Marshal(tr) if err != nil { - stream.WriteNil() - return err + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue } if nSeen > after && nExported < count { if first { @@ -439,6 +515,354 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str return stream.Flush() } +func (api *TraceAPIImpl) filterV3(ctx context.Context, dbtx kv.Tx, fromBlock, toBlock uint64, req TraceFilterRequest, stream *jsoniter.Stream) error { + var fromTxNum, toTxNum uint64 + var err error + if fromBlock > 0 { + fromTxNum, err = rawdb.TxNums.Min(dbtx, fromBlock) + if err != nil { + return err + } + } + toTxNum, err = rawdb.TxNums.Max(dbtx, toBlock) // toBlock is an inclusive bound + if err != nil { + return err + } + + fromAddresses := make(map[common.Address]struct{}, len(req.FromAddress)) + toAddresses := make(map[common.Address]struct{}, len(req.ToAddress)) + + var ( + allTxs 
roaring64.Bitmap + txsTo roaring64.Bitmap + ) + ac := api._agg.MakeContext() + ac.SetTx(dbtx) + + for _, addr := range req.FromAddress { + if addr != nil { + it := ac.TraceFromIterator(addr.Bytes(), fromTxNum, toTxNum, dbtx) + for it.HasNext() { + allTxs.Add(it.Next()) + } + fromAddresses[*addr] = struct{}{} + } + } + + for _, addr := range req.ToAddress { + if addr != nil { + it := ac.TraceToIterator(addr.Bytes(), fromTxNum, toTxNum, dbtx) + for it.HasNext() { + txsTo.Add(it.Next()) + } + toAddresses[*addr] = struct{}{} + } + } + + switch req.Mode { + case TraceFilterModeIntersection: + allTxs.And(&txsTo) + case TraceFilterModeUnion: + fallthrough + default: + allTxs.Or(&txsTo) + } + + // Special case - if no addresses specified, take all traces + if len(req.FromAddress) == 0 && len(req.ToAddress) == 0 { + allTxs.AddRange(fromTxNum, toTxNum+1) + } else { + allTxs.RemoveRange(0, fromTxNum) + allTxs.RemoveRange(toTxNum, uint64(0x1000000000000)) + } + + chainConfig, err := api.chainConfig(dbtx) + if err != nil { + return err + } + + var json = jsoniter.ConfigCompatibleWithStandardLibrary + stream.WriteArrayStart() + first := true + // Execute all transactions in picked blocks + + count := uint64(^uint(0)) // this just makes it easier to use below + if req.Count != nil { + count = *req.Count + } + after := uint64(0) // this just makes it easier to use below + if req.After != nil { + after = *req.After + } + nSeen := uint64(0) + nExported := uint64(0) + includeAll := len(fromAddresses) == 0 && len(toAddresses) == 0 + it := allTxs.Iterator() + var lastBlockNum uint64 + var lastBlockHash common.Hash + var lastHeader *types.Header + var lastSigner *types.Signer + var lastRules *params.Rules + stateReader := state.NewHistoryReader22(ac) + stateReader.SetTx(dbtx) + noop := state.NewNoopWriter() + for it.HasNext() { + txNum := it.Next() + // Find block number + ok, blockNum, err := rawdb.TxNums.FindBlockNum(dbtx, txNum) + if err != nil { + return err + } + if !ok { + return 
nil + } + if blockNum > lastBlockNum { + if lastHeader, err = api._blockReader.HeaderByNumber(ctx, dbtx, blockNum); err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue + } + lastBlockNum = blockNum + lastBlockHash = lastHeader.Hash() + lastSigner = types.MakeSigner(chainConfig, blockNum) + lastRules = chainConfig.Rules(blockNum) + } + maxTxNum, err := rawdb.TxNums.Max(dbtx, blockNum) + if err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue + } + if txNum+1 == maxTxNum { + body, _, err := api._blockReader.Body(ctx, dbtx, lastBlockHash, blockNum) + if err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue + } + // Block reward section, handle specially + minerReward, uncleRewards := ethash.AccumulateRewards(chainConfig, lastHeader, body.Uncles) + if _, ok := toAddresses[lastHeader.Coinbase]; ok || includeAll { + nSeen++ + var tr ParityTrace + var rewardAction = &RewardTraceAction{} + rewardAction.Author = lastHeader.Coinbase + rewardAction.RewardType = "block" // nolint: goconst + rewardAction.Value.ToInt().Set(minerReward.ToBig()) + tr.Action = rewardAction + tr.BlockHash = &common.Hash{} + copy(tr.BlockHash[:], lastBlockHash.Bytes()) + tr.BlockNumber = new(uint64) + *tr.BlockNumber = blockNum + tr.Type = "reward" // nolint: goconst + tr.TraceAddress = []int{} + b, err := json.Marshal(tr) + if err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue + } + if nSeen > after && nExported < count { + if first { + first = false + } else { + stream.WriteMore() + } + stream.Write(b) + nExported++ + } + 
} + for i, uncle := range body.Uncles { + if _, ok := toAddresses[uncle.Coinbase]; ok || includeAll { + if i < len(uncleRewards) { + nSeen++ + var tr ParityTrace + rewardAction := &RewardTraceAction{} + rewardAction.Author = uncle.Coinbase + rewardAction.RewardType = "uncle" // nolint: goconst + rewardAction.Value.ToInt().Set(uncleRewards[i].ToBig()) + tr.Action = rewardAction + tr.BlockHash = &common.Hash{} + copy(tr.BlockHash[:], lastBlockHash[:]) + tr.BlockNumber = new(uint64) + *tr.BlockNumber = blockNum + tr.Type = "reward" // nolint: goconst + tr.TraceAddress = []int{} + b, err := json.Marshal(tr) + if err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue + } + if nSeen > after && nExported < count { + if first { + first = false + } else { + stream.WriteMore() + } + stream.Write(b) + nExported++ + } + } + } + } + continue + } + var startTxNum uint64 + if blockNum > 0 { + startTxNum, err = rawdb.TxNums.Min(dbtx, blockNum) + if err != nil { + return err + } + } + txIndex := txNum - startTxNum - 1 + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txNum, blockNum, txIndex) + txn, err := api._txnReader.TxnByIdxInBlock(ctx, dbtx, blockNum, int(txIndex)) + if err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue + } + txHash := txn.Hash() + msg, err := txn.AsMessage(*lastSigner, lastHeader.BaseFee, lastRules) + if err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue + } + blockCtx, txCtx := transactions.GetEvmContext(msg, lastHeader, true /* requireCanonical */, dbtx, api._blockReader) + stateReader.SetTxNum(txNum) + stateCache := shards.NewStateCache(32, 0 /* no limit */) // this cache living only during 
current RPC call, but required to store state writes + cachedReader := state.NewCachedReader(stateReader, stateCache) + cachedWriter := state.NewCachedWriter(noop, stateCache) + vmConfig := vm.Config{} + vmConfig.SkipAnalysis = core.SkipAnalysis(chainConfig, blockNum) + traceResult := &TraceCallResult{Trace: []*ParityTrace{}} + var ot OeTracer + ot.compat = api.compatibility + ot.r = traceResult + ot.idx = []string{fmt.Sprintf("%d-", txIndex)} + ot.traceAddr = []int{} + vmConfig.Debug = true + vmConfig.Tracer = &ot + ibs := state.New(cachedReader) + evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vmConfig) + + gp := new(core.GasPool).AddGas(msg.Gas()) + ibs.Prepare(txHash, lastBlockHash, int(txIndex)) + var execResult *core.ExecutionResult + execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) + if err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue + } + traceResult.Output = common.CopyBytes(execResult.ReturnData) + if err = ibs.FinalizeTx(evm.ChainRules(), noop); err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue + } + if err = ibs.CommitBlock(evm.ChainRules(), cachedWriter); err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue + } + for _, pt := range traceResult.Trace { + if includeAll || filter_trace(pt, fromAddresses, toAddresses) { + nSeen++ + pt.BlockHash = &lastBlockHash + pt.BlockNumber = &blockNum + pt.TransactionHash = &txHash + pt.TransactionPosition = &txIndex + b, err := json.Marshal(pt) + if err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + 
stream.WriteObjectEnd() + continue + } + if nSeen > after && nExported < count { + if first { + first = false + } else { + stream.WriteMore() + } + stream.Write(b) + nExported++ + } + } + } + } + stream.WriteArrayEnd() + return stream.Flush() +} + func filter_trace(pt *ParityTrace, fromAddresses map[common.Address]struct{}, toAddresses map[common.Address]struct{}) bool { switch action := pt.Action.(type) { case *CallTraceAction: diff --git a/cmd/rpcdaemon/commands/trace_types.go b/cmd/rpcdaemon/commands/trace_types.go index 2b98c70bdb0..c0d8d6e640f 100644 --- a/cmd/rpcdaemon/commands/trace_types.go +++ b/cmd/rpcdaemon/commands/trace_types.go @@ -94,7 +94,7 @@ type SuicideTraceAction struct { type RewardTraceAction struct { Author common.Address `json:"author"` RewardType string `json:"rewardType"` - Value hexutil.Big `json:"value"` + Value hexutil.Big `json:"value,omitempty"` } type CreateTraceResult struct { diff --git a/cmd/rpcdaemon/commands/tracing.go b/cmd/rpcdaemon/commands/tracing.go index a69e3791db8..7ff0707b48f 100644 --- a/cmd/rpcdaemon/commands/tracing.go +++ b/cmd/rpcdaemon/commands/tracing.go @@ -18,7 +18,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/tracers" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" @@ -43,10 +42,16 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp return err } defer tx.Rollback() - var block *types.Block - if number, ok := blockNrOrHash.Number(); ok { + var ( + block *types.Block + number rpc.BlockNumber + numberOk bool + hash common.Hash + hashOk bool + ) + if number, numberOk = blockNrOrHash.Number(); numberOk { block, err = api.blockByRPCNumber(number, tx) - } else if hash, ok := blockNrOrHash.Hash(); ok { + } else if hash, hashOk = blockNrOrHash.Hash(); hashOk { block, err = 
api.blockByHashWithSenders(tx, hash) } else { return fmt.Errorf("invalid arguments; neither block nor hash specified") @@ -57,17 +62,19 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp return err } + if block == nil { + if numberOk { + return fmt.Errorf("invalid arguments; block with number %d not found", number) + } + return fmt.Errorf("invalid arguments; block with hash %x not found", hash) + } + chainConfig, err := api.chainConfig(tx) if err != nil { stream.WriteNil() return err } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - getHeader := func(hash common.Hash, number uint64) *types.Header { h, e := api._blockReader.Header(ctx, tx, hash, number) if e != nil { @@ -76,7 +83,7 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp return h } - _, blockCtx, _, ibs, reader, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, block.Hash(), 0) + _, blockCtx, _, ibs, reader, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, ethash.NewFaker(), tx, block.Hash(), 0) if err != nil { stream.WriteNil() return err @@ -100,7 +107,7 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp GasPrice: msg.GasPrice().ToBig(), } - transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream) + transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream, api.evmCallTimeout) _ = ibs.FinalizeTx(rules, reader) if idx != len(block.Transactions())-1 { stream.WriteMore() @@ -173,17 +180,13 @@ func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash commo getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(tx, hash, number) } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if 
api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, blockHash, txnIndex) + msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, ethash.NewFaker(), tx, blockHash, txnIndex) if err != nil { stream.WriteNil() return err } // Trace the transaction and return - return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream) + return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream, api.evmCallTimeout) } func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { @@ -213,7 +216,7 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA } stateReader = state.NewCachedReader2(cacheView, dbtx) } else { - stateReader = state.NewPlainState(dbtx, blockNumber) + stateReader = state.NewPlainState(dbtx, blockNumber+1) } header := rawdb.ReadHeader(dbtx, hash, blockNumber) if header == nil { @@ -241,13 +244,9 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA return err } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(dbtx) - } - blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, dbtx, contractHasTEVM, api._blockReader) + blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, dbtx, api._blockReader) // Trace the transaction and return - return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream) + return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream, api.evmCallTimeout) } func (api *PrivateDebugAPIImpl) 
TraceCallMany(ctx context.Context, bundles []Bundle, simulateContext StateContext, config *tracers.TraceConfig, stream *jsoniter.Stream) error { @@ -317,8 +316,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun replayTransactions = block.Transactions()[:transactionIndex] - stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), api.filters, api.stateCache) - + stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), api.filters, api.stateCache, api.historyV3(tx), api._agg) if err != nil { stream.WriteNil() return err @@ -337,12 +335,6 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun signer := types.MakeSigner(chainConfig, blockNum) rules := chainConfig.Rules(blockNum) - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - getHash := func(i uint64) common.Hash { if hash, ok := overrideBlockHash[i]; ok { return hash @@ -359,16 +351,15 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun } blockCtx = vm.BlockContext{ - CanTransfer: core.CanTransfer, - Transfer: core.Transfer, - GetHash: getHash, - ContractHasTEVM: contractHasTEVM, - Coinbase: parent.Coinbase, - BlockNumber: parent.Number.Uint64(), - Time: parent.Time, - Difficulty: new(big.Int).Set(parent.Difficulty), - GasLimit: parent.GasLimit, - BaseFee: &baseFee, + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + GetHash: getHash, + Coinbase: parent.Coinbase, + BlockNumber: parent.Number.Uint64(), + Time: parent.Time, + Difficulty: new(big.Int).Set(parent.Difficulty), + GasLimit: parent.GasLimit, + BaseFee: &baseFee, } evm = vm.NewEVM(blockCtx, txCtx, st, chainConfig, vm.Config{Debug: false}) @@ -419,7 +410,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, 
bundles []Bun txCtx = core.NewEVMTxContext(msg) ibs := evm.IntraBlockState().(*state.IntraBlockState) ibs.Prepare(common.Hash{}, parent.Hash(), txn_index) - err = transactions.TraceTx(ctx, msg, blockCtx, txCtx, evm.IntraBlockState(), config, chainConfig, stream) + err = transactions.TraceTx(ctx, msg, blockCtx, txCtx, evm.IntraBlockState(), config, chainConfig, stream, api.evmCallTimeout) if err != nil { stream.WriteNil() diff --git a/cmd/rpcdaemon/commands/txpool_api_test.go b/cmd/rpcdaemon/commands/txpool_api_test.go index 9fbe268e5d1..1a6c993d289 100644 --- a/cmd/rpcdaemon/commands/txpool_api_test.go +++ b/cmd/rpcdaemon/commands/txpool_api_test.go @@ -15,6 +15,7 @@ import ( "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages" @@ -23,6 +24,9 @@ import ( func TestTxPoolContent(t *testing.T) { m, require := stages.MockWithTxPool(t), require.New(t) + if m.HistoryV3 { + t.Skip("HistoryV3: please implement StateStream support") + } chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, b *core.BlockGen) { b.SetCoinbase(common.Address{1}) }, false /* intermediateHashes */) @@ -33,7 +37,8 @@ func TestTxPoolContent(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) txPool := txpool.NewTxpoolClient(conn) ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) - api := NewTxPoolAPI(NewBaseApi(ff, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, txPool) + agg := m.HistoryV3Components() + api := NewTxPoolAPI(NewBaseApi(ff, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout), m.DB, txPool) expectValue := uint64(1234) txn, err := 
types.SignTx(types.NewTransaction(0, common.Address{1}, uint256.NewInt(expectValue), params.TxGas, uint256.NewInt(10*params.GWei), nil), *types.LatestSignerForChainID(m.ChainConfig.ChainID), m.Key) diff --git a/cmd/rpcdaemon/commands/validator_set.go b/cmd/rpcdaemon/commands/validator_set.go index 5ebbe35217a..5ba50c09c49 100644 --- a/cmd/rpcdaemon/commands/validator_set.go +++ b/cmd/rpcdaemon/commands/validator_set.go @@ -578,14 +578,15 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*bor.Validator, allowDel // UpdateWithChangeSet attempts to update the validator set with 'changes'. // It performs the following steps: -// - validates the changes making sure there are no duplicates and splits them in updates and deletes -// - verifies that applying the changes will not result in errors -// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities -// across old and newly added validators are fair -// - computes the priorities of new validators against the final set -// - applies the updates against the validator set -// - applies the removals against the validator set -// - performs scaling and centering of priority values +// - validates the changes making sure there are no duplicates and splits them in updates and deletes +// - verifies that applying the changes will not result in errors +// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities +// across old and newly added validators are fair +// - computes the priorities of new validators against the final set +// - applies the updates against the validator set +// - applies the removals against the validator set +// - performs scaling and centering of priority values +// // If an error is detected during verification steps, it is returned and the validator set // is not changed. 
func (vals *ValidatorSet) UpdateWithChangeSet(changes []*bor.Validator) error { diff --git a/cmd/rpcdaemon/health/health_test.go b/cmd/rpcdaemon/health/health_test.go index e90f840127c..39c71c9cad1 100644 --- a/cmd/rpcdaemon/health/health_test.go +++ b/cmd/rpcdaemon/health/health_test.go @@ -4,7 +4,7 @@ import ( "context" "encoding/json" "errors" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -377,7 +377,7 @@ func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { t.Errorf("%v: expected status code: %v, but got: %v", idx, c.expectedStatusCode, result.StatusCode) } - bodyBytes, err := ioutil.ReadAll(result.Body) + bodyBytes, err := io.ReadAll(result.Body) if err != nil { t.Errorf("%v: reading response body: %s", idx, err) } @@ -504,7 +504,7 @@ func TestProcessHealthcheckIfNeeded_RequestBody(t *testing.T) { t.Errorf("%v: creating request: %v", idx, err) } - r.Body = ioutil.NopCloser(strings.NewReader(c.body)) + r.Body = io.NopCloser(strings.NewReader(c.body)) netAPI := rpc.API{ Namespace: "", @@ -537,7 +537,7 @@ func TestProcessHealthcheckIfNeeded_RequestBody(t *testing.T) { t.Errorf("%v: expected status code: %v, but got: %v", idx, c.expectedStatusCode, result.StatusCode) } - bodyBytes, err := ioutil.ReadAll(result.Body) + bodyBytes, err := io.ReadAll(result.Body) if err != nil { t.Errorf("%v: reading response body: %s", idx, err) } diff --git a/cmd/rpcdaemon/main.go b/cmd/rpcdaemon/main.go index 3d273486b83..2f6d9762218 100644 --- a/cmd/rpcdaemon/main.go +++ b/cmd/rpcdaemon/main.go @@ -16,7 +16,7 @@ func main() { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() logger := log.New() - db, borDb, backend, txPool, mining, starknet, stateCache, blockReader, ff, err := cli.RemoteServices(ctx, *cfg, logger, rootCancel) + db, borDb, backend, txPool, mining, stateCache, blockReader, ff, agg, err := cli.RemoteServices(ctx, *cfg, logger, rootCancel) if err != nil { log.Error("Could not connect to DB", "err", err) 
return nil @@ -26,8 +26,8 @@ func main() { defer borDb.Close() } - apiList := commands.APIList(db, borDb, backend, txPool, mining, starknet, ff, stateCache, blockReader, *cfg) - if err := cli.StartRpcServer(ctx, *cfg, apiList); err != nil { + apiList := commands.APIList(db, borDb, backend, txPool, mining, ff, stateCache, blockReader, agg, *cfg) + if err := cli.StartRpcServer(ctx, *cfg, apiList, nil); err != nil { log.Error(err.Error()) return nil } diff --git a/cmd/rpcdaemon/rpcdaemontest/test_util.go b/cmd/rpcdaemon/rpcdaemontest/test_util.go index c802052e466..45aaf35b2df 100644 --- a/cmd/rpcdaemon/rpcdaemontest/test_util.go +++ b/cmd/rpcdaemon/rpcdaemontest/test_util.go @@ -11,7 +11,6 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" - "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/accounts/abi/bind" @@ -28,6 +27,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/test/bufconn" ) @@ -295,7 +295,6 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, false)) txpool.RegisterTxpoolServer(server, m.TxPoolGrpcServer) txpool.RegisterMiningServer(server, privateapi.NewMiningServer(ctx, &IsMiningMock{}, ethashApi)) - starknet.RegisterCAIROVMServer(server, &starknet.UnimplementedCAIROVMServer{}) listener := bufconn.Listen(1024 * 1024) dialer := func() func(context.Context, string) (net.Conn, error) { @@ -309,7 +308,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g } } - conn, err := grpc.DialContext(ctx, "", 
grpc.WithInsecure(), grpc.WithContextDialer(dialer())) + conn, err := grpc.DialContext(ctx, "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialer())) if err != nil { t.Fatal(err) } diff --git a/cmd/rpcdaemon/rpcservices/eth_backend.go b/cmd/rpcdaemon/rpcservices/eth_backend.go index 248345a003c..f9a344bac51 100644 --- a/cmd/rpcdaemon/rpcservices/eth_backend.go +++ b/cmd/rpcdaemon/rpcservices/eth_backend.go @@ -1,6 +1,7 @@ package rpcservices import ( + "bytes" "context" "encoding/json" "errors" @@ -177,7 +178,7 @@ func (back *RemoteBackend) BodyWithTransactions(ctx context.Context, tx kv.Gette func (back *RemoteBackend) BodyRlp(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (bodyRlp rlp.RawValue, err error) { return back.blockReader.BodyRlp(ctx, tx, hash, blockHeight) } -func (back *RemoteBackend) Body(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) { +func (back *RemoteBackend) Body(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, txAmount uint32, err error) { return back.blockReader.Body(ctx, tx, hash, blockHeight) } func (back *RemoteBackend) Header(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (*types.Header, error) { @@ -289,3 +290,21 @@ func (back *RemoteBackend) Peers(ctx context.Context) ([]*p2p.PeerInfo, error) { return peers, nil } + +func (back *RemoteBackend) PendingBlock(ctx context.Context) (*types.Block, error) { + blockRlp, err := back.remoteEthBackend.PendingBlock(ctx, &emptypb.Empty{}) + if err != nil { + return nil, fmt.Errorf("ETHBACKENDClient.PendingBlock() error: %w", err) + } + if blockRlp == nil { + return nil, nil + } + + var block types.Block + err = rlp.Decode(bytes.NewReader(blockRlp.BlockRlp), &block) + if err != nil { + return nil, fmt.Errorf("decoding block from %x: %w", blockRlp, err) + } + + return &block, nil +} diff --git 
a/cmd/rpcdaemon/rpcservices/eth_starknet.go b/cmd/rpcdaemon/rpcservices/eth_starknet.go deleted file mode 100644 index 6dcc02d448d..00000000000 --- a/cmd/rpcdaemon/rpcservices/eth_starknet.go +++ /dev/null @@ -1,31 +0,0 @@ -package rpcservices - -import ( - "github.com/ledgerwatch/erigon-lib/gointerfaces" - "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" - types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types" - "github.com/ledgerwatch/log/v3" - "google.golang.org/grpc" -) - -// StarknetAPIVersion -var StarknetAPIVersion = &types2.VersionReply{Major: 1, Minor: 0, Patch: 0} - -type StarknetService struct { - starknet.CAIROVMClient - log log.Logger - version gointerfaces.Version -} - -func NewStarknetService(cc grpc.ClientConnInterface) *StarknetService { - return &StarknetService{ - CAIROVMClient: starknet.NewCAIROVMClient(cc), - version: gointerfaces.VersionFromProto(StarknetAPIVersion), - log: log.New("remote_service", "starknet"), - } -} - -func (s *StarknetService) EnsureVersionCompatibility() bool { - //TODO: add version check - return true -} diff --git a/cmd/rpcdaemon22/.gitignore b/cmd/rpcdaemon22/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/cmd/rpcdaemon22/README.md b/cmd/rpcdaemon22/README.md deleted file mode 100644 index 50ad1581831..00000000000 --- a/cmd/rpcdaemon22/README.md +++ /dev/null @@ -1,485 +0,0 @@ -- [Introduction](#introduction) -- [Getting Started](#getting-started) - * [Running locally](#running-locally) - * [Running remotely](#running-remotely) - * [Healthcheck](#healthcheck) - * [Testing](#testing) -- [FAQ](#faq) - * [Relations between prune options and rpc methods](#relations-between-prune-options-and-rpc-method) - * [RPC Implementation Status](#rpc-implementation-status) - * [Securing the communication between RPC daemon and Erigon instance via TLS and authentication](#securing-the-communication-between-rpc-daemon-and-erigon-instance-via-tls-and-authentication) - * 
[Ethstats](#ethstats) - * [Allowing only specific methods (Allowlist)](#allowing-only-specific-methods--allowlist-) - * [Trace transactions progress](#trace-transactions-progress) - * [Clients getting timeout, but server load is low](#clients-getting-timeout--but-server-load-is-low) - * [Server load too high](#server-load-too-high) - * [Faster Batch requests](#faster-batch-requests) -- [For Developers](#for-developers) - * [Code generation](#code-generation) - -## Introduction - -Erigon's `rpcdaemon` runs in its own seperate process. - -This brings many benefits including easier development, the ability to run multiple daemons at once, and the ability to -run the daemon remotely. It is possible to run the daemon locally as well (read-only) if both processes have access to -the data folder. - -## Getting Started - -The `rpcdaemon` gets built as part of the main `erigon` build process, but you can build it directly with this command: - -```[bash] -make rpcdaemon -``` - -### Running locally - -Run `rpcdaemon` on same computer with Erigon. It's default option because it using Shared Memory access to Erigon's db - -it's much faster than TCP access. Provide both `--datadir` and `--private.api.addr` flags: - -```[bash] -make erigon -./build/bin/erigon --datadir= --private.api.addr=localhost:9090 -make rpcdaemon -./build/bin/rpcdaemon --datadir= --txpool.api.addr=localhost:9090 --private.api.addr=localhost:9090 --http.api=eth,erigon,web3,net,debug,trace,txpool -``` - -Note that we've also specified which RPC namespaces to enable in the above command by `--http.api` flag. 
- -### Running remotely - -To start the daemon remotely - just don't set `--datadir` flag: - -```[bash] -make erigon -./build/bin/erigon --datadir= --private.api.addr=0.0.0.0:9090 -make rpcdaemon -./build/bin/rpcdaemon --private.api.addr=:9090 --txpool.api.addr=localhost:9090 --http.api=eth,erigon,web3,net,debug,trace,txpool -``` - -The daemon should respond with something like: - -```[bash] -INFO [date-time] HTTP endpoint opened url=localhost:8545... -``` - -When RPC daemon runs remotely, by default it maintains a state cache, which is updated every time when Erigon imports a -new block. When state cache is reasonably warm, it allows such remote RPC daemon to execute queries related to `latest` -block (i.e. to current state) with comparable performance to a local RPC daemon -(around 2x slower vs 10x slower without state cache). Since there can be multiple such RPC daemons per one Erigon node, -it may scale well for some workloads that are heavy on the current state queries. - -### Healthcheck - -Running the daemon also opens an endpoint `/health` that provides a basic health check. - -If the health check is successful it returns 200 OK. - -If the health check fails it returns 500 Internal Server Error. - -Configuration of the health check is sent as POST body of the method. - -``` -{ - "min_peer_count": , - "known_block": -} -``` - -Not adding a check disables that. - -**`min_peer_count`** -- checks for mimimum of healthy node peers. Requires -`net` namespace to be listed in `http.api`. - -**`known_block`** -- sets up the block that node has to know about. Requires -`eth` namespace to be listed in `http.api`. - -Example request -```http POST http://localhost:8545/health --raw '{"min_peer_count": 3, "known_block": "0x1F"}'``` -Example response - -``` -{ - "check_block": "HEALTHY", - "healthcheck_query": "HEALTHY", - "min_peer_count": "HEALTHY" -} -``` - -### Testing - -By default, the `rpcdaemon` serves data from `localhost:8545`. 
You may send `curl` commands to see if things are -working. - -Try `eth_blockNumber` for example. In a third terminal window enter this command: - -```[bash] -curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id":1}' localhost:8545 -``` - -This should return something along the lines of this (depending on how far your Erigon node has synced): - -```[bash] -{ - "jsonrpc": "2.0", - "id": 1, - "result":" 0xa5b9ba" -} -``` - -Also, there -are [extensive instructions for using Postman](https://github.com/ledgerwatch/erigon/wiki/Using-Postman-to-Test-TurboGeth-RPC) -to test the RPC. - -## FAQ - -### Relations between prune options and RPC methods - -Next options available (by `--prune` flag): - -``` -* h - prune history (ChangeSets, HistoryIndices - used to access historical state, like eth_getStorageAt, eth_getBalanceAt, debug_traceTransaction, trace_block, trace_transaction, etc.) -* r - prune receipts (Receipts, Logs, LogTopicIndex, LogAddressIndex - used by eth_getLogs and similar RPC methods) -* t - prune tx lookup (used to get transaction by hash) -* c - prune call traces (used by trace_filter method) -``` - -By default data pruned after 90K blocks, can change it by flags like `--prune.history.after=100_000` - -Some methods, if not found historical data in DB, can fallback to old blocks re-execution - but it require `h`. - -### RPC Implementation Status - -Label "remote" means: `--private.api.addr` flag is required. - -The following table shows the current implementation status of Erigon's RPC daemon. - -| Command | Avail | Notes | -| ------------------------------------------ | ------- | ------------------------------------------ | -| web3_clientVersion | Yes | | -| web3_sha3 | Yes | | -| | | | -| net_listening | HC | (`remote` hard coded returns true) | -| net_peerCount | Limited | internal sentries only | -| net_version | Yes | `remote`. 
| -| | | | -| eth_blockNumber | Yes | | -| eth_chainID/eth_chainId | Yes | | -| eth_protocolVersion | Yes | | -| eth_syncing | Yes | | -| eth_gasPrice | Yes | | -| eth_maxPriorityFeePerGas | Yes | | -| eth_feeHistory | Yes | | -| | | | -| eth_getBlockByHash | Yes | | -| eth_getBlockByNumber | Yes | | -| eth_getBlockTransactionCountByHash | Yes | | -| eth_getBlockTransactionCountByNumber | Yes | | -| eth_getUncleByBlockHashAndIndex | Yes | | -| eth_getUncleByBlockNumberAndIndex | Yes | | -| eth_getUncleCountByBlockHash | Yes | | -| eth_getUncleCountByBlockNumber | Yes | | -| | | | -| eth_getTransactionByHash | Yes | | -| eth_getRawTransactionByHash | Yes | | -| eth_getTransactionByBlockHashAndIndex | Yes | | -| eth_retRawTransactionByBlockHashAndIndex | Yes | | -| eth_getTransactionByBlockNumberAndIndex | Yes | | -| eth_retRawTransactionByBlockNumberAndIndex | Yes | | -| eth_getTransactionReceipt | Yes | | -| eth_getBlockReceipts | Yes | | -| | | | -| eth_estimateGas | Yes | | -| eth_getBalance | Yes | | -| eth_getCode | Yes | | -| eth_getTransactionCount | Yes | | -| eth_getStorageAt | Yes | | -| eth_call | Yes | | -| eth_callBundle | Yes | | -| eth_createAccessList | Yes | -| | | | -| eth_newFilter | - | not yet implemented | -| eth_newBlockFilter | - | not yet implemented | -| eth_newPendingTransactionFilter | - | not yet implemented | -| eth_getFilterChanges | - | not yet implemented | -| eth_uninstallFilter | - | not yet implemented | -| eth_getLogs | Yes | | -| | | | -| eth_accounts | No | deprecated | -| eth_sendRawTransaction | Yes | `remote`. | -| eth_sendTransaction | - | not yet implemented | -| eth_sign | No | deprecated | -| eth_signTransaction | - | not yet implemented | -| eth_signTypedData | - | ???? 
| -| | | | -| eth_getProof | - | not yet implemented | -| | | | -| eth_mining | Yes | returns true if --mine flag provided | -| eth_coinbase | Yes | | -| eth_hashrate | Yes | | -| eth_submitHashrate | Yes | | -| eth_getWork | Yes | | -| eth_submitWork | Yes | | -| | | | -| eth_subscribe | Limited | Websock Only - newHeads, | -| | | newPendingTransactions | -| eth_unsubscribe | Yes | Websock Only | -| | | | -| engine_newPayloadV1 | Yes | | -| engine_forkchoiceUpdatedV1 | Yes | | -| engine_getPayloadV1 | Yes | | -| engine_exchangeTransitionConfigurationV1 | Yes | | -| | | | -| debug_accountRange | Yes | Private Erigon debug module | -| debug_accountAt | Yes | Private Erigon debug module | -| debug_getModifiedAccountsByNumber | Yes | | -| debug_getModifiedAccountsByHash | Yes | | -| debug_storageRangeAt | Yes | | -| debug_traceBlockByHash | Yes | Streaming (can handle huge results) | -| debug_traceBlockByNumber | Yes | Streaming (can handle huge results) | -| debug_traceTransaction | Yes | Streaming (can handle huge results) | -| debug_traceCall | Yes | Streaming (can handle huge results) | -| | | | -| trace_call | Yes | | -| trace_callMany | Yes | | -| trace_rawTransaction | - | not yet implemented (come help!) | -| trace_replayBlockTransactions | yes | stateDiff only (come help!) | -| trace_replayTransaction | yes | stateDiff only (come help!) 
| -| trace_block | Yes | | -| trace_filter | Yes | no pagination, but streaming | -| trace_get | Yes | | -| trace_transaction | Yes | | -| | | | -| txpool_content | Yes | `remote` | -| txpool_status | Yes | `remote` | -| | | | -| eth_getCompilers | No | deprecated | -| eth_compileLLL | No | deprecated | -| eth_compileSolidity | No | deprecated | -| eth_compileSerpent | No | deprecated | -| | | | -| db_putString | No | deprecated | -| db_getString | No | deprecated | -| db_putHex | No | deprecated | -| db_getHex | No | deprecated | -| | | | -| erigon_getHeaderByHash | Yes | Erigon only | -| erigon_getHeaderByNumber | Yes | Erigon only | -| erigon_getLogsByHash | Yes | Erigon only | -| erigon_forks | Yes | Erigon only | -| erigon_issuance | Yes | Erigon only | -| erigon_GetBlockByTimestamp | Yes | Erigon only | -| | | | -| starknet_call | Yes | Starknet only | -| | | | -| bor_getSnapshot | Yes | Bor only | -| bor_getAuthor | Yes | Bor only | -| bor_getSnapshotAtHash | Yes | Bor only | -| bor_getSigners | Yes | Bor only | -| bor_getSignersAtHash | Yes | Bor only | -| bor_getCurrentProposer | Yes | Bor only | -| bor_getCurrentValidators | Yes | Bor only | -| bor_getRootHash | Yes | Bor only | - -This table is constantly updated. Please visit again. - -### Securing the communication between RPC daemon and Erigon instance via TLS and authentication - -In some cases, it is useful to run Erigon nodes in a different network (for example, in a Public cloud), but RPC daemon -locally. To ensure the integrity of communication and access control to the Erigon node, TLS authentication can be -enabled. On the high level, the process consists of these steps (this process needs to be done for any "cluster" of -Erigon and RPC daemon nodes that are supposed to work together): - -1. Generate key pair for the Certificate Authority (CA). The private key of CA will be used to authorise new Erigon - instances as well as new RPC daemon instances, so that they can mutually authenticate. -2. 
Create CA certificate file that needs to be deployed on any Erigon instance and any RPC daemon. This CA cerf file is - used as a "root of trust", whatever is in it, will be trusted by the participants when they authenticate their - counterparts. -3. For each Erigon instance and each RPC daemon instance, generate a key pair. If you are lazy, you can generate one - pair for all Erigon nodes, and one pair for all RPC daemons, and copy these keys around. -4. Using the CA private key, create cerificate file for each public key generated on the previous step. This - effectively "inducts" these keys into the "cluster of trust". -5. On each instance, deploy 3 files - CA certificate, instance key, and certificate signed by CA for this instance key. - -Following is the detailed description of how it can be done using `openssl` suite of tools. - -Generate CA key pair using Elliptic Curve (as opposed to RSA). The generated CA key will be in the file `CA-key.pem`. -Access to this file will allow anyone to later include any new instance key pair into the "cluster of trust", so keep it -secure. - -``` -openssl ecparam -name prime256v1 -genkey -noout -out CA-key.pem -``` - -Create CA self-signed certificate (this command will ask questions, answers aren't important for now). 
The file created -by this command is `CA-cert.pem` - -``` -openssl req -x509 -new -nodes -key CA-key.pem -sha256 -days 3650 -out CA-cert.pem -``` - -For Erigon node, generate a key pair: - -``` -openssl ecparam -name prime256v1 -genkey -noout -out erigon-key.pem -``` - -Also, generate one for the RPC daemon: - -``` -openssl ecparam -name prime256v1 -genkey -noout -out RPC-key.pem -``` - -Now create certificate signing request for Erigon key pair: - -``` -openssl req -new -key erigon-key.pem -out erigon.csr -``` - -And from this request, produce the certificate (signed by CA), proving that this key is now part of the "cluster of -trust" - -``` -openssl x509 -req -in erigon.csr -CA CA-cert.pem -CAkey CA-key.pem -CAcreateserial -out erigon.crt -days 3650 -sha256 -``` - -Then, produce the certificate signing request for RPC daemon key pair: - -``` -openssl req -new -key RPC-key.pem -out RPC.csr -``` - -And from this request, produce the certificate (signed by CA), proving that this key is now part of the "cluster of -trust" - -``` -openssl x509 -req -in RPC.csr -CA CA-cert.pem -CAkey CA-key.pem -CAcreateserial -out RPC.crt -days 3650 -sha256 -``` - -When this is all done, these three files need to be placed on the machine where Erigon is running: `CA-cert.pem` -, `erigon-key.pem`, `erigon.crt`. And Erigon needs to be run with these extra options: - -``` ---tls --tls.cacert CA-cert.pem --tls.key erigon-key.pem --tls.cert erigon.crt -``` - -On the RPC daemon machine, these three files need to be placed: `CA-cert.pem`, `RPC-key.pem`, and `RPC.crt`. And RPC -daemon needs to be started with these extra options: - -``` ---tls.key RPC-key.pem --tls.cacert CA-cert.pem --tls.cert RPC.crt -``` - -**WARNING** Normally, the "client side" (which in our case is RPC daemon), verifies that the host name of the server -matches the "Common Name" attribute of the "server" cerificate. 
At this stage, this verification is turned off, and it -will be turned on again once we have updated the instruction above on how to properly generate cerificates with "Common -Name". - -When running Erigon instance in the Google Cloud, for example, you need to specify the **Internal IP** in -the `--private.api.addr` option. And, you will need to open the firewall on the port you are using, to that connection -to the Erigon instances can be made. - -### Ethstats - -This version of the RPC daemon is compatible with [ethstats-client](https://github.com/goerli/ethstats-client). - -To run ethstats, run the RPC daemon remotely and open some of the APIs. - -`./build/bin/rpcdaemon --private.api.addr=localhost:9090 --http.api=net,eth,web3` - -Then update your `app.json` for ethstats-client like that: - -```json -[ - { - "name": "ethstats", - "script": "app.js", - "log_date_format": "YYYY-MM-DD HH:mm Z", - "merge_logs": false, - "watch": false, - "max_restarts": 10, - "exec_interpreter": "node", - "exec_mode": "fork_mode", - "env": { - "NODE_ENV": "production", - "RPC_HOST": "localhost", - "RPC_PORT": "8545", - "LISTENING_PORT": "30303", - "INSTANCE_NAME": "Erigon node", - "CONTACT_DETAILS": , - "WS_SERVER": "wss://ethstats.net/api", - "WS_SECRET": , - "VERBOSITY": 2 - } - } -] -``` - -Run ethstats-client through pm2 as usual. - -You will see these warnings in the RPC daemon output, but they are expected - -``` -WARN [11-05|09:03:47.911] Served conn=127.0.0.1:59753 method=eth_newBlockFilter reqid=5 t="21.194µs" err="the method eth_newBlockFilter does not exist/is not available" -WARN [11-05|09:03:47.911] Served conn=127.0.0.1:59754 method=eth_newPendingTransactionFilter reqid=6 t="9.053µs" err="the method eth_newPendingTransactionFilter does not exist/is not available" -``` - -### Allowing only specific methods (Allowlist) - -In some cases you might want to only allow certain methods in the namespaces and hide others. That is possible -with `rpc.accessList` flag. - -1. 
Create a file, say, `rules.json` - -2. Add the following content - -```json -{ - "allow": [ - "net_version", - "web3_eth_getBlockByHash" - ] -} -``` - -3. Provide this file to the rpcdaemon using `--rpc.accessList` flag - -``` -> rpcdaemon --private.api.addr=localhost:9090 --http.api=eth,debug,net,web3 --rpc.accessList=rules.json -``` - -Now only these two methods are available. - -### Clients getting timeout, but server load is low - -In this case: increase default rate-limit - amount of requests server handle simultaneously - requests over this limit -will wait. Increase it - if your 'hot data' is small or have much RAM or see "request timeout" while server load is low. - -``` -./build/bin/erigon --private.api.addr=localhost:9090 --private.api.ratelimit=1024 -``` - -### Server load too high - -Reduce `--private.api.ratelimit` - -### Read DB directly without Json-RPC/Graphql - -[./../../docs/programmers_guide/db_faq.md](./../../docs/programmers_guide/db_faq.md) - -### Faster Batch requests - -Currently batch requests are spawn multiple goroutines and process all sub-requests in parallel. To limit impact of 1 -huge batch to other users - added flag `--rpc.batch.concurrency` (default: 2). Increase it to process large batches -faster. - -Known Issue: if at least 1 request is "stremable" (has parameter of type *jsoniter.Stream) - then whole batch will -processed sequentially (on 1 goroutine). - -## For Developers - -### Code generation - -`go.mod` stores right version of generators, use `make grpc` to install it and generate code (it also installs protoc -into ./build/bin folder). 
diff --git a/cmd/rpcdaemon22/cli/config.go b/cmd/rpcdaemon22/cli/config.go deleted file mode 100644 index f4444ded653..00000000000 --- a/cmd/rpcdaemon22/cli/config.go +++ /dev/null @@ -1,677 +0,0 @@ -package cli - -import ( - "context" - "crypto/rand" - "encoding/binary" - "errors" - "fmt" - "net" - "net/http" - "os" - "path/filepath" - "runtime" - "strings" - "time" - - "github.com/ledgerwatch/erigon/internal/debug" - "github.com/ledgerwatch/erigon/node/nodecfg/datadir" - "github.com/ledgerwatch/erigon/rpc/rpccfg" - "golang.org/x/sync/semaphore" - - "github.com/ledgerwatch/erigon-lib/direct" - "github.com/ledgerwatch/erigon-lib/gointerfaces" - "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" - "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/erigon-lib/kv/remotedb" - "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" - libstate "github.com/ledgerwatch/erigon-lib/state" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/health" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcservices" - "github.com/ledgerwatch/erigon/cmd/utils" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/common/paths" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/node" - "github.com/ledgerwatch/erigon/node/nodecfg" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" - "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" - "google.golang.org/grpc" - 
grpcHealth "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" -) - -var rootCmd = &cobra.Command{ - Use: "rpcdaemon", - Short: "rpcdaemon is JSON RPC server that connects to Erigon node for remote DB access", -} - -const JwtDefaultFile = "jwt.hex" - -func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { - utils.CobraFlags(rootCmd, append(debug.Flags, utils.MetricFlags...)) - - cfg := &httpcfg.HttpCfg{StateCache: kvcache.DefaultCoherentConfig} - rootCmd.PersistentFlags().StringVar(&cfg.PrivateApiAddr, "private.api.addr", "127.0.0.1:9090", "private api network address, for example: 127.0.0.1:9090") - rootCmd.PersistentFlags().StringVar(&cfg.DataDir, "datadir", "", "path to Erigon working directory") - rootCmd.PersistentFlags().StringVar(&cfg.HttpListenAddress, "http.addr", nodecfg.DefaultHTTPHost, "HTTP-RPC server listening interface") - rootCmd.PersistentFlags().StringVar(&cfg.EngineHTTPListenAddress, "engine.addr", nodecfg.DefaultHTTPHost, "HTTP-RPC server listening interface for engineAPI") - rootCmd.PersistentFlags().StringVar(&cfg.TLSCertfile, "tls.cert", "", "certificate for client side TLS handshake") - rootCmd.PersistentFlags().StringVar(&cfg.TLSKeyFile, "tls.key", "", "key file for client side TLS handshake") - rootCmd.PersistentFlags().StringVar(&cfg.TLSCACert, "tls.cacert", "", "CA certificate for client side TLS handshake") - rootCmd.PersistentFlags().IntVar(&cfg.HttpPort, "http.port", nodecfg.DefaultHTTPPort, "HTTP-RPC server listening port") - rootCmd.PersistentFlags().IntVar(&cfg.EnginePort, "engine.port", nodecfg.DefaultEngineHTTPPort, "HTTP-RPC server listening port for the engineAPI") - rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpCORSDomain, "http.corsdomain", []string{}, "Comma separated list of domains from which to accept cross origin requests (browser enforced)") - rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpVirtualHost, "http.vhosts", nodecfg.DefaultConfig.HTTPVirtualHosts, "Comma separated list of 
virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.") - rootCmd.PersistentFlags().BoolVar(&cfg.HttpCompression, "http.compression", true, "Disable http compression") - rootCmd.PersistentFlags().StringSliceVar(&cfg.API, "http.api", []string{"eth", "erigon", "engine"}, "API's offered over the HTTP-RPC interface: eth,engine,erigon,web3,net,debug,trace,txpool,db,starknet. Supported methods: https://github.com/ledgerwatch/erigon/tree/devel/cmd/rpcdaemon22") - rootCmd.PersistentFlags().Uint64Var(&cfg.Gascap, "rpc.gascap", 50000000, "Sets a cap on gas that can be used in eth_call/estimateGas") - rootCmd.PersistentFlags().Uint64Var(&cfg.MaxTraces, "trace.maxtraces", 200, "Sets a limit on traces that can be returned in trace_filter") - rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketEnabled, "ws", false, "Enable Websockets") - rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketCompression, "ws.compression", false, "Enable Websocket compression (RFC 7692)") - rootCmd.PersistentFlags().StringVar(&cfg.RpcAllowListFilePath, "rpc.accessList", "", "Specify granular (method-by-method) API allowlist") - rootCmd.PersistentFlags().UintVar(&cfg.RpcBatchConcurrency, utils.RpcBatchConcurrencyFlag.Name, 2, utils.RpcBatchConcurrencyFlag.Usage) - rootCmd.PersistentFlags().BoolVar(&cfg.RpcStreamingDisable, utils.RpcStreamingDisableFlag.Name, false, utils.RpcStreamingDisableFlag.Usage) - rootCmd.PersistentFlags().IntVar(&cfg.DBReadConcurrency, "db.read.concurrency", runtime.GOMAXPROCS(-1), "Does limit amount of parallel db reads") - rootCmd.PersistentFlags().BoolVar(&cfg.TraceCompatibility, "trace.compat", false, "Bug for bug compatibility with OE for trace_ routines") - rootCmd.PersistentFlags().StringVar(&cfg.TxPoolApiAddr, "txpool.api.addr", "", "txpool api network address, for example: 127.0.0.1:9090 (default: use value of --private.api.addr)") - rootCmd.PersistentFlags().BoolVar(&cfg.TevmEnabled, utils.TevmFlag.Name, false, utils.TevmFlag.Usage) - 
rootCmd.PersistentFlags().BoolVar(&cfg.Sync.UseSnapshots, "snapshot", true, utils.SnapshotFlag.Usage) - rootCmd.PersistentFlags().IntVar(&cfg.StateCache.KeysLimit, "state.cache", kvcache.DefaultCoherentConfig.KeysLimit, "Amount of keys to store in StateCache (enabled if no --datadir set). Set 0 to disable StateCache. 1_000_000 keys ~ equal to 2Gb RAM (maybe we will add RAM accounting in future versions).") - rootCmd.PersistentFlags().BoolVar(&cfg.GRPCServerEnabled, "grpc", false, "Enable GRPC server") - rootCmd.PersistentFlags().StringVar(&cfg.GRPCListenAddress, "grpc.addr", nodecfg.DefaultGRPCHost, "GRPC server listening interface") - rootCmd.PersistentFlags().IntVar(&cfg.GRPCPort, "grpc.port", nodecfg.DefaultGRPCPort, "GRPC server listening port") - rootCmd.PersistentFlags().BoolVar(&cfg.GRPCHealthCheckEnabled, "grpc.healthcheck", false, "Enable GRPC health check") - rootCmd.PersistentFlags().StringVar(&cfg.StarknetGRPCAddress, "starknet.grpc.address", "127.0.0.1:6066", "Starknet GRPC address") - rootCmd.PersistentFlags().StringVar(&cfg.JWTSecretPath, utils.JWTSecretPath.Name, utils.JWTSecretPath.Value, "Token to ensure safe connection between CL and EL") - rootCmd.PersistentFlags().BoolVar(&cfg.TraceRequests, utils.HTTPTraceFlag.Name, false, "Trace HTTP requests with INFO level") - - if err := rootCmd.MarkPersistentFlagFilename("rpc.accessList", "json"); err != nil { - panic(err) - } - if err := rootCmd.MarkPersistentFlagDirname("datadir"); err != nil { - panic(err) - } - - rootCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { - if err := utils.SetupCobra(cmd); err != nil { - return err - } - cfg.WithDatadir = cfg.DataDir != "" - if cfg.WithDatadir { - if cfg.DataDir == "" { - cfg.DataDir = paths.DefaultDataDir() - } - cfg.Dirs = datadir.New(cfg.DataDir) - } - if cfg.TxPoolApiAddr == "" { - cfg.TxPoolApiAddr = cfg.PrivateApiAddr - } - return nil - } - rootCmd.PersistentPostRunE = func(cmd *cobra.Command, args []string) error { - 
utils.StopDebug() - return nil - } - - cfg.StateCache.MetricsLabel = "rpc" - - return rootCmd, cfg -} - -type StateChangesClient interface { - StateChanges(ctx context.Context, in *remote.StateChangeRequest, opts ...grpc.CallOption) (remote.KV_StateChangesClient, error) -} - -func subscribeToStateChangesLoop(ctx context.Context, client StateChangesClient, cache kvcache.Cache) { - go func() { - for { - select { - case <-ctx.Done(): - return - default: - } - if err := subscribeToStateChanges(ctx, client, cache); err != nil { - if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) { - time.Sleep(3 * time.Second) - continue - } - log.Warn("[txpool.handleStateChanges]", "err", err) - } - } - }() -} - -func subscribeToStateChanges(ctx context.Context, client StateChangesClient, cache kvcache.Cache) error { - streamCtx, cancel := context.WithCancel(ctx) - defer cancel() - stream, err := client.StateChanges(streamCtx, &remote.StateChangeRequest{WithStorage: true, WithTransactions: false}, grpc.WaitForReady(true)) - if err != nil { - return err - } - for req, err := stream.Recv(); ; req, err = stream.Recv() { - if err != nil { - return err - } - if req == nil { - return nil - } - - cache.OnNewBlock(req) - } -} - -func checkDbCompatibility(ctx context.Context, db kv.RoDB) error { - // DB schema version compatibility check - var version []byte - var compatErr error - var compatTx kv.Tx - if compatTx, compatErr = db.BeginRo(ctx); compatErr != nil { - return fmt.Errorf("open Ro Tx for DB schema compability check: %w", compatErr) - } - defer compatTx.Rollback() - if version, compatErr = compatTx.GetOne(kv.DatabaseInfo, kv.DBSchemaVersionKey); compatErr != nil { - return fmt.Errorf("read version for DB schema compability check: %w", compatErr) - } - if len(version) != 12 { - return fmt.Errorf("database does not have major schema version. 
upgrade and restart Erigon core") - } - major := binary.BigEndian.Uint32(version) - minor := binary.BigEndian.Uint32(version[4:]) - patch := binary.BigEndian.Uint32(version[8:]) - var compatible bool - dbSchemaVersion := &kv.DBSchemaVersion - if major != dbSchemaVersion.Major { - compatible = false - } else if minor != dbSchemaVersion.Minor { - compatible = false - } else { - compatible = true - } - if !compatible { - return fmt.Errorf("incompatible DB Schema versions: reader %d.%d.%d, database %d.%d.%d", - dbSchemaVersion.Major, dbSchemaVersion.Minor, dbSchemaVersion.Patch, - major, minor, patch) - } - log.Info("DB schemas compatible", "reader", fmt.Sprintf("%d.%d.%d", dbSchemaVersion.Major, dbSchemaVersion.Minor, dbSchemaVersion.Patch), - "database", fmt.Sprintf("%d.%d.%d", major, minor, patch)) - return nil -} - -func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcache.CoherentConfig, blockReader services.FullBlockReader, snapshots remotedbserver.Snapsthots, ethBackendServer remote.ETHBACKENDServer, - txPoolServer txpool.TxpoolServer, miningServer txpool.MiningServer, -) ( - eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, starknet *rpcservices.StarknetService, stateCache kvcache.Cache, ff *rpchelper.Filters, err error, -) { - if stateCacheCfg.KeysLimit > 0 { - stateCache = kvcache.New(stateCacheCfg) - } else { - stateCache = kvcache.NewDummy() - } - kvRPC := remotedbserver.NewKvServer(ctx, erigonDB, snapshots) - stateDiffClient := direct.NewStateDiffClientDirect(kvRPC) - subscribeToStateChangesLoop(ctx, stateDiffClient, stateCache) - - directClient := direct.NewEthBackendClientDirect(ethBackendServer) - - eth = rpcservices.NewRemoteBackend(directClient, erigonDB, blockReader) - txPool = direct.NewTxPoolClient(txPoolServer) - mining = direct.NewMiningClient(miningServer) - ff = rpchelper.New(ctx, eth, txPool, mining, func() {}) - return -} - -// RemoteServices - use when RPCDaemon run as independent 
process. Still it can use --datadir flag to enable -// `cfg.WithDatadir` (mode when it on 1 machine with Erigon) -func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, rootCancel context.CancelFunc) ( - db kv.RoDB, borDb kv.RoDB, - eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, - starknet *rpcservices.StarknetService, - stateCache kvcache.Cache, blockReader services.FullBlockReader, - ff *rpchelper.Filters, - agg *libstate.Aggregator, - txNums []uint64, - err error) { - if !cfg.WithDatadir && cfg.PrivateApiAddr == "" { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("either remote db or local db must be specified") - } - - // Do not change the order of these checks. Chaindata needs to be checked first, because PrivateApiAddr has default value which is not "" - // If PrivateApiAddr is checked first, the Chaindata option will never work - if cfg.WithDatadir { - var rwKv kv.RwDB - log.Trace("Creating chain db", "path", cfg.Dirs.Chaindata) - limiter := semaphore.NewWeighted(int64(cfg.DBReadConcurrency)) - rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Readonly().Open() - if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, err - } - if compatErr := checkDbCompatibility(ctx, rwKv); compatErr != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, compatErr - } - db = rwKv - stateCache = kvcache.NewDummy() - blockReader = snapshotsync.NewBlockReader() - - // bor (consensus) specific db - var borKv kv.RoDB - borDbPath := filepath.Join(cfg.DataDir, "bor") - { - // ensure db exist - tmpDb, err := kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Open() - if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, err - } - tmpDb.Close() - } - log.Trace("Creating consensus db", "path", borDbPath) - borKv, err = kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Readonly().Open() - 
if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, err - } - // Skip the compatibility check, until we have a schema in erigon-lib - borDb = borKv - } else { - if cfg.StateCache.KeysLimit > 0 { - stateCache = kvcache.New(cfg.StateCache) - } else { - stateCache = kvcache.NewDummy() - } - log.Info("if you run RPCDaemon on same machine with Erigon add --datadir option") - } - - if db != nil { - var cc *params.ChainConfig - if err := db.View(context.Background(), func(tx kv.Tx) error { - genesisBlock, err := rawdb.ReadBlockByNumber(tx, 0) - if err != nil { - return err - } - if genesisBlock == nil { - return fmt.Errorf("genesis not found in DB. Likely Erigon was never started on this datadir") - } - cc, err = rawdb.ReadChainConfig(tx, genesisBlock.Hash()) - if err != nil { - return err - } - cfg.Snap.Enabled, err = snap.Enabled(tx) - if err != nil { - return err - } - return nil - }); err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, err - } - if cc == nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("chain config not found in db. 
Need start erigon at least once on this db") - } - cfg.Snap.Enabled = cfg.Snap.Enabled || cfg.Sync.UseSnapshots - - // if chain config has terminal total difficulty then rpc must have eth and engine APIs enableds - if cc.TerminalTotalDifficulty != nil { - hasEthApiEnabled := false - hasEngineApiEnabled := false - - for _, api := range cfg.API { - switch api { - case "eth": - hasEthApiEnabled = true - case "engine": - hasEngineApiEnabled = true - } - } - - if !hasEthApiEnabled { - cfg.API = append(cfg.API, "eth") - } - - if !hasEngineApiEnabled { - cfg.API = append(cfg.API, "engine") - } - } - } - - creds, err := grpcutil.TLS(cfg.TLSCACert, cfg.TLSCertfile, cfg.TLSKeyFile) - if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("open tls cert: %w", err) - } - conn, err := grpcutil.Connect(creds, cfg.PrivateApiAddr) - if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("could not connect to execution service privateApi: %w", err) - } - - kvClient := remote.NewKVClient(conn) - remoteKv, err := remotedb.NewRemote(gointerfaces.VersionFromProto(remotedbserver.KvServiceAPIVersion), logger, kvClient).Open() - if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("could not connect to remoteKv: %w", err) - } - - subscribeToStateChangesLoop(ctx, kvClient, stateCache) - - onNewSnapshot := func() {} - if cfg.WithDatadir { - if cfg.Snap.Enabled { - allSnapshots := snapshotsync.NewRoSnapshots(cfg.Snap, cfg.Dirs.Snap) - onNewSnapshot = func() { - go func() { // don't block events processing by network communication - reply, err := kvClient.Snapshots(ctx, &remote.SnapshotsRequest{}, grpc.WaitForReady(true)) - if err != nil { - log.Warn("[Snapshots] reopen", "err", err) - return - } - if err := allSnapshots.ReopenList(reply.Files, true); err != nil { - log.Error("[Snapshots] reopen", "err", err) - } else { - allSnapshots.LogStat() - } - }() - } - onNewSnapshot() - 
blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) - - txNums = make([]uint64, allSnapshots.BlocksAvailable()+1) - if err = allSnapshots.Bodies.View(func(bs []*snapshotsync.BodySegment) error { - for _, b := range bs { - if err = b.Iterate(func(blockNum, baseTxNum, txAmount uint64) { - txNums[blockNum] = baseTxNum + txAmount - }); err != nil { - return err - } - } - return nil - }); err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("build txNum => blockNum mapping: %w", err) - } - } else { - log.Info("Use --snapshots=false") - } - } - - if !cfg.WithDatadir { - blockReader = snapshotsync.NewRemoteBlockReader(remote.NewETHBACKENDClient(conn)) - } - remoteEth := rpcservices.NewRemoteBackend(remote.NewETHBACKENDClient(conn), db, blockReader) - blockReader = remoteEth - - txpoolConn := conn - if cfg.TxPoolApiAddr != cfg.PrivateApiAddr { - txpoolConn, err = grpcutil.Connect(creds, cfg.TxPoolApiAddr) - if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("could not connect to txpool api: %w", err) - } - } - - mining = txpool.NewMiningClient(txpoolConn) - miningService := rpcservices.NewMiningService(mining) - txPool = txpool.NewTxpoolClient(txpoolConn) - txPoolService := rpcservices.NewTxPoolService(txPool) - if db == nil { - db = remoteKv - } - eth = remoteEth - go func() { - if !remoteKv.EnsureVersionCompatibility() { - rootCancel() - } - if !remoteEth.EnsureVersionCompatibility() { - rootCancel() - } - if mining != nil && !miningService.EnsureVersionCompatibility() { - rootCancel() - } - if !txPoolService.EnsureVersionCompatibility() { - rootCancel() - } - }() - - if cfg.StarknetGRPCAddress != "" { - starknetConn, err := grpcutil.Connect(creds, cfg.StarknetGRPCAddress) - if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("could not connect to starknet api: %w", err) - } - starknet = rpcservices.NewStarknetService(starknetConn) - } - - ff = 
rpchelper.New(ctx, eth, txPool, mining, onNewSnapshot) - - if cfg.WithDatadir { - if agg, err = libstate.NewAggregator(filepath.Join(cfg.DataDir, "erigon22"), 3_125_000); err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("create aggregator: %w", err) - } - } - return db, borDb, eth, txPool, mining, starknet, stateCache, blockReader, ff, agg, txNums, err -} - -func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) error { - var engineListener *http.Server - var engineSrv *rpc.Server - var engineHttpEndpoint string - - // register apis and create handler stack - httpEndpoint := fmt.Sprintf("%s:%d", cfg.HttpListenAddress, cfg.HttpPort) - - srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, cfg.RpcStreamingDisable) - - allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) - if err != nil { - return err - } - srv.SetAllowList(allowListForRPC) - - var defaultAPIList []rpc.API - var engineAPI []rpc.API - - for _, api := range rpcAPI { - if api.Namespace != "engine" { - defaultAPIList = append(defaultAPIList, api) - } else { - engineAPI = append(engineAPI, api) - } - } - - if len(engineAPI) != 0 { - // eth API should also be exposed on the same port as engine API - for _, api := range rpcAPI { - if api.Namespace == "eth" { - engineAPI = append(engineAPI, api) - } - } - } - - var apiFlags []string - for _, flag := range cfg.API { - if flag != "engine" { - apiFlags = append(apiFlags, flag) - } - } - - if err := node.RegisterApisFromWhitelist(defaultAPIList, apiFlags, srv, false); err != nil { - return fmt.Errorf("could not start register RPC apis: %w", err) - } - - httpHandler := node.NewHTTPHandlerStack(srv, cfg.HttpCORSDomain, cfg.HttpVirtualHost, cfg.HttpCompression) - var wsHandler http.Handler - if cfg.WebsocketEnabled { - wsHandler = srv.WebsocketHandler([]string{"*"}, nil, cfg.WebsocketCompression) - } - - apiHandler, err := createHandler(cfg, defaultAPIList, httpHandler, 
wsHandler, nil) - if err != nil { - return err - } - - listener, _, err := node.StartHTTPEndpoint(httpEndpoint, rpccfg.DefaultHTTPTimeouts, apiHandler) - if err != nil { - return fmt.Errorf("could not start RPC api: %w", err) - } - info := []interface{}{"url", httpEndpoint, "ws", cfg.WebsocketEnabled, - "ws.compression", cfg.WebsocketCompression, "grpc", cfg.GRPCServerEnabled} - - if len(engineAPI) > 0 { - engineListener, engineSrv, engineHttpEndpoint, err = createEngineListener(cfg, engineAPI) - if err != nil { - return fmt.Errorf("could not start RPC api for engine: %w", err) - } - } - - var ( - healthServer *grpcHealth.Server - grpcServer *grpc.Server - grpcListener net.Listener - grpcEndpoint string - ) - if cfg.GRPCServerEnabled { - grpcEndpoint = fmt.Sprintf("%s:%d", cfg.GRPCListenAddress, cfg.GRPCPort) - if grpcListener, err = net.Listen("tcp", grpcEndpoint); err != nil { - return fmt.Errorf("could not start GRPC listener: %w", err) - } - grpcServer = grpc.NewServer() - if cfg.GRPCHealthCheckEnabled { - healthServer = grpcHealth.NewServer() - grpc_health_v1.RegisterHealthServer(grpcServer, healthServer) - } - go grpcServer.Serve(grpcListener) - info = append(info, "grpc.port", cfg.GRPCPort) - } - - log.Info("HTTP endpoint opened", info...) 
- - defer func() { - srv.Stop() - if engineSrv != nil { - engineSrv.Stop() - } - shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - _ = listener.Shutdown(shutdownCtx) - log.Info("HTTP endpoint closed", "url", httpEndpoint) - - if engineListener != nil { - _ = engineListener.Shutdown(shutdownCtx) - log.Info("Engine HTTP endpoint close", "url", engineHttpEndpoint) - } - - if cfg.GRPCServerEnabled { - if cfg.GRPCHealthCheckEnabled { - healthServer.Shutdown() - } - grpcServer.GracefulStop() - _ = grpcListener.Close() - log.Info("GRPC endpoint closed", "url", grpcEndpoint) - } - }() - <-ctx.Done() - log.Info("Exiting...") - return nil -} - -// isWebsocket checks the header of a http request for a websocket upgrade request. -func isWebsocket(r *http.Request) bool { - return strings.ToLower(r.Header.Get("Upgrade")) == "websocket" && - strings.Contains(strings.ToLower(r.Header.Get("Connection")), "upgrade") -} - -// obtainJWTSecret loads the jwt-secret, either from the provided config, -// or from the default location. If neither of those are present, it generates -// a new secret and stores to the default location. -func obtainJWTSecret(cfg httpcfg.HttpCfg) ([]byte, error) { - // try reading from file - log.Info("Reading JWT secret", "path", cfg.JWTSecretPath) - // If we run the rpcdaemon and datadir is not specified we just use jwt.hex in current directory. 
- if len(cfg.JWTSecretPath) == 0 { - cfg.JWTSecretPath = "jwt.hex" - } - if data, err := os.ReadFile(cfg.JWTSecretPath); err == nil { - jwtSecret := common.FromHex(strings.TrimSpace(string(data))) - if len(jwtSecret) == 32 { - return jwtSecret, nil - } - log.Error("Invalid JWT secret", "path", cfg.JWTSecretPath, "length", len(jwtSecret)) - return nil, errors.New("invalid JWT secret") - } - // Need to generate one - jwtSecret := make([]byte, 32) - rand.Read(jwtSecret) - - if err := os.WriteFile(cfg.JWTSecretPath, []byte(hexutil.Encode(jwtSecret)), 0600); err != nil { - return nil, err - } - log.Info("Generated JWT secret", "path", cfg.JWTSecretPath) - return jwtSecret, nil -} - -func createHandler(cfg httpcfg.HttpCfg, apiList []rpc.API, httpHandler http.Handler, wsHandler http.Handler, jwtSecret []byte) (http.Handler, error) { - var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // adding a healthcheck here - if health.ProcessHealthcheckIfNeeded(w, r, apiList) { - return - } - if cfg.WebsocketEnabled && wsHandler != nil && isWebsocket(r) { - wsHandler.ServeHTTP(w, r) - return - } - - if jwtSecret != nil && !rpc.CheckJwtSecret(w, r, jwtSecret) { - return - } - - httpHandler.ServeHTTP(w, r) - }) - - return handler, nil -} - -func createEngineListener(cfg httpcfg.HttpCfg, engineApi []rpc.API) (*http.Server, *rpc.Server, string, error) { - engineHttpEndpoint := fmt.Sprintf("%s:%d", cfg.EngineHTTPListenAddress, cfg.EnginePort) - - engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, true) - - allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) - if err != nil { - return nil, nil, "", err - } - engineSrv.SetAllowList(allowListForRPC) - - if err := node.RegisterApisFromWhitelist(engineApi, nil, engineSrv, true); err != nil { - return nil, nil, "", fmt.Errorf("could not start register RPC engine api: %w", err) - } - - jwtSecret, err := obtainJWTSecret(cfg) - if err != nil { - return nil, nil, "", 
err - } - - var wsHandler http.Handler - if cfg.WebsocketEnabled { - wsHandler = engineSrv.WebsocketHandler([]string{"*"}, jwtSecret, cfg.WebsocketCompression) - } - - engineHttpHandler := node.NewHTTPHandlerStack(engineSrv, cfg.HttpCORSDomain, cfg.HttpVirtualHost, cfg.HttpCompression) - - engineApiHandler, err := createHandler(cfg, engineApi, engineHttpHandler, wsHandler, jwtSecret) - if err != nil { - return nil, nil, "", err - } - - engineListener, _, err := node.StartHTTPEndpoint(engineHttpEndpoint, rpccfg.DefaultHTTPTimeouts, engineApiHandler) - if err != nil { - return nil, nil, "", fmt.Errorf("could not start RPC api: %w", err) - } - - engineInfo := []interface{}{"url", engineHttpEndpoint, "ws", cfg.WebsocketEnabled} - log.Info("HTTP endpoint opened for Engine API", engineInfo...) - - return engineListener, engineSrv, engineHttpEndpoint, nil -} diff --git a/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go b/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go deleted file mode 100644 index 9cc7c72b68b..00000000000 --- a/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go +++ /dev/null @@ -1,47 +0,0 @@ -package httpcfg - -import ( - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/node/nodecfg/datadir" -) - -type HttpCfg struct { - Enabled bool - PrivateApiAddr string - WithDatadir bool // Erigon's database can be read by separated processes on same machine - in read-only mode - with full support of transactions. It will share same "OS PageCache" with Erigon process. 
- DataDir string - Dirs datadir.Dirs - HttpListenAddress string - EngineHTTPListenAddress string - TLSCertfile string - TLSCACert string - TLSKeyFile string - HttpPort int - EnginePort int - HttpCORSDomain []string - HttpVirtualHost []string - HttpCompression bool - API []string - Gascap uint64 - MaxTraces uint64 - WebsocketEnabled bool - WebsocketCompression bool - RpcAllowListFilePath string - RpcBatchConcurrency uint - RpcStreamingDisable bool - DBReadConcurrency int - TraceCompatibility bool // Bug for bug compatibility for trace_ routines with OpenEthereum - TxPoolApiAddr string - TevmEnabled bool - StateCache kvcache.CoherentConfig - Snap ethconfig.Snapshot - Sync ethconfig.Sync - GRPCServerEnabled bool - GRPCListenAddress string - GRPCPort int - GRPCHealthCheckEnabled bool - StarknetGRPCAddress string - JWTSecretPath string // Engine API Authentication - TraceRequests bool // Always trace requests in INFO level -} diff --git a/cmd/rpcdaemon22/cli/rpc_allow_list.go b/cmd/rpcdaemon22/cli/rpc_allow_list.go deleted file mode 100644 index dbf6fbff88a..00000000000 --- a/cmd/rpcdaemon22/cli/rpc_allow_list.go +++ /dev/null @@ -1,43 +0,0 @@ -package cli - -import ( - "encoding/json" - "io" - "os" - "strings" - - "github.com/ledgerwatch/erigon/rpc" -) - -type allowListFile struct { - Allow rpc.AllowList `json:"allow"` -} - -func parseAllowListForRPC(path string) (rpc.AllowList, error) { - path = strings.TrimSpace(path) - if path == "" { // no file is provided - return nil, nil - } - - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer func() { - file.Close() //nolint: errcheck - }() - - fileContents, err := io.ReadAll(file) - if err != nil { - return nil, err - } - - var allowListFileObj allowListFile - - err = json.Unmarshal(fileContents, &allowListFileObj) - if err != nil { - return nil, err - } - - return allowListFileObj.Allow, nil -} diff --git a/cmd/rpcdaemon22/commands/admin_api.go b/cmd/rpcdaemon22/commands/admin_api.go deleted file 
mode 100644 index 636e1de30c6..00000000000 --- a/cmd/rpcdaemon22/commands/admin_api.go +++ /dev/null @@ -1,49 +0,0 @@ -package commands - -import ( - "context" - "errors" - "fmt" - - "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/turbo/rpchelper" -) - -// AdminAPI the interface for the admin_* RPC commands. -type AdminAPI interface { - // NodeInfo returns a collection of metadata known about the host. - NodeInfo(ctx context.Context) (*p2p.NodeInfo, error) - - // Peers returns information about the connected remote nodes. - // https://geth.ethereum.org/docs/rpc/ns-admin#admin_peers - Peers(ctx context.Context) ([]*p2p.PeerInfo, error) -} - -// AdminAPIImpl data structure to store things needed for admin_* commands. -type AdminAPIImpl struct { - ethBackend rpchelper.ApiBackend -} - -// NewAdminAPI returns AdminAPIImpl instance. -func NewAdminAPI(eth rpchelper.ApiBackend) *AdminAPIImpl { - return &AdminAPIImpl{ - ethBackend: eth, - } -} - -func (api *AdminAPIImpl) NodeInfo(ctx context.Context) (*p2p.NodeInfo, error) { - nodes, err := api.ethBackend.NodeInfo(ctx, 1) - if err != nil { - return nil, fmt.Errorf("node info request error: %w", err) - } - - if len(nodes) == 0 { - return nil, errors.New("empty nodesInfo response") - } - - return &nodes[0], nil -} - -func (api *AdminAPIImpl) Peers(ctx context.Context) ([]*p2p.PeerInfo, error) { - return api.ethBackend.Peers(ctx) -} diff --git a/cmd/rpcdaemon22/commands/bor_api.go b/cmd/rpcdaemon22/commands/bor_api.go deleted file mode 100644 index 79eb2a48327..00000000000 --- a/cmd/rpcdaemon22/commands/bor_api.go +++ /dev/null @@ -1,37 +0,0 @@ -package commands - -import ( - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/rpc" -) - -// BorAPI Bor specific routines -type BorAPI interface { - // Bor snapshot related (see ./bor_snapshot.go) - GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) 
- GetAuthor(number *rpc.BlockNumber) (*common.Address, error) - GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) - GetSigners(number *rpc.BlockNumber) ([]common.Address, error) - GetSignersAtHash(hash common.Hash) ([]common.Address, error) - GetCurrentProposer() (common.Address, error) - GetCurrentValidators() ([]*bor.Validator, error) - GetRootHash(start uint64, end uint64) (string, error) -} - -// BorImpl is implementation of the BorAPI interface -type BorImpl struct { - *BaseAPI - db kv.RoDB // the chain db - borDb kv.RoDB // the consensus db -} - -// NewBorAPI returns BorImpl instance -func NewBorAPI(base *BaseAPI, db kv.RoDB, borDb kv.RoDB) *BorImpl { - return &BorImpl{ - BaseAPI: base, - db: db, - borDb: borDb, - } -} diff --git a/cmd/rpcdaemon22/commands/bor_helper.go b/cmd/rpcdaemon22/commands/bor_helper.go deleted file mode 100644 index 51e60d5f70f..00000000000 --- a/cmd/rpcdaemon22/commands/bor_helper.go +++ /dev/null @@ -1,159 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "errors" - "fmt" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rpc" -) - -const ( - checkpointInterval = 1024 // Number of blocks after which vote snapshots are saved to db -) - -var ( - extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity - extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal -) - -var ( - // errUnknownBlock is returned when the list of signers is requested for a block - // that is not part of the local blockchain. - errUnknownBlock = errors.New("unknown block") - - // errMissingSignature is returned if a block's extra-data section doesn't seem - // to contain a 65 byte secp256k1 signature. 
- errMissingSignature = errors.New("extra-data 65 byte signature suffix missing") - - // errOutOfRangeChain is returned if an authorization list is attempted to - // be modified via out-of-range or non-contiguous headers. - errOutOfRangeChain = errors.New("out of range or non-contiguous chain") - - // errMissingVanity is returned if a block's extra-data section is shorter than - // 32 bytes, which is required to store the signer vanity. - errMissingVanity = errors.New("extra-data 32 byte vanity prefix missing") -) - -// getHeaderByNumber returns a block's header given a block number ignoring the block's transaction and uncle list (may be faster). -// derived from erigon_getHeaderByNumber implementation (see ./erigon_block.go) -func getHeaderByNumber(ctx context.Context, number rpc.BlockNumber, api *BorImpl, tx kv.Tx) (*types.Header, error) { - // Pending block is only known by the miner - if number == rpc.PendingBlockNumber { - block := api.pendingBlock() - if block == nil { - return nil, nil - } - return block.Header(), nil - } - - blockNum, err := getBlockNumber(number, tx) - if err != nil { - return nil, err - } - - header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNum) - if err != nil { - return nil, err - } - if header == nil { - return nil, fmt.Errorf("block header not found: %d", blockNum) - } - - return header, nil -} - -// getHeaderByHash returns a block's header given a block's hash. -// derived from erigon_getHeaderByHash implementation (see ./erigon_block.go) -func getHeaderByHash(ctx context.Context, api *BorImpl, tx kv.Tx, hash common.Hash) (*types.Header, error) { - header, err := api._blockReader.HeaderByHash(ctx, tx, hash) - if err != nil { - return nil, err - } - if header == nil { - return nil, fmt.Errorf("block header not found: %s", hash.String()) - } - - return header, nil -} - -// ecrecover extracts the Ethereum account address from a signed header. 
-func ecrecover(header *types.Header, c *params.BorConfig) (common.Address, error) { - // Retrieve the signature from the header extra-data - if len(header.Extra) < extraSeal { - return common.Address{}, errMissingSignature - } - signature := header.Extra[len(header.Extra)-extraSeal:] - - // Recover the public key and the Ethereum address - pubkey, err := crypto.Ecrecover(bor.SealHash(header, c).Bytes(), signature) - if err != nil { - return common.Address{}, err - } - var signer common.Address - copy(signer[:], crypto.Keccak256(pubkey[1:])[12:]) - - return signer, nil -} - -// validateHeaderExtraField validates that the extra-data contains both the vanity and signature. -// header.Extra = header.Vanity + header.ProducerBytes (optional) + header.Seal -func validateHeaderExtraField(extraBytes []byte) error { - if len(extraBytes) < extraVanity { - return errMissingVanity - } - if len(extraBytes) < extraVanity+extraSeal { - return errMissingSignature - } - return nil -} - -// validatorContains checks for a validator in given validator set -func validatorContains(a []*bor.Validator, x *bor.Validator) (*bor.Validator, bool) { - for _, n := range a { - if bytes.Equal(n.Address.Bytes(), x.Address.Bytes()) { - return n, true - } - } - return nil, false -} - -// getUpdatedValidatorSet applies changes to a validator set and returns a new validator set -func getUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*bor.Validator) *ValidatorSet { - v := oldValidatorSet - oldVals := v.Validators - - var changes []*bor.Validator - for _, ov := range oldVals { - if f, ok := validatorContains(newVals, ov); ok { - ov.VotingPower = f.VotingPower - } else { - ov.VotingPower = 0 - } - - changes = append(changes, ov) - } - - for _, nv := range newVals { - if _, ok := validatorContains(changes, nv); !ok { - changes = append(changes, nv) - } - } - - v.UpdateWithChangeSet(changes) - return v -} - -// author returns the Ethereum address recovered -// from the signature in the 
header's extra-data section. -func author(api *BorImpl, tx kv.Tx, header *types.Header) (common.Address, error) { - config, _ := api.BaseAPI.chainConfig(tx) - return ecrecover(header, config.Bor) -} diff --git a/cmd/rpcdaemon22/commands/bor_snapshot.go b/cmd/rpcdaemon22/commands/bor_snapshot.go deleted file mode 100644 index 2915afe9513..00000000000 --- a/cmd/rpcdaemon22/commands/bor_snapshot.go +++ /dev/null @@ -1,424 +0,0 @@ -package commands - -import ( - "context" - "encoding/hex" - "encoding/json" - "fmt" - "math/big" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/log/v3" - "github.com/xsleonard/go-merkle" - "golang.org/x/crypto/sha3" -) - -type Snapshot struct { - config *params.BorConfig // Consensus engine parameters to fine tune behavior - - Number uint64 `json:"number"` // Block number where the snapshot was created - Hash common.Hash `json:"hash"` // Block hash where the snapshot was created - ValidatorSet *ValidatorSet `json:"validatorSet"` // Validator set at this moment - Recents map[uint64]common.Address `json:"recents"` // Set of recent signers for spam protections -} - -// GetSnapshot retrieves the state snapshot at a given block. 
-func (api *BorImpl) GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) { - // init chain db - ctx := context.Background() - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - // Retrieve the requested block number (or current if none requested) - var header *types.Header - if number == nil || *number == rpc.LatestBlockNumber { - header = rawdb.ReadCurrentHeader(tx) - } else { - header, _ = getHeaderByNumber(ctx, *number, api, tx) - } - // Ensure we have an actually valid block - if header == nil { - return nil, errUnknownBlock - } - - // init consensus db - borTx, err := api.borDb.BeginRo(ctx) - if err != nil { - return nil, err - } - defer borTx.Rollback() - return snapshot(ctx, api, tx, borTx, header) -} - -// GetAuthor retrieves the author a block. -func (api *BorImpl) GetAuthor(number *rpc.BlockNumber) (*common.Address, error) { - ctx := context.Background() - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - // Retrieve the requested block number (or current if none requested) - var header *types.Header - if number == nil || *number == rpc.LatestBlockNumber { - header = rawdb.ReadCurrentHeader(tx) - } else { - header, _ = getHeaderByNumber(ctx, *number, api, tx) - } - // Ensure we have an actually valid block - if header == nil { - return nil, errUnknownBlock - } - author, err := author(api, tx, header) - return &author, err -} - -// GetSnapshotAtHash retrieves the state snapshot at a given block. 
-func (api *BorImpl) GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) { - // init chain db - ctx := context.Background() - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - // Retreive the header - header, _ := getHeaderByHash(ctx, api, tx, hash) - - // Ensure we have an actually valid block - if header == nil { - return nil, errUnknownBlock - } - - // init consensus db - borTx, err := api.borDb.BeginRo(ctx) - if err != nil { - return nil, err - } - defer borTx.Rollback() - return snapshot(ctx, api, tx, borTx, header) -} - -// GetSigners retrieves the list of authorized signers at the specified block. -func (api *BorImpl) GetSigners(number *rpc.BlockNumber) ([]common.Address, error) { - // init chain db - ctx := context.Background() - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - // Retrieve the requested block number (or current if none requested) - var header *types.Header - if number == nil || *number == rpc.LatestBlockNumber { - header = rawdb.ReadCurrentHeader(tx) - } else { - header, _ = getHeaderByNumber(ctx, *number, api, tx) - } - // Ensure we have an actually valid block - if header == nil { - return nil, errUnknownBlock - } - - // init consensus db - borTx, err := api.borDb.BeginRo(ctx) - if err != nil { - return nil, err - } - defer borTx.Rollback() - snap, err := snapshot(ctx, api, tx, borTx, header) - return snap.signers(), err -} - -// GetSignersAtHash retrieves the list of authorized signers at the specified block. 
-func (api *BorImpl) GetSignersAtHash(hash common.Hash) ([]common.Address, error) { - // init chain db - ctx := context.Background() - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - // Retreive the header - header, _ := getHeaderByHash(ctx, api, tx, hash) - - // Ensure we have an actually valid block - if header == nil { - return nil, errUnknownBlock - } - - // init consensus db - borTx, err := api.borDb.BeginRo(ctx) - if err != nil { - return nil, err - } - defer borTx.Rollback() - - snap, err := snapshot(ctx, api, tx, borTx, header) - return snap.signers(), err -} - -// GetCurrentProposer gets the current proposer -func (api *BorImpl) GetCurrentProposer() (common.Address, error) { - snap, err := api.GetSnapshot(nil) - if err != nil { - return common.Address{}, err - } - return snap.ValidatorSet.GetProposer().Address, nil -} - -// GetCurrentValidators gets the current validators -func (api *BorImpl) GetCurrentValidators() ([]*bor.Validator, error) { - snap, err := api.GetSnapshot(nil) - if err != nil { - return make([]*bor.Validator, 0), err - } - return snap.ValidatorSet.Validators, nil -} - -// GetRootHash returns the merkle root of the start to end block headers -func (api *BorImpl) GetRootHash(start, end uint64) (string, error) { - length := uint64(end - start + 1) - if length > bor.MaxCheckpointLength { - return "", &bor.MaxCheckpointLengthExceededError{Start: start, End: end} - } - ctx := context.Background() - tx, err := api.db.BeginRo(ctx) - if err != nil { - return "", err - } - defer tx.Rollback() - header := rawdb.ReadCurrentHeader(tx) - var currentHeaderNumber uint64 = 0 - if header == nil { - return "", &bor.InvalidStartEndBlockError{Start: start, End: end, CurrentHeader: currentHeaderNumber} - } - currentHeaderNumber = header.Number.Uint64() - if start > end || end > currentHeaderNumber { - return "", &bor.InvalidStartEndBlockError{Start: start, End: end, CurrentHeader: currentHeaderNumber} - } - 
blockHeaders := make([]*types.Header, end-start+1) - for number := start; number <= end; number++ { - blockHeaders[number-start], _ = getHeaderByNumber(ctx, rpc.BlockNumber(number), api, tx) - } - - headers := make([][32]byte, bor.NextPowerOfTwo(length)) - for i := 0; i < len(blockHeaders); i++ { - blockHeader := blockHeaders[i] - header := crypto.Keccak256(bor.AppendBytes32( - blockHeader.Number.Bytes(), - new(big.Int).SetUint64(blockHeader.Time).Bytes(), - blockHeader.TxHash.Bytes(), - blockHeader.ReceiptHash.Bytes(), - )) - - var arr [32]byte - copy(arr[:], header) - headers[i] = arr - } - tree := merkle.NewTreeWithOpts(merkle.TreeOptions{EnableHashSorting: false, DisableHashLeaves: true}) - if err := tree.Generate(bor.Convert(headers), sha3.NewLegacyKeccak256()); err != nil { - return "", err - } - root := hex.EncodeToString(tree.Root().Hash) - return root, nil -} - -// Helper functions for Snapshot Type - -// copy creates a deep copy of the snapshot, though not the individual votes. 
-func (s *Snapshot) copy() *Snapshot { - cpy := &Snapshot{ - config: s.config, - Number: s.Number, - Hash: s.Hash, - ValidatorSet: s.ValidatorSet.Copy(), - Recents: make(map[uint64]common.Address), - } - for block, signer := range s.Recents { - cpy.Recents[block] = signer - } - - return cpy -} - -// GetSignerSuccessionNumber returns the relative position of signer in terms of the in-turn proposer -func (s *Snapshot) GetSignerSuccessionNumber(signer common.Address) (int, error) { - validators := s.ValidatorSet.Validators - proposer := s.ValidatorSet.GetProposer().Address - proposerIndex, _ := s.ValidatorSet.GetByAddress(proposer) - if proposerIndex == -1 { - return -1, &bor.UnauthorizedProposerError{Number: s.Number, Proposer: proposer.Bytes()} - } - signerIndex, _ := s.ValidatorSet.GetByAddress(signer) - if signerIndex == -1 { - return -1, &bor.UnauthorizedSignerError{Number: s.Number, Signer: signer.Bytes()} - } - - tempIndex := signerIndex - if proposerIndex != tempIndex { - if tempIndex < proposerIndex { - tempIndex = tempIndex + len(validators) - } - } - return tempIndex - proposerIndex, nil -} - -// signers retrieves the list of authorized signers in ascending order. 
-func (s *Snapshot) signers() []common.Address { - sigs := make([]common.Address, 0, len(s.ValidatorSet.Validators)) - for _, sig := range s.ValidatorSet.Validators { - sigs = append(sigs, sig.Address) - } - return sigs -} - -// apply header changes on snapshot -func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) { - // Allow passing in no headers for cleaner code - if len(headers) == 0 { - return s, nil - } - // Sanity check that the headers can be applied - for i := 0; i < len(headers)-1; i++ { - if headers[i+1].Number.Uint64() != headers[i].Number.Uint64()+1 { - return nil, errOutOfRangeChain - } - } - if headers[0].Number.Uint64() != s.Number+1 { - return nil, errOutOfRangeChain - } - // Iterate through the headers and create a new snapshot - snap := s.copy() - - for _, header := range headers { - // Remove any votes on checkpoint blocks - number := header.Number.Uint64() - - // Delete the oldest signer from the recent list to allow it signing again - if number >= s.config.Sprint { - delete(snap.Recents, number-s.config.Sprint) - } - - // Resolve the authorization key and check against signers - signer, err := ecrecover(header, s.config) - if err != nil { - return nil, err - } - - // check if signer is in validator set - if !snap.ValidatorSet.HasAddress(signer.Bytes()) { - return nil, &bor.UnauthorizedSignerError{Number: number, Signer: signer.Bytes()} - } - - if _, err = snap.GetSignerSuccessionNumber(signer); err != nil { - return nil, err - } - - // add recents - snap.Recents[number] = signer - - // change validator set and change proposer - if number > 0 && (number+1)%s.config.Sprint == 0 { - if err := validateHeaderExtraField(header.Extra); err != nil { - return nil, err - } - validatorBytes := header.Extra[extraVanity : len(header.Extra)-extraSeal] - - // get validators from headers and use that for new validator set - newVals, _ := bor.ParseValidators(validatorBytes) - v := getUpdatedValidatorSet(snap.ValidatorSet.Copy(), newVals) - 
v.IncrementProposerPriority(1) - snap.ValidatorSet = v - } - } - snap.Number += uint64(len(headers)) - snap.Hash = headers[len(headers)-1].Hash() - - return snap, nil -} - -// snapshot retrieves the authorization snapshot at a given point in time. -func snapshot(ctx context.Context, api *BorImpl, db kv.Tx, borDb kv.Tx, header *types.Header) (*Snapshot, error) { - // Search for a snapshot on disk or build it from checkpoint - var ( - headers []*types.Header - snap *Snapshot - ) - - number := header.Number.Uint64() - hash := header.Hash() - - for snap == nil { - // If an on-disk checkpoint snapshot can be found, use that - if number%checkpointInterval == 0 { - if s, err := loadSnapshot(api, db, borDb, hash); err == nil { - log.Info("Loaded snapshot from disk", "number", number, "hash", hash) - snap = s - } - break - } - - // No snapshot for this header, move backward and check parent snapshots - if header == nil { - header, _ = getHeaderByNumber(ctx, rpc.BlockNumber(number), api, db) - if header == nil { - return nil, consensus.ErrUnknownAncestor - } - } - headers = append(headers, header) - number, hash = number-1, header.ParentHash - header = nil - } - - if snap == nil { - return nil, fmt.Errorf("unknown error while retrieving snapshot at block number %v", number) - } - - // Previous snapshot found, apply any pending headers on top of it - for i := 0; i < len(headers)/2; i++ { - headers[i], headers[len(headers)-1-i] = headers[len(headers)-1-i], headers[i] - } - - snap, err := snap.apply(headers) - if err != nil { - return nil, err - } - return snap, nil -} - -// loadSnapshot loads an existing snapshot from the database. 
-func loadSnapshot(api *BorImpl, db kv.Tx, borDb kv.Tx, hash common.Hash) (*Snapshot, error) { - blob, err := borDb.GetOne(kv.BorSeparate, append([]byte("bor-"), hash[:]...)) - if err != nil { - return nil, err - } - snap := new(Snapshot) - if err := json.Unmarshal(blob, snap); err != nil { - return nil, err - } - config, _ := api.BaseAPI.chainConfig(db) - snap.config = config.Bor - - // update total voting power - if err := snap.ValidatorSet.updateTotalVotingPower(); err != nil { - return nil, err - } - - return snap, nil -} diff --git a/cmd/rpcdaemon22/commands/call_traces_test.go b/cmd/rpcdaemon22/commands/call_traces_test.go deleted file mode 100644 index cd2ea4fdeb5..00000000000 --- a/cmd/rpcdaemon22/commands/call_traces_test.go +++ /dev/null @@ -1,269 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "sync" - "testing" - - "github.com/holiman/uint256" - jsoniter "github.com/json-iterator/go" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/valyala/fastjson" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/stages" -) - -func blockNumbersFromTraces(t *testing.T, b []byte) []int { - var err error - var p fastjson.Parser - response := b - var v *fastjson.Value - if v, err = p.ParseBytes(response); err != nil { - t.Fatalf("parsing response: %v", err) - } - var elems []*fastjson.Value - if elems, err = v.Array(); err != nil { - t.Fatalf("expected array in the response: %v", err) - } - var numbers []int - for _, elem := range elems { - bn := elem.GetInt("blockNumber") - numbers = append(numbers, bn) - } - return numbers -} - -func TestCallTraceOneByOne(t *testing.T) { - t.Skip() - m := 
stages.Mock(t) - defer m.DB.Close() - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { - gen.SetCoinbase(common.Address{1}) - }, false /* intermediateHashes */) - if err != nil { - t.Fatalf("generate chain: %v", err) - } - api := NewTraceAPI( - NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), nil, nil, false), - m.DB, &httpcfg.HttpCfg{}) - // Insert blocks 1 by 1, to tirgget possible "off by one" errors - for i := 0; i < chain.Length(); i++ { - if err = m.InsertChain(chain.Slice(i, i+1)); err != nil { - t.Fatalf("inserting chain: %v", err) - } - } - stream := jsoniter.ConfigDefault.BorrowStream(nil) - defer jsoniter.ConfigDefault.ReturnStream(stream) - var fromBlock, toBlock uint64 - fromBlock = 1 - toBlock = 10 - toAddress1 := common.Address{1} - traceReq1 := TraceFilterRequest{ - FromBlock: (*hexutil.Uint64)(&fromBlock), - ToBlock: (*hexutil.Uint64)(&toBlock), - ToAddress: []*common.Address{&toAddress1}, - } - if err = api.Filter(context.Background(), traceReq1, stream); err != nil { - t.Fatalf("trace_filter failed: %v", err) - } - assert.Equal(t, []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, blockNumbersFromTraces(t, stream.Buffer())) -} - -func TestCallTraceUnwind(t *testing.T) { - t.Skip() - m := stages.Mock(t) - defer m.DB.Close() - var chainA, chainB *core.ChainPack - var err error - chainA, err = core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { - gen.SetCoinbase(common.Address{1}) - }, false /* intermediateHashes */) - if err != nil { - t.Fatalf("generate chainA: %v", err) - } - chainB, err = core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 20, func(i int, gen *core.BlockGen) { - if i < 5 || i >= 10 { - gen.SetCoinbase(common.Address{1}) - } else { - gen.SetCoinbase(common.Address{2}) - } - }, false /* intermediateHashes */) - if err != nil { - t.Fatalf("generate chainB: %v", err) - } - api := 
NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), nil, nil, false), m.DB, &httpcfg.HttpCfg{}) - if err = m.InsertChain(chainA); err != nil { - t.Fatalf("inserting chainA: %v", err) - } - var buf bytes.Buffer - stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) - var fromBlock, toBlock uint64 - fromBlock = 1 - toBlock = 10 - toAddress1 := common.Address{1} - traceReq1 := TraceFilterRequest{ - FromBlock: (*hexutil.Uint64)(&fromBlock), - ToBlock: (*hexutil.Uint64)(&toBlock), - ToAddress: []*common.Address{&toAddress1}, - } - if err = api.Filter(context.Background(), traceReq1, stream); err != nil { - t.Fatalf("trace_filter failed: %v", err) - } - - assert.Equal(t, []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, blockNumbersFromTraces(t, buf.Bytes())) - if err = m.InsertChain(chainB.Slice(0, 12)); err != nil { - t.Fatalf("inserting chainB: %v", err) - } - buf.Reset() - toBlock = 12 - traceReq2 := TraceFilterRequest{ - FromBlock: (*hexutil.Uint64)(&fromBlock), - ToBlock: (*hexutil.Uint64)(&toBlock), - ToAddress: []*common.Address{&toAddress1}, - } - if err = api.Filter(context.Background(), traceReq2, stream); err != nil { - t.Fatalf("trace_filter failed: %v", err) - } - assert.Equal(t, []int{1, 2, 3, 4, 5, 11, 12}, blockNumbersFromTraces(t, buf.Bytes())) - if err = m.InsertChain(chainB.Slice(12, 20)); err != nil { - t.Fatalf("inserting chainB: %v", err) - } - buf.Reset() - fromBlock = 12 - toBlock = 20 - traceReq3 := TraceFilterRequest{ - FromBlock: (*hexutil.Uint64)(&fromBlock), - ToBlock: (*hexutil.Uint64)(&toBlock), - ToAddress: []*common.Address{&toAddress1}, - } - if err = api.Filter(context.Background(), traceReq3, stream); err != nil { - t.Fatalf("trace_filter failed: %v", err) - } - assert.Equal(t, []int{12, 13, 14, 15, 16, 17, 18, 19, 20}, blockNumbersFromTraces(t, buf.Bytes())) -} - -func TestFilterNoAddresses(t *testing.T) { - t.Skip() - m := stages.Mock(t) - defer m.DB.Close() - chain, err := 
core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { - gen.SetCoinbase(common.Address{1}) - }, false /* intermediateHashes */) - if err != nil { - t.Fatalf("generate chain: %v", err) - } - api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), nil, nil, false), m.DB, &httpcfg.HttpCfg{}) - // Insert blocks 1 by 1, to tirgget possible "off by one" errors - for i := 0; i < chain.Length(); i++ { - if err = m.InsertChain(chain.Slice(i, i+1)); err != nil { - t.Fatalf("inserting chain: %v", err) - } - } - var buf bytes.Buffer - stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) - var fromBlock, toBlock uint64 - fromBlock = 1 - toBlock = 10 - traceReq1 := TraceFilterRequest{ - FromBlock: (*hexutil.Uint64)(&fromBlock), - ToBlock: (*hexutil.Uint64)(&toBlock), - } - if err = api.Filter(context.Background(), traceReq1, stream); err != nil { - t.Fatalf("trace_filter failed: %v", err) - } - assert.Equal(t, []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, blockNumbersFromTraces(t, buf.Bytes())) -} - -func TestFilterAddressIntersection(t *testing.T) { - t.Skip() - m := stages.Mock(t) - defer m.DB.Close() - - api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), nil, nil, false), m.DB, &httpcfg.HttpCfg{}) - - toAddress1, toAddress2, other := common.Address{1}, common.Address{2}, common.Address{3} - - once := new(sync.Once) - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 15, func(i int, block *core.BlockGen) { - once.Do(func() { block.SetCoinbase(common.Address{4}) }) - - var rcv common.Address - if i < 5 { - rcv = toAddress1 - } else if i < 10 { - rcv = toAddress2 - } else { - rcv = other - } - - signer := types.LatestSigner(m.ChainConfig) - txn, err := types.SignTx(types.NewTransaction(block.TxNonce(m.Address), rcv, new(uint256.Int), 21000, new(uint256.Int), nil), *signer, m.Key) - if err != nil 
{ - t.Fatal(err) - } - block.AddTx(txn) - }, false /* intermediateHashes */) - require.NoError(t, err, "generate chain") - - err = m.InsertChain(chain) - require.NoError(t, err, "inserting chain") - - fromBlock, toBlock := uint64(1), uint64(15) - t.Run("second", func(t *testing.T) { - stream := jsoniter.ConfigDefault.BorrowStream(nil) - defer jsoniter.ConfigDefault.ReturnStream(stream) - - traceReq1 := TraceFilterRequest{ - FromBlock: (*hexutil.Uint64)(&fromBlock), - ToBlock: (*hexutil.Uint64)(&toBlock), - FromAddress: []*common.Address{&m.Address, &other}, - ToAddress: []*common.Address{&m.Address, &toAddress2}, - Mode: TraceFilterModeIntersection, - } - if err = api.Filter(context.Background(), traceReq1, stream); err != nil { - t.Fatalf("trace_filter failed: %v", err) - } - assert.Equal(t, []int{6, 7, 8, 9, 10}, blockNumbersFromTraces(t, stream.Buffer())) - }) - t.Run("first", func(t *testing.T) { - stream := jsoniter.ConfigDefault.BorrowStream(nil) - defer jsoniter.ConfigDefault.ReturnStream(stream) - - traceReq1 := TraceFilterRequest{ - FromBlock: (*hexutil.Uint64)(&fromBlock), - ToBlock: (*hexutil.Uint64)(&toBlock), - FromAddress: []*common.Address{&m.Address, &other}, - ToAddress: []*common.Address{&toAddress1, &m.Address}, - Mode: TraceFilterModeIntersection, - } - if err = api.Filter(context.Background(), traceReq1, stream); err != nil { - t.Fatalf("trace_filter failed: %v", err) - } - assert.Equal(t, []int{1, 2, 3, 4, 5}, blockNumbersFromTraces(t, stream.Buffer())) - }) - t.Run("empty", func(t *testing.T) { - stream := jsoniter.ConfigDefault.BorrowStream(nil) - defer jsoniter.ConfigDefault.ReturnStream(stream) - - traceReq1 := TraceFilterRequest{ - FromBlock: (*hexutil.Uint64)(&fromBlock), - ToBlock: (*hexutil.Uint64)(&toBlock), - ToAddress: []*common.Address{&other}, - FromAddress: []*common.Address{&toAddress2, &toAddress1, &other}, - Mode: TraceFilterModeIntersection, - } - if err = api.Filter(context.Background(), traceReq1, stream); err != nil { - 
t.Fatalf("trace_filter failed: %v", err) - } - require.Empty(t, blockNumbersFromTraces(t, stream.Buffer())) - }) -} diff --git a/cmd/rpcdaemon22/commands/contracts/build/Poly.abi b/cmd/rpcdaemon22/commands/contracts/build/Poly.abi deleted file mode 100644 index 95e590dcd3c..00000000000 --- a/cmd/rpcdaemon22/commands/contracts/build/Poly.abi +++ /dev/null @@ -1 +0,0 @@ -[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"d","type":"address"}],"name":"DeployEvent","type":"event"},{"inputs":[{"internalType":"uint256","name":"salt","type":"uint256"}],"name":"deploy","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"salt","type":"uint256"}],"name":"deployAndDestruct","outputs":[],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file diff --git a/cmd/rpcdaemon22/commands/contracts/build/Poly.bin b/cmd/rpcdaemon22/commands/contracts/build/Poly.bin deleted file mode 100644 index 4223e1ab18a..00000000000 --- a/cmd/rpcdaemon22/commands/contracts/build/Poly.bin +++ /dev/null @@ -1 +0,0 @@ 
-608060405234801561001057600080fd5b506101d1806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80639debe9811461003b578063a5e387511461005a575b600080fd5b6100586004803603602081101561005157600080fd5b5035610077565b005b6100586004803603602081101561007057600080fd5b50356100fd565b6040805180820190915260138082527260606000534360015360ff60025360036000f360681b60208301908152600091849183f59050600080600080600085620186a0f150604080516001600160a01b038316815290517f68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb9181900360200190a1505050565b6040805180820190915260138082527260606000534360015360ff60025360036000f360681b60208301908152600091849183f5604080516001600160a01b038316815290519192507f68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb919081900360200190a150505056fea2646970667358221220c4436dde70fbebb14cf02477e4d8f270620c7f9f54b9b1a2e09b1edcc8c6db6764736f6c637827302e372e352d646576656c6f702e323032302e31322e392b636f6d6d69742e65623737656430380058 \ No newline at end of file diff --git a/cmd/rpcdaemon22/commands/contracts/build/Token.abi b/cmd/rpcdaemon22/commands/contracts/build/Token.abi deleted file mode 100644 index 20efed58391..00000000000 --- a/cmd/rpcdaemon22/commands/contracts/build/Token.abi +++ /dev/null @@ -1 +0,0 @@ 
-[{"inputs":[{"internalType":"address","name":"_minter","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_to","type":"address"},{"internalType":"uint256","name":"_value","type":"uint256"}],"name":"mint","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"minter","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_to","type":"address"},{"internalType":"uint256","name":"_value","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file diff --git a/cmd/rpcdaemon22/commands/contracts/build/Token.bin b/cmd/rpcdaemon22/commands/contracts/build/Token.bin deleted file mode 100644 index 803e31eebc2..00000000000 --- a/cmd/rpcdaemon22/commands/contracts/build/Token.bin +++ /dev/null @@ -1 +0,0 @@ 
-608060405234801561001057600080fd5b506040516102cd3803806102cd8339818101604052602081101561003357600080fd5b5051600280546001600160a01b0319166001600160a01b0390921691909117905561026a806100636000396000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c8063075461721461005c57806318160ddd1461008057806340c10f191461009a57806370a08231146100da578063a9059cbb14610100575b600080fd5b61006461012c565b604080516001600160a01b039092168252519081900360200190f35b61008861013b565b60408051918252519081900360200190f35b6100c6600480360360408110156100b057600080fd5b506001600160a01b038135169060200135610141565b604080519115158252519081900360200190f35b610088600480360360208110156100f057600080fd5b50356001600160a01b03166101b1565b6100c66004803603604081101561011657600080fd5b506001600160a01b0381351690602001356101c3565b6002546001600160a01b031681565b60005481565b6002546000906001600160a01b0316331461015b57600080fd5b6001600160a01b03831660009081526001602052604090205482810181111561018357600080fd5b6001600160a01b03841660009081526001602081905260408220928501909255805484019055905092915050565b60016020526000908152604090205481565b33600090815260016020526040808220546001600160a01b038516835290822054838210156101f157600080fd5b80848201101561020057600080fd5b336000908152600160208190526040808320948790039094556001600160a01b03969096168152919091209201909155509056fea2646970667358221220db4c7b3ba8d073604af68ade92006926639bb4003f2a18929524d580777155fb64736f6c63430007020033 \ No newline at end of file diff --git a/cmd/rpcdaemon22/commands/contracts/gen.go b/cmd/rpcdaemon22/commands/contracts/gen.go deleted file mode 100644 index 96e2eff812c..00000000000 --- a/cmd/rpcdaemon22/commands/contracts/gen.go +++ /dev/null @@ -1,4 +0,0 @@ -package contracts - -//go:generate solc --allow-paths ., --abi --bin --overwrite --optimize -o build token.sol -//go:generate abigen -abi build/Token.abi -bin build/Token.bin -pkg contracts -type token -out ./gen_token.go diff --git a/cmd/rpcdaemon22/commands/contracts/gen_poly.go 
b/cmd/rpcdaemon22/commands/contracts/gen_poly.go deleted file mode 100644 index e4bfb1f2997..00000000000 --- a/cmd/rpcdaemon22/commands/contracts/gen_poly.go +++ /dev/null @@ -1,364 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package contracts - -import ( - "math/big" - "strings" - - ethereum "github.com/ledgerwatch/erigon" - "github.com/ledgerwatch/erigon/accounts/abi" - "github.com/ledgerwatch/erigon/accounts/abi/bind" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription -) - -// PolyABI is the input ABI used to generate the binding from. -const PolyABI = "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"d\",\"type\":\"address\"}],\"name\":\"DeployEvent\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"salt\",\"type\":\"uint256\"}],\"name\":\"deploy\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"salt\",\"type\":\"uint256\"}],\"name\":\"deployAndDestruct\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" - -// PolyBin is the compiled bytecode used for deploying new contracts. 
-var PolyBin = "0x608060405234801561001057600080fd5b506101d1806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80639debe9811461003b578063a5e387511461005a575b600080fd5b6100586004803603602081101561005157600080fd5b5035610077565b005b6100586004803603602081101561007057600080fd5b50356100fd565b6040805180820190915260138082527260606000534360015360ff60025360036000f360681b60208301908152600091849183f59050600080600080600085620186a0f150604080516001600160a01b038316815290517f68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb9181900360200190a1505050565b6040805180820190915260138082527260606000534360015360ff60025360036000f360681b60208301908152600091849183f5604080516001600160a01b038316815290519192507f68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb919081900360200190a150505056fea2646970667358221220c4436dde70fbebb14cf02477e4d8f270620c7f9f54b9b1a2e09b1edcc8c6db6764736f6c637827302e372e352d646576656c6f702e323032302e31322e392b636f6d6d69742e65623737656430380058" - -// DeployPoly deploys a new Ethereum contract, binding an instance of Poly to it. -func DeployPoly(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, types.Transaction, *Poly, error) { - parsed, err := abi.JSON(strings.NewReader(PolyABI)) - if err != nil { - return common.Address{}, nil, nil, err - } - - address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(PolyBin), backend) - if err != nil { - return common.Address{}, nil, nil, err - } - return address, tx, &Poly{PolyCaller: PolyCaller{contract: contract}, PolyTransactor: PolyTransactor{contract: contract}, PolyFilterer: PolyFilterer{contract: contract}}, nil -} - -// Poly is an auto generated Go binding around an Ethereum contract. 
-type Poly struct { - PolyCaller // Read-only binding to the contract - PolyTransactor // Write-only binding to the contract - PolyFilterer // Log filterer for contract events -} - -// PolyCaller is an auto generated read-only Go binding around an Ethereum contract. -type PolyCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// PolyTransactor is an auto generated write-only Go binding around an Ethereum contract. -type PolyTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// PolyFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type PolyFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// PolySession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. -type PolySession struct { - Contract *Poly // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// PolyCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type PolyCallerSession struct { - Contract *PolyCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// PolyTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type PolyTransactorSession struct { - Contract *PolyTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// PolyRaw is an auto generated low-level Go binding around an Ethereum contract. 
-type PolyRaw struct { - Contract *Poly // Generic contract binding to access the raw methods on -} - -// PolyCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type PolyCallerRaw struct { - Contract *PolyCaller // Generic read-only contract binding to access the raw methods on -} - -// PolyTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. -type PolyTransactorRaw struct { - Contract *PolyTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewPoly creates a new instance of Poly, bound to a specific deployed contract. -func NewPoly(address common.Address, backend bind.ContractBackend) (*Poly, error) { - contract, err := bindPoly(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &Poly{PolyCaller: PolyCaller{contract: contract}, PolyTransactor: PolyTransactor{contract: contract}, PolyFilterer: PolyFilterer{contract: contract}}, nil -} - -// NewPolyCaller creates a new read-only instance of Poly, bound to a specific deployed contract. -func NewPolyCaller(address common.Address, caller bind.ContractCaller) (*PolyCaller, error) { - contract, err := bindPoly(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &PolyCaller{contract: contract}, nil -} - -// NewPolyTransactor creates a new write-only instance of Poly, bound to a specific deployed contract. -func NewPolyTransactor(address common.Address, transactor bind.ContractTransactor) (*PolyTransactor, error) { - contract, err := bindPoly(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &PolyTransactor{contract: contract}, nil -} - -// NewPolyFilterer creates a new log filterer instance of Poly, bound to a specific deployed contract. 
-func NewPolyFilterer(address common.Address, filterer bind.ContractFilterer) (*PolyFilterer, error) { - contract, err := bindPoly(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &PolyFilterer{contract: contract}, nil -} - -// bindPoly binds a generic wrapper to an already deployed contract. -func bindPoly(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(PolyABI)) - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_Poly *PolyRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _Poly.Contract.PolyCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_Poly *PolyRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) { - return _Poly.Contract.PolyTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_Poly *PolyRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) { - return _Poly.Contract.PolyTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. 
-func (_Poly *PolyCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _Poly.Contract.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_Poly *PolyTransactorRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) { - return _Poly.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_Poly *PolyTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) { - return _Poly.Contract.contract.Transact(opts, method, params...) -} - -// Deploy is a paid mutator transaction binding the contract method 0xa5e38751. -// -// Solidity: function deploy(uint256 salt) returns() -func (_Poly *PolyTransactor) Deploy(opts *bind.TransactOpts, salt *big.Int) (types.Transaction, error) { - return _Poly.contract.Transact(opts, "deploy", salt) -} - -// Deploy is a paid mutator transaction binding the contract method 0xa5e38751. -// -// Solidity: function deploy(uint256 salt) returns() -func (_Poly *PolySession) Deploy(salt *big.Int) (types.Transaction, error) { - return _Poly.Contract.Deploy(&_Poly.TransactOpts, salt) -} - -// Deploy is a paid mutator transaction binding the contract method 0xa5e38751. -// -// Solidity: function deploy(uint256 salt) returns() -func (_Poly *PolyTransactorSession) Deploy(salt *big.Int) (types.Transaction, error) { - return _Poly.Contract.Deploy(&_Poly.TransactOpts, salt) -} - -// DeployAndDestruct is a paid mutator transaction binding the contract method 0x9debe981. 
-// -// Solidity: function deployAndDestruct(uint256 salt) returns() -func (_Poly *PolyTransactor) DeployAndDestruct(opts *bind.TransactOpts, salt *big.Int) (types.Transaction, error) { - return _Poly.contract.Transact(opts, "deployAndDestruct", salt) -} - -// DeployAndDestruct is a paid mutator transaction binding the contract method 0x9debe981. -// -// Solidity: function deployAndDestruct(uint256 salt) returns() -func (_Poly *PolySession) DeployAndDestruct(salt *big.Int) (types.Transaction, error) { - return _Poly.Contract.DeployAndDestruct(&_Poly.TransactOpts, salt) -} - -// DeployAndDestruct is a paid mutator transaction binding the contract method 0x9debe981. -// -// Solidity: function deployAndDestruct(uint256 salt) returns() -func (_Poly *PolyTransactorSession) DeployAndDestruct(salt *big.Int) (types.Transaction, error) { - return _Poly.Contract.DeployAndDestruct(&_Poly.TransactOpts, salt) -} - -// PolyDeployEventIterator is returned from FilterDeployEvent and is used to iterate over the raw logs and unpacked data for DeployEvent events raised by the Poly contract. -type PolyDeployEventIterator struct { - Event *PolyDeployEvent // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *PolyDeployEventIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(PolyDeployEvent) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(PolyDeployEvent) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *PolyDeployEventIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *PolyDeployEventIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// PolyDeployEvent represents a DeployEvent event raised by the Poly contract. -type PolyDeployEvent struct { - D common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterDeployEvent is a free log retrieval operation binding the contract event 0x68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb. 
-// -// Solidity: event DeployEvent(address d) -func (_Poly *PolyFilterer) FilterDeployEvent(opts *bind.FilterOpts) (*PolyDeployEventIterator, error) { - - logs, sub, err := _Poly.contract.FilterLogs(opts, "DeployEvent") - if err != nil { - return nil, err - } - return &PolyDeployEventIterator{contract: _Poly.contract, event: "DeployEvent", logs: logs, sub: sub}, nil -} - -// WatchDeployEvent is a free log subscription operation binding the contract event 0x68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb. -// -// Solidity: event DeployEvent(address d) -func (_Poly *PolyFilterer) WatchDeployEvent(opts *bind.WatchOpts, sink chan<- *PolyDeployEvent) (event.Subscription, error) { - - logs, sub, err := _Poly.contract.WatchLogs(opts, "DeployEvent") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(PolyDeployEvent) - if err := _Poly.contract.UnpackLog(event, "DeployEvent", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseDeployEvent is a log parse operation binding the contract event 0x68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb. 
-// -// Solidity: event DeployEvent(address d) -func (_Poly *PolyFilterer) ParseDeployEvent(log types.Log) (*PolyDeployEvent, error) { - event := new(PolyDeployEvent) - if err := _Poly.contract.UnpackLog(event, "DeployEvent", log); err != nil { - return nil, err - } - return event, nil -} diff --git a/cmd/rpcdaemon22/commands/contracts/gen_token.go b/cmd/rpcdaemon22/commands/contracts/gen_token.go deleted file mode 100644 index 4d276e0b56c..00000000000 --- a/cmd/rpcdaemon22/commands/contracts/gen_token.go +++ /dev/null @@ -1,324 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package contracts - -import ( - "math/big" - "strings" - - ethereum "github.com/ledgerwatch/erigon" - "github.com/ledgerwatch/erigon/accounts/abi" - "github.com/ledgerwatch/erigon/accounts/abi/bind" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription -) - -// TokenABI is the input ABI used to generate the binding from. 
-const TokenABI = "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_minter\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"balanceOf\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"mint\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"minter\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"totalSupply\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"transfer\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" - -// TokenBin is the compiled bytecode used for deploying new contracts. 
-var TokenBin = "0x608060405234801561001057600080fd5b506040516102cd3803806102cd8339818101604052602081101561003357600080fd5b5051600280546001600160a01b0319166001600160a01b0390921691909117905561026a806100636000396000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c8063075461721461005c57806318160ddd1461008057806340c10f191461009a57806370a08231146100da578063a9059cbb14610100575b600080fd5b61006461012c565b604080516001600160a01b039092168252519081900360200190f35b61008861013b565b60408051918252519081900360200190f35b6100c6600480360360408110156100b057600080fd5b506001600160a01b038135169060200135610141565b604080519115158252519081900360200190f35b610088600480360360208110156100f057600080fd5b50356001600160a01b03166101b1565b6100c66004803603604081101561011657600080fd5b506001600160a01b0381351690602001356101c3565b6002546001600160a01b031681565b60005481565b6002546000906001600160a01b0316331461015b57600080fd5b6001600160a01b03831660009081526001602052604090205482810181111561018357600080fd5b6001600160a01b03841660009081526001602081905260408220928501909255805484019055905092915050565b60016020526000908152604090205481565b33600090815260016020526040808220546001600160a01b038516835290822054838210156101f157600080fd5b80848201101561020057600080fd5b336000908152600160208190526040808320948790039094556001600160a01b03969096168152919091209201909155509056fea2646970667358221220db4c7b3ba8d073604af68ade92006926639bb4003f2a18929524d580777155fb64736f6c63430007020033" - -// DeployToken deploys a new Ethereum contract, binding an instance of Token to it. 
-func DeployToken(auth *bind.TransactOpts, backend bind.ContractBackend, _minter common.Address) (common.Address, types.Transaction, *Token, error) { - parsed, err := abi.JSON(strings.NewReader(TokenABI)) - if err != nil { - return common.Address{}, nil, nil, err - } - - address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(TokenBin), backend, _minter) - if err != nil { - return common.Address{}, nil, nil, err - } - return address, tx, &Token{TokenCaller: TokenCaller{contract: contract}, TokenTransactor: TokenTransactor{contract: contract}, TokenFilterer: TokenFilterer{contract: contract}}, nil -} - -// Token is an auto generated Go binding around an Ethereum contract. -type Token struct { - TokenCaller // Read-only binding to the contract - TokenTransactor // Write-only binding to the contract - TokenFilterer // Log filterer for contract events -} - -// TokenCaller is an auto generated read-only Go binding around an Ethereum contract. -type TokenCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// TokenTransactor is an auto generated write-only Go binding around an Ethereum contract. -type TokenTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// TokenFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type TokenFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// TokenSession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. 
-type TokenSession struct { - Contract *Token // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// TokenCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type TokenCallerSession struct { - Contract *TokenCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// TokenTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type TokenTransactorSession struct { - Contract *TokenTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// TokenRaw is an auto generated low-level Go binding around an Ethereum contract. -type TokenRaw struct { - Contract *Token // Generic contract binding to access the raw methods on -} - -// TokenCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type TokenCallerRaw struct { - Contract *TokenCaller // Generic read-only contract binding to access the raw methods on -} - -// TokenTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. -type TokenTransactorRaw struct { - Contract *TokenTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewToken creates a new instance of Token, bound to a specific deployed contract. 
-func NewToken(address common.Address, backend bind.ContractBackend) (*Token, error) { - contract, err := bindToken(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &Token{TokenCaller: TokenCaller{contract: contract}, TokenTransactor: TokenTransactor{contract: contract}, TokenFilterer: TokenFilterer{contract: contract}}, nil -} - -// NewTokenCaller creates a new read-only instance of Token, bound to a specific deployed contract. -func NewTokenCaller(address common.Address, caller bind.ContractCaller) (*TokenCaller, error) { - contract, err := bindToken(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &TokenCaller{contract: contract}, nil -} - -// NewTokenTransactor creates a new write-only instance of Token, bound to a specific deployed contract. -func NewTokenTransactor(address common.Address, transactor bind.ContractTransactor) (*TokenTransactor, error) { - contract, err := bindToken(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &TokenTransactor{contract: contract}, nil -} - -// NewTokenFilterer creates a new log filterer instance of Token, bound to a specific deployed contract. -func NewTokenFilterer(address common.Address, filterer bind.ContractFilterer) (*TokenFilterer, error) { - contract, err := bindToken(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &TokenFilterer{contract: contract}, nil -} - -// bindToken binds a generic wrapper to an already deployed contract. -func bindToken(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(TokenABI)) - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. 
The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_Token *TokenRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _Token.Contract.TokenCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_Token *TokenRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) { - return _Token.Contract.TokenTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_Token *TokenRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) { - return _Token.Contract.TokenTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_Token *TokenCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _Token.Contract.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_Token *TokenTransactorRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) { - return _Token.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_Token *TokenTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) { - return _Token.Contract.contract.Transact(opts, method, params...) 
-} - -// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. -// -// Solidity: function balanceOf(address ) view returns(uint256) -func (_Token *TokenCaller) BalanceOf(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) { - var out []interface{} - err := _Token.contract.Call(opts, &out, "balanceOf", arg0) - - if err != nil { - return *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err - -} - -// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. -// -// Solidity: function balanceOf(address ) view returns(uint256) -func (_Token *TokenSession) BalanceOf(arg0 common.Address) (*big.Int, error) { - return _Token.Contract.BalanceOf(&_Token.CallOpts, arg0) -} - -// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. -// -// Solidity: function balanceOf(address ) view returns(uint256) -func (_Token *TokenCallerSession) BalanceOf(arg0 common.Address) (*big.Int, error) { - return _Token.Contract.BalanceOf(&_Token.CallOpts, arg0) -} - -// Minter is a free data retrieval call binding the contract method 0x07546172. -// -// Solidity: function minter() view returns(address) -func (_Token *TokenCaller) Minter(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _Token.contract.Call(opts, &out, "minter") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -// Minter is a free data retrieval call binding the contract method 0x07546172. -// -// Solidity: function minter() view returns(address) -func (_Token *TokenSession) Minter() (common.Address, error) { - return _Token.Contract.Minter(&_Token.CallOpts) -} - -// Minter is a free data retrieval call binding the contract method 0x07546172. 
-// -// Solidity: function minter() view returns(address) -func (_Token *TokenCallerSession) Minter() (common.Address, error) { - return _Token.Contract.Minter(&_Token.CallOpts) -} - -// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. -// -// Solidity: function totalSupply() view returns(uint256) -func (_Token *TokenCaller) TotalSupply(opts *bind.CallOpts) (*big.Int, error) { - var out []interface{} - err := _Token.contract.Call(opts, &out, "totalSupply") - - if err != nil { - return *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err - -} - -// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. -// -// Solidity: function totalSupply() view returns(uint256) -func (_Token *TokenSession) TotalSupply() (*big.Int, error) { - return _Token.Contract.TotalSupply(&_Token.CallOpts) -} - -// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. -// -// Solidity: function totalSupply() view returns(uint256) -func (_Token *TokenCallerSession) TotalSupply() (*big.Int, error) { - return _Token.Contract.TotalSupply(&_Token.CallOpts) -} - -// Mint is a paid mutator transaction binding the contract method 0x40c10f19. -// -// Solidity: function mint(address _to, uint256 _value) returns(bool) -func (_Token *TokenTransactor) Mint(opts *bind.TransactOpts, _to common.Address, _value *big.Int) (types.Transaction, error) { - return _Token.contract.Transact(opts, "mint", _to, _value) -} - -// Mint is a paid mutator transaction binding the contract method 0x40c10f19. -// -// Solidity: function mint(address _to, uint256 _value) returns(bool) -func (_Token *TokenSession) Mint(_to common.Address, _value *big.Int) (types.Transaction, error) { - return _Token.Contract.Mint(&_Token.TransactOpts, _to, _value) -} - -// Mint is a paid mutator transaction binding the contract method 0x40c10f19. 
-// -// Solidity: function mint(address _to, uint256 _value) returns(bool) -func (_Token *TokenTransactorSession) Mint(_to common.Address, _value *big.Int) (types.Transaction, error) { - return _Token.Contract.Mint(&_Token.TransactOpts, _to, _value) -} - -// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. -// -// Solidity: function transfer(address _to, uint256 _value) returns(bool) -func (_Token *TokenTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _value *big.Int) (types.Transaction, error) { - return _Token.contract.Transact(opts, "transfer", _to, _value) -} - -// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. -// -// Solidity: function transfer(address _to, uint256 _value) returns(bool) -func (_Token *TokenSession) Transfer(_to common.Address, _value *big.Int) (types.Transaction, error) { - return _Token.Contract.Transfer(&_Token.TransactOpts, _to, _value) -} - -// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. 
-// -// Solidity: function transfer(address _to, uint256 _value) returns(bool) -func (_Token *TokenTransactorSession) Transfer(_to common.Address, _value *big.Int) (types.Transaction, error) { - return _Token.Contract.Transfer(&_Token.TransactOpts, _to, _value) -} diff --git a/cmd/rpcdaemon22/commands/contracts/poly.sol b/cmd/rpcdaemon22/commands/contracts/poly.sol deleted file mode 100644 index fab4fbc068b..00000000000 --- a/cmd/rpcdaemon22/commands/contracts/poly.sol +++ /dev/null @@ -1,36 +0,0 @@ -pragma solidity >=0.5.0; - -// solc --allow-paths ., --abi --bin --overwrite --optimize -o cmd/rpcdaemon/commands/contracts/build cmd/rpcdaemon/commands/contracts/poly.sol -// ./build/bin/abigen -abi cmd/rpcdaemon/commands/contracts/build/Poly.abi -bin cmd/rpcdaemon/commands/contracts/build/Poly.bin -pkg contracts -type poly -out cmd/rpcdaemon/commands/contracts/gen_poly.go -contract Poly { - - constructor() { - } - - event DeployEvent (address d); - - /* Deploys self-destructing contract with given salt and emits DeployEvent with the address of the created contract */ - function deploy(uint256 salt) public { - // PUSH1 0x60; PUSH1 0; MSTORE8; NUMBER; PUSH1 1; MSTORE8; PUSH1 0xff; PUSH1 2; MSTORE8; PUSH1 3; PUSH1 0; RETURN; - // Returns code 60ff, which is PUSH1 ; SELFDESTRUCT. Value is determined by the block number where deploy function is called - bytes memory init_code = hex"60606000534360015360ff60025360036000f3"; - address payable d; - assembly{ - d := create2(0, add(init_code, 32), mload(init_code), salt) - } - emit DeployEvent(d); - } - - /* Deploys self-destructing contract with given salt and emits DeployEvent with the address of the created contract */ - function deployAndDestruct(uint256 salt) public { - // PUSH1 0x60; PUSH1 0; MSTORE8; NUMBER; PUSH1 1; MSTORE8; PUSH1 0xff; PUSH1 2; MSTORE8; PUSH1 3; PUSH1 0; RETURN; - // Returns code 60ff, which is PUSH1 ; SELFDESTRUCT. 
Value is determined by the block number where deploy function is called - bytes memory init_code = hex"60606000534360015360ff60025360036000f3"; - address payable d; - assembly{ - d := create2(0, add(init_code, 32), mload(init_code), salt) - pop(call(100000, d, 0, 0, 0, 0, 0)) - } - emit DeployEvent(d); - } -} diff --git a/cmd/rpcdaemon22/commands/contracts/token.sol b/cmd/rpcdaemon22/commands/contracts/token.sol deleted file mode 100644 index 755bdfddd4f..00000000000 --- a/cmd/rpcdaemon22/commands/contracts/token.sol +++ /dev/null @@ -1,39 +0,0 @@ -pragma solidity >=0.6.0; - -// solc --allow-paths ., --abi --bin --overwrite --optimize -o cmd/rpcdaemon/commands/contracts/build cmd/pics/contracts/token.sol -// ./build/bin/abigen -abi cmd/rpcdaemon/commands/contracts/build/Token.abi -bin cmd/rpcdaemon/commands/contracts/build/Token.bin -pkg contracts -type token -out cmd/rpcdaemon/commands/contracts/gen_token.go -contract Token { - uint256 public totalSupply; - mapping(address => uint256) public balanceOf; - address public minter; - - constructor(address _minter) public { - minter = _minter; - } - - /* Send tokens */ - function transfer(address _to, uint256 _value) public returns (bool) { - uint256 fromBalance = balanceOf[msg.sender]; - uint256 toBalance = balanceOf[_to]; - require(fromBalance >= _value); - // Check if the sender has enough - require(toBalance + _value >= toBalance); - // Check for overflows - balanceOf[msg.sender] = fromBalance - _value; - // Subtract from the sender - balanceOf[_to] = toBalance + _value; - return true; - } - - /* Allows the owner to mint more tokens */ - function mint(address _to, uint256 _value) public returns (bool) { - require(msg.sender == minter); - // Only the minter is allowed to mint - uint256 toBalance = balanceOf[_to]; - require(toBalance + _value >= toBalance); - // Check for overflows - balanceOf[_to] = toBalance + _value; - totalSupply += _value; - return true; - } -} diff --git 
a/cmd/rpcdaemon22/commands/corner_cases_support_test.go b/cmd/rpcdaemon22/commands/corner_cases_support_test.go deleted file mode 100644 index 0b95ba2c657..00000000000 --- a/cmd/rpcdaemon22/commands/corner_cases_support_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package commands - -import ( - "context" - "testing" - - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/stretchr/testify/require" -) - -// TestNotFoundMustReturnNil - next methods - when record not found in db - must return nil instead of error -// see https://github.com/ledgerwatch/erigon/issues/1645 -func TestNotFoundMustReturnNil(t *testing.T) { - require := require.New(t) - db := rpcdaemontest.CreateTestKV(t) - defer db.Close() - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI( - NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), - db, nil, nil, nil, 5000000) - ctx := context.Background() - - a, err := api.GetTransactionByBlockNumberAndIndex(ctx, 10_000, 1) - require.Nil(a) - require.Nil(err) - - b, err := api.GetTransactionByBlockHashAndIndex(ctx, common.Hash{}, 1) - require.Nil(b) - require.Nil(err) - - c, err := api.GetTransactionByBlockNumberAndIndex(ctx, 10_000, 1) - require.Nil(c) - require.Nil(err) - - d, err := api.GetTransactionReceipt(ctx, common.Hash{}) - require.Nil(d) - require.Nil(err) - - e, err := api.GetBlockByHash(ctx, rpc.BlockNumberOrHashWithHash(common.Hash{}, true), false) - require.Nil(e) - require.Nil(err) - - f, err := api.GetBlockByNumber(ctx, 10_000, false) - require.Nil(f) - require.Nil(err) - - g, err := api.GetUncleByBlockHashAndIndex(ctx, common.Hash{}, 1) - require.Nil(g) - require.Nil(err) - - h, err := api.GetUncleByBlockNumberAndIndex(ctx, 10_000, 1) - require.Nil(h) - require.Nil(err) - - j, err := 
api.GetBlockTransactionCountByNumber(ctx, 10_000) - require.Nil(j) - require.Nil(err) -} diff --git a/cmd/rpcdaemon22/commands/daemon.go b/cmd/rpcdaemon22/commands/daemon.go deleted file mode 100644 index ca2029f3fe4..00000000000 --- a/cmd/rpcdaemon22/commands/daemon.go +++ /dev/null @@ -1,135 +0,0 @@ -package commands - -import ( - "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - libstate "github.com/ledgerwatch/erigon-lib/state" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/services" -) - -// APIList describes the list of available RPC apis -func APIList(db kv.RoDB, borDb kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, - starknet starknet.CAIROVMClient, filters *rpchelper.Filters, stateCache kvcache.Cache, - blockReader services.FullBlockReader, agg *libstate.Aggregator, txNums []uint64, cfg httpcfg.HttpCfg) (list []rpc.API) { - - base := NewBaseApi(filters, stateCache, blockReader, agg, txNums, cfg.WithDatadir) - if cfg.TevmEnabled { - base.EnableTevmExperiment() - } - ethImpl := NewEthAPI(base, db, eth, txPool, mining, cfg.Gascap) - erigonImpl := NewErigonAPI(base, db, eth) - starknetImpl := NewStarknetAPI(base, db, starknet, txPool) - txpoolImpl := NewTxPoolAPI(base, db, txPool) - netImpl := NewNetAPIImpl(eth) - debugImpl := NewPrivateDebugAPI(base, db, cfg.Gascap) - traceImpl := NewTraceAPI(base, db, &cfg) - web3Impl := NewWeb3APIImpl(eth) - dbImpl := NewDBAPIImpl() /* deprecated */ - engineImpl := NewEngineAPI(base, db, eth) - adminImpl := NewAdminAPI(eth) - parityImpl := NewParityAPIImpl(db) - borImpl := NewBorAPI(base, db, borDb) // bor (consensus) specific - - for _, enabledAPI := range cfg.API { - switch enabledAPI 
{ - case "eth": - list = append(list, rpc.API{ - Namespace: "eth", - Public: true, - Service: EthAPI(ethImpl), - Version: "1.0", - }) - case "debug": - list = append(list, rpc.API{ - Namespace: "debug", - Public: true, - Service: PrivateDebugAPI(debugImpl), - Version: "1.0", - }) - case "net": - list = append(list, rpc.API{ - Namespace: "net", - Public: true, - Service: NetAPI(netImpl), - Version: "1.0", - }) - case "txpool": - list = append(list, rpc.API{ - Namespace: "txpool", - Public: true, - Service: TxPoolAPI(txpoolImpl), - Version: "1.0", - }) - case "web3": - list = append(list, rpc.API{ - Namespace: "web3", - Public: true, - Service: Web3API(web3Impl), - Version: "1.0", - }) - case "trace": - list = append(list, rpc.API{ - Namespace: "trace", - Public: true, - Service: TraceAPI(traceImpl), - Version: "1.0", - }) - case "db": /* Deprecated */ - list = append(list, rpc.API{ - Namespace: "db", - Public: true, - Service: DBAPI(dbImpl), - Version: "1.0", - }) - case "erigon": - list = append(list, rpc.API{ - Namespace: "erigon", - Public: true, - Service: ErigonAPI(erigonImpl), - Version: "1.0", - }) - case "starknet": - list = append(list, rpc.API{ - Namespace: "starknet", - Public: true, - Service: StarknetAPI(starknetImpl), - Version: "1.0", - }) - case "engine": - list = append(list, rpc.API{ - Namespace: "engine", - Public: true, - Service: EngineAPI(engineImpl), - Version: "1.0", - }) - case "bor": - list = append(list, rpc.API{ - Namespace: "bor", - Public: true, - Service: BorAPI(borImpl), - Version: "1.0", - }) - case "admin": - list = append(list, rpc.API{ - Namespace: "admin", - Public: false, - Service: AdminAPI(adminImpl), - Version: "1.0", - }) - case "parity": - list = append(list, rpc.API{ - Namespace: "parity", - Public: false, - Service: ParityAPI(parityImpl), - Version: "1.0", - }) - } - } - - return list -} diff --git a/cmd/rpcdaemon22/commands/db_api_deprecated.go b/cmd/rpcdaemon22/commands/db_api_deprecated.go deleted file mode 100644 
index 886987e05c1..00000000000 --- a/cmd/rpcdaemon22/commands/db_api_deprecated.go +++ /dev/null @@ -1,52 +0,0 @@ -package commands - -import ( - "context" - "fmt" - - "github.com/ledgerwatch/erigon/common/hexutil" -) - -// DBAPI the interface for the db_ RPC commands (deprecated) -type DBAPI interface { - GetString(_ context.Context, _ string, _ string) (string, error) - PutString(_ context.Context, _ string, _ string, _ string) (bool, error) - GetHex(_ context.Context, _ string, _ string) (hexutil.Bytes, error) - PutHex(_ context.Context, _ string, _ string, _ hexutil.Bytes) (bool, error) -} - -// DBAPIImpl data structure to store things needed for db_ commands -type DBAPIImpl struct { - unused uint64 -} - -// NewDBAPIImpl returns NetAPIImplImpl instance -func NewDBAPIImpl() *DBAPIImpl { - return &DBAPIImpl{ - unused: uint64(0), - } -} - -// GetString implements db_getString. Returns string from the local database. -// Deprecated: This function will be removed in the future. -func (api *DBAPIImpl) GetString(_ context.Context, _ string, _ string) (string, error) { - return "", fmt.Errorf(NotAvailableDeprecated, "db_getString") -} - -// PutString implements db_putString. Stores a string in the local database. -// Deprecated: This function will be removed in the future. -func (api *DBAPIImpl) PutString(_ context.Context, _ string, _ string, _ string) (bool, error) { - return false, fmt.Errorf(NotAvailableDeprecated, "db_putString") -} - -// GetHex implements db_getHex. Returns binary data from the local database. -// Deprecated: This function will be removed in the future. -func (api *DBAPIImpl) GetHex(_ context.Context, _ string, _ string) (hexutil.Bytes, error) { - return hexutil.Bytes(""), fmt.Errorf(NotAvailableDeprecated, "db_getHex") -} - -// PutHex implements db_putHex. Stores binary data in the local database. -// Deprecated: This function will be removed in the future. 
-func (api *DBAPIImpl) PutHex(_ context.Context, _ string, _ string, _ hexutil.Bytes) (bool, error) { - return false, fmt.Errorf(NotAvailableDeprecated, "db_putHex") -} diff --git a/cmd/rpcdaemon22/commands/debug_api.go b/cmd/rpcdaemon22/commands/debug_api.go deleted file mode 100644 index f692da238d5..00000000000 --- a/cmd/rpcdaemon22/commands/debug_api.go +++ /dev/null @@ -1,273 +0,0 @@ -package commands - -import ( - "context" - "fmt" - - jsoniter "github.com/json-iterator/go" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/changeset" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/consensus/ethash" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/eth/tracers" - "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/internal/ethapi" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/transactions" - "github.com/ledgerwatch/log/v3" -) - -// AccountRangeMaxResults is the maximum number of results to be returned per call -const AccountRangeMaxResults = 256 - -// PrivateDebugAPI Exposed RPC endpoints for debugging use -type PrivateDebugAPI interface { - StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex uint64, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) - TraceTransaction(ctx context.Context, hash common.Hash, config *tracers.TraceConfig, stream *jsoniter.Stream) error - TraceBlockByHash(ctx context.Context, hash common.Hash, config *tracers.TraceConfig, stream *jsoniter.Stream) error - TraceBlockByNumber(ctx context.Context, number rpc.BlockNumber, config *tracers.TraceConfig, stream *jsoniter.Stream) error - AccountRange(ctx context.Context, blockNrOrHash 
rpc.BlockNumberOrHash, start []byte, maxResults int, nocode, nostorage bool) (state.IteratorDump, error) - GetModifiedAccountsByNumber(ctx context.Context, startNum rpc.BlockNumber, endNum *rpc.BlockNumber) ([]common.Address, error) - GetModifiedAccountsByHash(_ context.Context, startHash common.Hash, endHash *common.Hash) ([]common.Address, error) - TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *tracers.TraceConfig, stream *jsoniter.Stream) error - AccountAt(ctx context.Context, blockHash common.Hash, txIndex uint64, account common.Address) (*AccountResult, error) -} - -// PrivateDebugAPIImpl is implementation of the PrivateDebugAPI interface based on remote Db access -type PrivateDebugAPIImpl struct { - *BaseAPI - db kv.RoDB - GasCap uint64 -} - -// NewPrivateDebugAPI returns PrivateDebugAPIImpl instance -func NewPrivateDebugAPI(base *BaseAPI, db kv.RoDB, gascap uint64) *PrivateDebugAPIImpl { - return &PrivateDebugAPIImpl{ - BaseAPI: base, - db: db, - GasCap: gascap, - } -} - -// StorageRangeAt implements debug_storageRangeAt. Returns information about a range of storage locations (if any) for the given address. 
-func (api *PrivateDebugAPIImpl) StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex uint64, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return StorageRangeResult{}, err - } - defer tx.Rollback() - - chainConfig, err := api.chainConfig(tx) - if err != nil { - return StorageRangeResult{}, err - } - - block, err := api.blockByHashWithSenders(tx, blockHash) - if err != nil { - return StorageRangeResult{}, err - } - if block == nil { - return StorageRangeResult{}, nil - } - getHeader := func(hash common.Hash, number uint64) *types.Header { - h, e := api._blockReader.Header(ctx, tx, hash, number) - if e != nil { - log.Error("getHeader error", "number", number, "hash", hash, "err", e) - } - return h - } - - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - - _, _, _, _, stateReader, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, blockHash, txIndex) - if err != nil { - return StorageRangeResult{}, err - } - return StorageRangeAt(stateReader, contractAddress, keyStart, maxResult) -} - -// AccountRange implements debug_accountRange. 
Returns a range of accounts involved in the given block rangeb -func (api *PrivateDebugAPIImpl) AccountRange(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, startKey []byte, maxResults int, excludeCode, excludeStorage bool) (state.IteratorDump, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return state.IteratorDump{}, err - } - defer tx.Rollback() - - var blockNumber uint64 - - if number, ok := blockNrOrHash.Number(); ok { - if number == rpc.PendingBlockNumber { - return state.IteratorDump{}, fmt.Errorf("accountRange for pending block not supported") - } - if number == rpc.LatestBlockNumber { - var err error - - blockNumber, err = stages.GetStageProgress(tx, stages.Execution) - if err != nil { - return state.IteratorDump{}, fmt.Errorf("last block has not found: %w", err) - } - } else { - blockNumber = uint64(number) - } - - } else if hash, ok := blockNrOrHash.Hash(); ok { - block, err1 := api.blockByHashWithSenders(tx, hash) - if err1 != nil { - return state.IteratorDump{}, err1 - } - if block == nil { - return state.IteratorDump{}, fmt.Errorf("block %s not found", hash.Hex()) - } - blockNumber = block.NumberU64() - } - - if maxResults > AccountRangeMaxResults || maxResults <= 0 { - maxResults = AccountRangeMaxResults - } - - dumper := state.NewDumper(tx, blockNumber) - res, err := dumper.IteratorDump(excludeCode, excludeStorage, common.BytesToAddress(startKey), maxResults) - if err != nil { - return state.IteratorDump{}, err - } - - hash, err := rawdb.ReadCanonicalHash(tx, blockNumber) - if err != nil { - return state.IteratorDump{}, err - } - if hash != (common.Hash{}) { - header := rawdb.ReadHeader(tx, hash, blockNumber) - if header != nil { - res.Root = header.Root.String() - } - } - - return res, nil -} - -// GetModifiedAccountsByNumber implements debug_getModifiedAccountsByNumber. Returns a list of accounts modified in the given block. 
-func (api *PrivateDebugAPIImpl) GetModifiedAccountsByNumber(ctx context.Context, startNumber rpc.BlockNumber, endNumber *rpc.BlockNumber) ([]common.Address, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - latestBlock, err := stages.GetStageProgress(tx, stages.Finish) - if err != nil { - return nil, err - } - - // forces negative numbers to fail (too large) but allows zero - startNum := uint64(startNumber.Int64()) - if startNum > latestBlock { - return nil, fmt.Errorf("start block (%d) is later than the latest block (%d)", startNum, latestBlock) - } - - endNum := startNum + 1 // allows for single param calls - if endNumber != nil { - // forces negative numbers to fail (too large) but allows zero - endNum = uint64(endNumber.Int64()) + 1 - } - - // is endNum too big? - if endNum > latestBlock { - return nil, fmt.Errorf("end block (%d) is later than the latest block (%d)", endNum, latestBlock) - } - - if startNum > endNum { - return nil, fmt.Errorf("start block (%d) must be less than or equal to end block (%d)", startNum, endNum) - } - - return changeset.GetModifiedAccounts(tx, startNum, endNum) -} - -// GetModifiedAccountsByHash implements debug_getModifiedAccountsByHash. Returns a list of accounts modified in the given block. 
-func (api *PrivateDebugAPIImpl) GetModifiedAccountsByHash(ctx context.Context, startHash common.Hash, endHash *common.Hash) ([]common.Address, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - startBlock, err := api.blockByHashWithSenders(tx, startHash) - if err != nil { - return nil, err - } - if startBlock == nil { - return nil, fmt.Errorf("start block %x not found", startHash) - } - startNum := startBlock.NumberU64() - endNum := startNum + 1 // allows for single parameter calls - - if endHash != nil { - endBlock, err := api.blockByHashWithSenders(tx, *endHash) - if err != nil { - return nil, err - } - if endBlock == nil { - return nil, fmt.Errorf("end block %x not found", *endHash) - } - endNum = endBlock.NumberU64() + 1 - } - - if startNum > endNum { - return nil, fmt.Errorf("start block (%d) must be less than or equal to end block (%d)", startNum, endNum) - } - - return changeset.GetModifiedAccounts(tx, startNum, endNum) -} - -func (api *PrivateDebugAPIImpl) AccountAt(ctx context.Context, blockHash common.Hash, txIndex uint64, address common.Address) (*AccountResult, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - - block, err := api.blockByHashWithSenders(tx, blockHash) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil - } - getHeader := func(hash common.Hash, number uint64) *types.Header { - return rawdb.ReadHeader(tx, hash, number) - } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, blockHash, txIndex) - if err != nil { - return nil, err - } - result := &AccountResult{} - 
result.Balance.ToInt().Set(ibs.GetBalance(address).ToBig()) - result.Nonce = hexutil.Uint64(ibs.GetNonce(address)) - result.Code = ibs.GetCode(address) - result.CodeHash = ibs.GetCodeHash(address) - return result, nil -} - -type AccountResult struct { - Balance hexutil.Big `json:"balance"` - Nonce hexutil.Uint64 `json:"nonce"` - Code hexutil.Bytes `json:"code"` - CodeHash common.Hash `json:"codeHash"` -} diff --git a/cmd/rpcdaemon22/commands/debug_api_test.go b/cmd/rpcdaemon22/commands/debug_api_test.go deleted file mode 100644 index 87a24a3eef5..00000000000 --- a/cmd/rpcdaemon22/commands/debug_api_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "encoding/json" - "testing" - - jsoniter "github.com/json-iterator/go" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/eth/tracers" - "github.com/ledgerwatch/erigon/internal/ethapi" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" -) - -var debugTraceTransactionTests = []struct { - txHash string - gas uint64 - failed bool - returnValue string -}{ - {"3f3cb8a0e13ed2481f97f53f7095b9cbc78b6ffb779f2d3e565146371a8830ea", 21000, false, ""}, - {"f588c6426861d9ad25d5ccc12324a8d213f35ef1ed4153193f0c13eb81ca7f4a", 49189, false, "0000000000000000000000000000000000000000000000000000000000000001"}, - {"b6449d8e167a8826d050afe4c9f07095236ff769a985f02649b1023c2ded2059", 38899, false, ""}, -} - -var debugTraceTransactionNoRefundTests = []struct { - txHash string - gas uint64 - failed bool - returnValue string -}{ - {"3f3cb8a0e13ed2481f97f53f7095b9cbc78b6ffb779f2d3e565146371a8830ea", 21000, false, ""}, - {"f588c6426861d9ad25d5ccc12324a8d213f35ef1ed4153193f0c13eb81ca7f4a", 49189, false, "0000000000000000000000000000000000000000000000000000000000000001"}, - 
{"b6449d8e167a8826d050afe4c9f07095236ff769a985f02649b1023c2ded2059", 62899, false, ""}, -} - -func TestTraceBlockByNumber(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - baseApi := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false) - ethApi := NewEthAPI(baseApi, db, nil, nil, nil, 5000000) - api := NewPrivateDebugAPI(baseApi, db, 0) - for _, tt := range debugTraceTransactionTests { - var buf bytes.Buffer - stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) - tx, err := ethApi.GetTransactionByHash(context.Background(), common.HexToHash(tt.txHash)) - if err != nil { - t.Errorf("traceBlock %s: %v", tt.txHash, err) - } - txcount, err := ethApi.GetBlockTransactionCountByHash(context.Background(), *tx.BlockHash) - if err != nil { - t.Errorf("traceBlock %s: %v", tt.txHash, err) - } - err = api.TraceBlockByNumber(context.Background(), rpc.BlockNumber(tx.BlockNumber.ToInt().Uint64()), &tracers.TraceConfig{}, stream) - if err != nil { - t.Errorf("traceBlock %s: %v", tt.txHash, err) - } - if err = stream.Flush(); err != nil { - t.Fatalf("error flusing: %v", err) - } - var er []ethapi.ExecutionResult - if err = json.Unmarshal(buf.Bytes(), &er); err != nil { - t.Fatalf("parsing result: %v", err) - } - if len(er) != int(*txcount) { - t.Fatalf("incorrect length: %v", err) - } - } - var buf bytes.Buffer - stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) - err := api.TraceBlockByNumber(context.Background(), rpc.BlockNumber(rpc.LatestBlockNumber), &tracers.TraceConfig{}, stream) - if err != nil { - t.Errorf("traceBlock %v: %v", rpc.LatestBlockNumber, err) - } - if err = stream.Flush(); err != nil { - t.Fatalf("error flusing: %v", err) - } - var er []ethapi.ExecutionResult - if err = json.Unmarshal(buf.Bytes(), &er); err != nil { - t.Fatalf("parsing result: %v", err) - } -} - -func TestTraceBlockByHash(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) - 
stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - baseApi := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false) - ethApi := NewEthAPI(baseApi, db, nil, nil, nil, 5000000) - api := NewPrivateDebugAPI(baseApi, db, 0) - for _, tt := range debugTraceTransactionTests { - var buf bytes.Buffer - stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) - tx, err := ethApi.GetTransactionByHash(context.Background(), common.HexToHash(tt.txHash)) - if err != nil { - t.Errorf("traceBlock %s: %v", tt.txHash, err) - } - txcount, err := ethApi.GetBlockTransactionCountByHash(context.Background(), *tx.BlockHash) - if err != nil { - t.Errorf("traceBlock %s: %v", tt.txHash, err) - } - err = api.TraceBlockByHash(context.Background(), *tx.BlockHash, &tracers.TraceConfig{}, stream) - if err != nil { - t.Errorf("traceBlock %s: %v", tt.txHash, err) - } - if err = stream.Flush(); err != nil { - t.Fatalf("error flusing: %v", err) - } - var er []ethapi.ExecutionResult - if err = json.Unmarshal(buf.Bytes(), &er); err != nil { - t.Fatalf("parsing result: %v", err) - } - if len(er) != int(*txcount) { - t.Fatalf("incorrect length: %v", err) - } - } -} - -func TestTraceTransaction(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewPrivateDebugAPI( - NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), - db, 0) - for _, tt := range debugTraceTransactionTests { - var buf bytes.Buffer - stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) - err := api.TraceTransaction(context.Background(), common.HexToHash(tt.txHash), &tracers.TraceConfig{}, stream) - if err != nil { - t.Errorf("traceTransaction %s: %v", tt.txHash, err) - } - if err = stream.Flush(); err != nil { - t.Fatalf("error flusing: %v", err) - } - var er ethapi.ExecutionResult - if err = json.Unmarshal(buf.Bytes(), &er); err != nil { - t.Fatalf("parsing result: %v", err) - } - if er.Gas != 
tt.gas { - t.Errorf("wrong gas for transaction %s, got %d, expected %d", tt.txHash, er.Gas, tt.gas) - } - if er.Failed != tt.failed { - t.Errorf("wrong failed flag for transaction %s, got %t, expected %t", tt.txHash, er.Failed, tt.failed) - } - if er.ReturnValue != tt.returnValue { - t.Errorf("wrong return value for transaction %s, got %s, expected %s", tt.txHash, er.ReturnValue, tt.returnValue) - } - } -} - -func TestTraceTransactionNoRefund(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewPrivateDebugAPI( - NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), - db, 0) - for _, tt := range debugTraceTransactionNoRefundTests { - var buf bytes.Buffer - stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) - var norefunds = true - err := api.TraceTransaction(context.Background(), common.HexToHash(tt.txHash), &tracers.TraceConfig{NoRefunds: &norefunds}, stream) - if err != nil { - t.Errorf("traceTransaction %s: %v", tt.txHash, err) - } - if err = stream.Flush(); err != nil { - t.Fatalf("error flusing: %v", err) - } - var er ethapi.ExecutionResult - if err = json.Unmarshal(buf.Bytes(), &er); err != nil { - t.Fatalf("parsing result: %v", err) - } - if er.Gas != tt.gas { - t.Errorf("wrong gas for transaction %s, got %d, expected %d", tt.txHash, er.Gas, tt.gas) - } - if er.Failed != tt.failed { - t.Errorf("wrong failed flag for transaction %s, got %t, expected %t", tt.txHash, er.Failed, tt.failed) - } - if er.ReturnValue != tt.returnValue { - t.Errorf("wrong return value for transaction %s, got %s, expected %s", tt.txHash, er.ReturnValue, tt.returnValue) - } - } -} diff --git a/cmd/rpcdaemon22/commands/engine_api.go b/cmd/rpcdaemon22/commands/engine_api.go deleted file mode 100644 index d63d99102e9..00000000000 --- a/cmd/rpcdaemon22/commands/engine_api.go +++ /dev/null @@ -1,257 +0,0 @@ -package commands - -import ( - "context" - "encoding/binary" - "fmt" - 
"math/big" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/gointerfaces" - "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" - types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/log/v3" -) - -// ExecutionPayload represents an execution payload (aka slot/block) -type ExecutionPayload struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` - StateRoot common.Hash `json:"stateRoot" gencodec:"required"` - ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"` - PrevRandao common.Hash `json:"prevRandao" gencodec:"required"` - BlockNumber hexutil.Uint64 `json:"blockNumber" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` - ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"` - BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` - BlockHash common.Hash `json:"blockHash" gencodec:"required"` - Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` -} - -// PayloadAttributes represent the attributes required to start assembling a payload -type ForkChoiceState struct { - HeadHash common.Hash `json:"headBlockHash" gencodec:"required"` - SafeBlockHash common.Hash `json:"safeBlockHash" gencodec:"required"` - FinalizedBlockHash common.Hash `json:"finalizedBlockHash" gencodec:"required"` -} - -// PayloadAttributes represent the attributes required to start assembling a payload -type PayloadAttributes struct { - Timestamp 
hexutil.Uint64 `json:"timestamp" gencodec:"required"` - PrevRandao common.Hash `json:"prevRandao" gencodec:"required"` - SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"` -} - -// TransitionConfiguration represents the correct configurations of the CL and the EL -type TransitionConfiguration struct { - TerminalTotalDifficulty *hexutil.Big `json:"terminalTotalDifficulty" gencodec:"required"` - TerminalBlockHash common.Hash `json:"terminalBlockHash" gencodec:"required"` - TerminalBlockNumber *hexutil.Big `json:"terminalBlockNumber" gencodec:"required"` -} - -// EngineAPI Beacon chain communication endpoint -type EngineAPI interface { - ForkchoiceUpdatedV1(ctx context.Context, forkChoiceState *ForkChoiceState, payloadAttributes *PayloadAttributes) (map[string]interface{}, error) - NewPayloadV1(context.Context, *ExecutionPayload) (map[string]interface{}, error) - GetPayloadV1(ctx context.Context, payloadID hexutil.Bytes) (*ExecutionPayload, error) - ExchangeTransitionConfigurationV1(ctx context.Context, transitionConfiguration TransitionConfiguration) (TransitionConfiguration, error) -} - -// EngineImpl is implementation of the EngineAPI interface -type EngineImpl struct { - *BaseAPI - db kv.RoDB - api rpchelper.ApiBackend -} - -func convertPayloadStatus(x *remote.EnginePayloadStatus) map[string]interface{} { - json := map[string]interface{}{ - "status": x.Status.String(), - } - if x.LatestValidHash != nil { - json["latestValidHash"] = common.Hash(gointerfaces.ConvertH256ToHash(x.LatestValidHash)) - } - if x.ValidationError != "" { - json["validationError"] = x.ValidationError - } - - return json -} - -func (e *EngineImpl) ForkchoiceUpdatedV1(ctx context.Context, forkChoiceState *ForkChoiceState, payloadAttributes *PayloadAttributes) (map[string]interface{}, error) { - log.Debug("Received ForkchoiceUpdated", "head", forkChoiceState.HeadHash, "safe", forkChoiceState.HeadHash, "finalized", forkChoiceState.FinalizedBlockHash, - 
"build", payloadAttributes != nil) - - var prepareParameters *remote.EnginePayloadAttributes - if payloadAttributes != nil { - prepareParameters = &remote.EnginePayloadAttributes{ - Timestamp: uint64(payloadAttributes.Timestamp), - PrevRandao: gointerfaces.ConvertHashToH256(payloadAttributes.PrevRandao), - SuggestedFeeRecipient: gointerfaces.ConvertAddressToH160(payloadAttributes.SuggestedFeeRecipient), - } - } - reply, err := e.api.EngineForkchoiceUpdatedV1(ctx, &remote.EngineForkChoiceUpdatedRequest{ - ForkchoiceState: &remote.EngineForkChoiceState{ - HeadBlockHash: gointerfaces.ConvertHashToH256(forkChoiceState.HeadHash), - SafeBlockHash: gointerfaces.ConvertHashToH256(forkChoiceState.SafeBlockHash), - FinalizedBlockHash: gointerfaces.ConvertHashToH256(forkChoiceState.FinalizedBlockHash), - }, - PayloadAttributes: prepareParameters, - }) - if err != nil { - return nil, err - } - - json := map[string]interface{}{ - "payloadStatus": convertPayloadStatus(reply.PayloadStatus), - } - if reply.PayloadId != 0 { - encodedPayloadId := make([]byte, 8) - binary.BigEndian.PutUint64(encodedPayloadId, reply.PayloadId) - json["payloadId"] = hexutil.Bytes(encodedPayloadId) - } - - return json, nil -} - -// NewPayloadV1 processes new payloads (blocks) from the beacon chain. 
-// See https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1 -func (e *EngineImpl) NewPayloadV1(ctx context.Context, payload *ExecutionPayload) (map[string]interface{}, error) { - log.Debug("Received NewPayload", "height", uint64(payload.BlockNumber), "hash", payload.BlockHash) - - var baseFee *uint256.Int - if payload.BaseFeePerGas != nil { - var overflow bool - baseFee, overflow = uint256.FromBig((*big.Int)(payload.BaseFeePerGas)) - if overflow { - log.Warn("NewPayload BaseFeePerGas overflow") - return nil, fmt.Errorf("invalid request") - } - } - - // Convert slice of hexutil.Bytes to a slice of slice of bytes - transactions := make([][]byte, len(payload.Transactions)) - for i, transaction := range payload.Transactions { - transactions[i] = ([]byte)(transaction) - } - res, err := e.api.EngineNewPayloadV1(ctx, &types2.ExecutionPayload{ - ParentHash: gointerfaces.ConvertHashToH256(payload.ParentHash), - Coinbase: gointerfaces.ConvertAddressToH160(payload.FeeRecipient), - StateRoot: gointerfaces.ConvertHashToH256(payload.StateRoot), - ReceiptRoot: gointerfaces.ConvertHashToH256(payload.ReceiptsRoot), - LogsBloom: gointerfaces.ConvertBytesToH2048(([]byte)(payload.LogsBloom)), - PrevRandao: gointerfaces.ConvertHashToH256(payload.PrevRandao), - BlockNumber: uint64(payload.BlockNumber), - GasLimit: uint64(payload.GasLimit), - GasUsed: uint64(payload.GasUsed), - Timestamp: uint64(payload.Timestamp), - ExtraData: payload.ExtraData, - BaseFeePerGas: gointerfaces.ConvertUint256IntToH256(baseFee), - BlockHash: gointerfaces.ConvertHashToH256(payload.BlockHash), - Transactions: transactions, - }) - if err != nil { - log.Warn("NewPayload", "err", err) - return nil, err - } - - return convertPayloadStatus(res), nil -} - -func (e *EngineImpl) GetPayloadV1(ctx context.Context, payloadID hexutil.Bytes) (*ExecutionPayload, error) { - decodedPayloadId := binary.BigEndian.Uint64(payloadID) - log.Info("Received GetPayload", "payloadId", 
decodedPayloadId) - - payload, err := e.api.EngineGetPayloadV1(ctx, decodedPayloadId) - if err != nil { - return nil, err - } - var bloom types.Bloom = gointerfaces.ConvertH2048ToBloom(payload.LogsBloom) - - var baseFee *big.Int - if payload.BaseFeePerGas != nil { - baseFee = gointerfaces.ConvertH256ToUint256Int(payload.BaseFeePerGas).ToBig() - } - - // Convert slice of hexutil.Bytes to a slice of slice of bytes - transactions := make([]hexutil.Bytes, len(payload.Transactions)) - for i, transaction := range payload.Transactions { - transactions[i] = transaction - } - return &ExecutionPayload{ - ParentHash: gointerfaces.ConvertH256ToHash(payload.ParentHash), - FeeRecipient: gointerfaces.ConvertH160toAddress(payload.Coinbase), - StateRoot: gointerfaces.ConvertH256ToHash(payload.StateRoot), - ReceiptsRoot: gointerfaces.ConvertH256ToHash(payload.ReceiptRoot), - LogsBloom: bloom[:], - PrevRandao: gointerfaces.ConvertH256ToHash(payload.PrevRandao), - BlockNumber: hexutil.Uint64(payload.BlockNumber), - GasLimit: hexutil.Uint64(payload.GasLimit), - GasUsed: hexutil.Uint64(payload.GasUsed), - Timestamp: hexutil.Uint64(payload.Timestamp), - ExtraData: payload.ExtraData, - BaseFeePerGas: (*hexutil.Big)(baseFee), - BlockHash: gointerfaces.ConvertH256ToHash(payload.BlockHash), - Transactions: transactions, - }, nil -} - -// Receives consensus layer's transition configuration and checks if the execution layer has the correct configuration. -// Can also be used to ping the execution layer (heartbeats). 
-// See https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.7/src/engine/specification.md#engine_exchangetransitionconfigurationv1 -func (e *EngineImpl) ExchangeTransitionConfigurationV1(ctx context.Context, beaconConfig TransitionConfiguration) (TransitionConfiguration, error) { - tx, err := e.db.BeginRo(ctx) - - if err != nil { - return TransitionConfiguration{}, err - } - - defer tx.Rollback() - - chainConfig, err := e.BaseAPI.chainConfig(tx) - - if err != nil { - return TransitionConfiguration{}, err - } - - terminalTotalDifficulty := chainConfig.TerminalTotalDifficulty - if terminalTotalDifficulty != nil && terminalTotalDifficulty.Cmp((*big.Int)(beaconConfig.TerminalTotalDifficulty)) != 0 { - return TransitionConfiguration{}, fmt.Errorf("the execution layer has a wrong terminal total difficulty. expected %v, but instead got: %d", beaconConfig.TerminalTotalDifficulty, terminalTotalDifficulty) - } - - if chainConfig.TerminalBlockHash != beaconConfig.TerminalBlockHash { - return TransitionConfiguration{}, fmt.Errorf("the execution layer has a wrong terminal block hash. expected %s, but instead got: %s", beaconConfig.TerminalBlockHash, chainConfig.TerminalBlockHash) - } - - terminalBlockNumber := chainConfig.TerminalBlockNumber - if terminalBlockNumber == nil { - terminalBlockNumber = common.Big0 - } - - if terminalBlockNumber.Cmp((*big.Int)(beaconConfig.TerminalBlockNumber)) != 0 { - return TransitionConfiguration{}, fmt.Errorf("the execution layer has a wrong terminal block number. 
expected %v, but instead got: %d", beaconConfig.TerminalBlockNumber, terminalBlockNumber) - } - - return TransitionConfiguration{ - TerminalTotalDifficulty: (*hexutil.Big)(terminalTotalDifficulty), - TerminalBlockHash: chainConfig.TerminalBlockHash, - TerminalBlockNumber: (*hexutil.Big)(terminalBlockNumber), - }, nil -} - -// NewEngineAPI returns EngineImpl instance -func NewEngineAPI(base *BaseAPI, db kv.RoDB, api rpchelper.ApiBackend) *EngineImpl { - return &EngineImpl{ - BaseAPI: base, - db: db, - api: api, - } -} diff --git a/cmd/rpcdaemon22/commands/engine_api_test.go b/cmd/rpcdaemon22/commands/engine_api_test.go deleted file mode 100644 index a209b8469b6..00000000000 --- a/cmd/rpcdaemon22/commands/engine_api_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package commands - -import ( - "testing" - - "github.com/ledgerwatch/erigon-lib/gointerfaces" - "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" - "github.com/ledgerwatch/erigon/common" - "github.com/stretchr/testify/assert" -) - -// Test case for https://github.com/ethereum/execution-apis/pull/217 responses -func TestZeroLatestValidHash(t *testing.T) { - payloadStatus := remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{})} - json := convertPayloadStatus(&payloadStatus) - assert.Equal(t, "INVALID", json["status"]) - assert.Equal(t, common.Hash{}, json["latestValidHash"]) -} diff --git a/cmd/rpcdaemon22/commands/erigon_api.go b/cmd/rpcdaemon22/commands/erigon_api.go deleted file mode 100644 index f976cf31f05..00000000000 --- a/cmd/rpcdaemon22/commands/erigon_api.go +++ /dev/null @@ -1,52 +0,0 @@ -package commands - -import ( - "context" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/rpchelper" -) - -// ErigonAPI Erigon specific routines -type 
ErigonAPI interface { - // System related (see ./erigon_system.go) - Forks(ctx context.Context) (Forks, error) - - // Blocks related (see ./erigon_blocks.go) - GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) - GetHeaderByHash(_ context.Context, hash common.Hash) (*types.Header, error) - GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Timestamp, fullTx bool) (map[string]interface{}, error) - - // Receipt related (see ./erigon_receipts.go) - GetLogsByHash(ctx context.Context, hash common.Hash) ([][]*types.Log, error) - //GetLogsByNumber(ctx context.Context, number rpc.BlockNumber) ([][]*types.Log, error) - - // WatchTheBurn / reward related (see ./erigon_issuance.go) - WatchTheBurn(ctx context.Context, blockNr rpc.BlockNumber) (Issuance, error) - - // CumulativeChainTraffic / related to chain traffic (see ./erigon_cumulative_index.go) - CumulativeChainTraffic(ctx context.Context, blockNr rpc.BlockNumber) (ChainTraffic, error) - - // NodeInfo returns a collection of metadata known about the host. 
- NodeInfo(ctx context.Context) ([]p2p.NodeInfo, error) -} - -// ErigonImpl is implementation of the ErigonAPI interface -type ErigonImpl struct { - *BaseAPI - db kv.RoDB - ethBackend rpchelper.ApiBackend -} - -// NewErigonAPI returns ErigonImpl instance -func NewErigonAPI(base *BaseAPI, db kv.RoDB, eth rpchelper.ApiBackend) *ErigonImpl { - return &ErigonImpl{ - BaseAPI: base, - db: db, - ethBackend: eth, - } -} diff --git a/cmd/rpcdaemon22/commands/erigon_block.go b/cmd/rpcdaemon22/commands/erigon_block.go deleted file mode 100644 index 3d68de3a370..00000000000 --- a/cmd/rpcdaemon22/commands/erigon_block.go +++ /dev/null @@ -1,168 +0,0 @@ -package commands - -import ( - "context" - "errors" - "fmt" - "sort" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/internal/ethapi" - "github.com/ledgerwatch/erigon/rpc" -) - -// GetHeaderByNumber implements erigon_getHeaderByNumber. Returns a block's header given a block number ignoring the block's transaction and uncle list (may be faster). -func (api *ErigonImpl) GetHeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Header, error) { - // Pending block is only known by the miner - if blockNumber == rpc.PendingBlockNumber { - block := api.pendingBlock() - if block == nil { - return nil, nil - } - return block.Header(), nil - } - - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - blockNum, err := getBlockNumber(blockNumber, tx) - if err != nil { - return nil, err - } - - header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNum) - if err != nil { - return nil, err - } - if header == nil { - return nil, fmt.Errorf("block header not found: %d", blockNum) - } - - return header, nil -} - -// GetHeaderByHash implements erigon_getHeaderByHash. Returns a block's header given a block's hash. 
-func (api *ErigonImpl) GetHeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - header, err := api._blockReader.HeaderByHash(ctx, tx, hash) - if err != nil { - return nil, err - } - if header == nil { - return nil, fmt.Errorf("block header not found: %s", hash.String()) - } - - return header, nil -} - -func (api *ErigonImpl) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Timestamp, fullTx bool) (map[string]interface{}, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - uintTimestamp := timeStamp.TurnIntoUint64() - - currentHeader := rawdb.ReadCurrentHeader(tx) - currenttHeaderTime := currentHeader.Time - highestNumber := currentHeader.Number.Uint64() - - firstHeader, err := api._blockReader.HeaderByNumber(ctx, tx, 0) - if err != nil { - return nil, err - } - - if firstHeader == nil { - return nil, errors.New("genesis header not found") - } - - firstHeaderTime := firstHeader.Time - - if currenttHeaderTime <= uintTimestamp { - blockResponse, err := buildBlockResponse(tx, highestNumber, fullTx) - if err != nil { - return nil, err - } - - return blockResponse, nil - } - - if firstHeaderTime >= uintTimestamp { - blockResponse, err := buildBlockResponse(tx, 0, fullTx) - if err != nil { - return nil, err - } - - return blockResponse, nil - } - - blockNum := sort.Search(int(currentHeader.Number.Uint64()), func(blockNum int) bool { - currentHeader, err := api._blockReader.HeaderByNumber(ctx, tx, uint64(blockNum)) - if err != nil { - return false - } - - if currentHeader == nil { - return false - } - - return currentHeader.Time >= uintTimestamp - }) - - resultingHeader, err := api._blockReader.HeaderByNumber(ctx, tx, uint64(blockNum)) - if err != nil { - return nil, err - } - - if resultingHeader == nil { - return nil, fmt.Errorf("no header found with block num %d", blockNum) - } - - if 
resultingHeader.Time > uintTimestamp { - response, err := buildBlockResponse(tx, uint64(blockNum)-1, fullTx) - if err != nil { - return nil, err - } - return response, nil - } - - response, err := buildBlockResponse(tx, uint64(blockNum), fullTx) - if err != nil { - return nil, err - } - - return response, nil -} - -func buildBlockResponse(db kv.Tx, blockNum uint64, fullTx bool) (map[string]interface{}, error) { - block, err := rawdb.ReadBlockByNumber(db, blockNum) - if err != nil { - return nil, err - } - - if block == nil { - return nil, nil - } - - response, err := ethapi.RPCMarshalBlock(block, true, fullTx) - - if err == nil && rpc.BlockNumber(block.NumberU64()) == rpc.PendingBlockNumber { - // Pending blocks need to nil out a few fields - for _, field := range []string{"hash", "nonce", "miner"} { - response[field] = nil - } - } - return response, err -} diff --git a/cmd/rpcdaemon22/commands/erigon_cumulative_chain_traffic.go b/cmd/rpcdaemon22/commands/erigon_cumulative_chain_traffic.go deleted file mode 100644 index 640ececce78..00000000000 --- a/cmd/rpcdaemon22/commands/erigon_cumulative_chain_traffic.go +++ /dev/null @@ -1,41 +0,0 @@ -package commands - -import ( - "context" - - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/rpc" -) - -// CumulativeGasIndex implements erigon_cumulativeChainTraffic. Returns how much traffic there has been at the specified block number. -// Aka. 
amount of gas used so far + total transactions issued to the network -func (api *ErigonImpl) CumulativeChainTraffic(ctx context.Context, blockNr rpc.BlockNumber) (ChainTraffic, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return ChainTraffic{}, err - } - defer tx.Rollback() - - blockNumber := uint64(blockNr) - cumulativeGasUsed, err := rawdb.ReadCumulativeGasUsed(tx, blockNumber) - if err != nil { - return ChainTraffic{}, err - } - - _, baseTxId, txCount, err := rawdb.ReadBodyByNumber(tx, blockNumber) - if err != nil { - return ChainTraffic{}, err - } - - cumulativeTransactionCount := baseTxId + uint64(txCount) - return ChainTraffic{ - CumulativeGasUsed: (*hexutil.Big)(cumulativeGasUsed), - CumulativeTransactionsCount: (*hexutil.Uint64)(&cumulativeTransactionCount), - }, nil -} - -type ChainTraffic struct { - CumulativeGasUsed *hexutil.Big `json:"cumulativeGasUsed"` - CumulativeTransactionsCount *hexutil.Uint64 `json:"cumulativeTransactionsCount"` -} diff --git a/cmd/rpcdaemon22/commands/erigon_issuance.go b/cmd/rpcdaemon22/commands/erigon_issuance.go deleted file mode 100644 index 7aef3595e2a..00000000000 --- a/cmd/rpcdaemon22/commands/erigon_issuance.go +++ /dev/null @@ -1,133 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "math/big" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/consensus/ethash" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/rpc" -) - -// BlockReward returns the block reward for this block -// func (api *ErigonImpl) BlockReward(ctx context.Context, blockNr rpc.BlockNumber) (Issuance, error) { -// tx, err := api.db.Begin(ctx, ethdb.RO) -// if err != nil { -// return Issuance{}, err -// } -// defer tx.Rollback() -// -// return api.rewardCalc(tx, blockNr, "block") // nolint goconst -//} - -// UncleReward returns the uncle reward for this block -// func (api *ErigonImpl) UncleReward(ctx context.Context, blockNr 
rpc.BlockNumber) (Issuance, error) { -// tx, err := api.db.Begin(ctx, ethdb.RO) -// if err != nil { -// return Issuance{}, err -// } -// defer tx.Rollback() -// -// return api.rewardCalc(tx, blockNr, "uncle") // nolint goconst -//} - -// Issuance implements erigon_issuance. Returns the total issuance (block reward plus uncle reward) for the given block. -func (api *ErigonImpl) WatchTheBurn(ctx context.Context, blockNr rpc.BlockNumber) (Issuance, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return Issuance{}, err - } - defer tx.Rollback() - - chainConfig, err := api.chainConfig(tx) - if err != nil { - return Issuance{}, err - } - if chainConfig.Ethash == nil { - // Clique for example has no issuance - return Issuance{}, nil - } - hash, err := rawdb.ReadCanonicalHash(tx, uint64(blockNr)) - if err != nil { - return Issuance{}, err - } - header := rawdb.ReadHeader(tx, hash, uint64(blockNr)) - if header == nil { - return Issuance{}, fmt.Errorf("could not find block header") - } - - body := rawdb.ReadCanonicalBodyWithTransactions(tx, hash, uint64(blockNr)) - - if body == nil { - return Issuance{}, fmt.Errorf("could not find block body") - } - - minerReward, uncleRewards := ethash.AccumulateRewards(chainConfig, header, body.Uncles) - issuance := minerReward - for _, r := range uncleRewards { - p := r // avoids warning? 
- issuance.Add(&issuance, &p) - } - - var ret Issuance - ret.BlockReward = (*hexutil.Big)(minerReward.ToBig()) - ret.Issuance = (*hexutil.Big)(issuance.ToBig()) - issuance.Sub(&issuance, &minerReward) - ret.UncleReward = (*hexutil.Big)(issuance.ToBig()) - // Compute how much was burnt - if header.BaseFee != nil { - burnt := header.BaseFee - burnt.Mul(burnt, big.NewInt(int64(header.GasUsed))) - ret.Burnt = (*hexutil.Big)(burnt) - } else { - ret.Burnt = (*hexutil.Big)(big.NewInt(0)) - } - // Compute totalIssued, totalBurnt and the supply of eth - totalIssued, err := rawdb.ReadTotalIssued(tx, uint64(blockNr)) - if err != nil { - return Issuance{}, err - } - totalBurnt, err := rawdb.ReadTotalBurnt(tx, uint64(blockNr)) - if err != nil { - return Issuance{}, err - } - - ret.TotalIssued = (*hexutil.Big)(totalIssued) - ret.TotalBurnt = (*hexutil.Big)(totalBurnt) - - // Compute tips - tips := big.NewInt(0) - - if header.BaseFee != nil { - receipts, err := rawdb.ReadReceiptsByHash(tx, hash) - if err != nil { - return Issuance{}, err - } - - baseFee, overflow := uint256.FromBig(header.BaseFee) - if overflow { - return Issuance{}, fmt.Errorf("baseFee overflow") - } - - for i, transaction := range body.Transactions { - tip := transaction.GetEffectiveGasTip(baseFee).ToBig() - tips.Add(tips, tip.Mul(tip, big.NewInt(int64(receipts[i].GasUsed)))) - } - } - ret.Tips = (*hexutil.Big)(tips) - return ret, nil -} - -// Issuance structure to return information about issuance -type Issuance struct { - BlockReward *hexutil.Big `json:"blockReward"` // Block reward for given block - UncleReward *hexutil.Big `json:"uncleReward"` // Uncle reward for gived block - Issuance *hexutil.Big `json:"issuance"` // Total amount of wei created in the block - Burnt *hexutil.Big `json:"burnt"` // Total amount of wei burned in the block - TotalIssued *hexutil.Big `json:"totalIssued"` // Total amount of wei created in total so far - TotalBurnt *hexutil.Big `json:"totalBurnt"` // Total amount of wei burnt so 
far - Tips *hexutil.Big `json:"tips"` // Total Tips generated by the block -} diff --git a/cmd/rpcdaemon22/commands/erigon_nodeInfo.go b/cmd/rpcdaemon22/commands/erigon_nodeInfo.go deleted file mode 100644 index 68ef98d9299..00000000000 --- a/cmd/rpcdaemon22/commands/erigon_nodeInfo.go +++ /dev/null @@ -1,16 +0,0 @@ -package commands - -import ( - "context" - - "github.com/ledgerwatch/erigon/p2p" -) - -const ( - // allNodesInfo used in NodeInfo request to receive meta data from all running sentries. - allNodesInfo = 0 -) - -func (api *ErigonImpl) NodeInfo(ctx context.Context) ([]p2p.NodeInfo, error) { - return api.ethBackend.NodeInfo(ctx, allNodesInfo) -} diff --git a/cmd/rpcdaemon22/commands/erigon_receipts.go b/cmd/rpcdaemon22/commands/erigon_receipts.go deleted file mode 100644 index cc9a6bae69f..00000000000 --- a/cmd/rpcdaemon22/commands/erigon_receipts.go +++ /dev/null @@ -1,66 +0,0 @@ -package commands - -import ( - "context" - "fmt" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/types" -) - -// GetLogsByHash implements erigon_getLogsByHash. Returns an array of arrays of logs generated by the transactions in the block given by the block's hash. -func (api *ErigonImpl) GetLogsByHash(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - - block, err := api.blockByHashWithSenders(tx, hash) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil - } - receipts, err := api.getReceipts(ctx, tx, chainConfig, block, block.Body().SendersFromTxs()) - if err != nil { - return nil, fmt.Errorf("getReceipts error: %w", err) - } - - logs := make([][]*types.Log, len(receipts)) - for i, receipt := range receipts { - logs[i] = receipt.Logs - } - return logs, nil -} - -// GetLogsByNumber implements erigon_getLogsByHash. 
Returns all the logs that appear in a block given the block's hash. -// func (api *ErigonImpl) GetLogsByNumber(ctx context.Context, number rpc.BlockNumber) ([][]*types.Log, error) { -// tx, err := api.db.Begin(ctx, false) -// if err != nil { -// return nil, err -// } -// defer tx.Rollback() - -// number := rawdb.ReadHeaderNumber(tx, hash) -// if number == nil { -// return nil, fmt.Errorf("block not found: %x", hash) -// } - -// receipts, err := getReceipts(ctx, tx, *number, hash) -// if err != nil { -// return nil, fmt.Errorf("getReceipts error: %w", err) -// } - -// logs := make([][]*types.Log, len(receipts)) -// for i, receipt := range receipts { -// logs[i] = receipt.Logs -// } -// return logs, nil -// } diff --git a/cmd/rpcdaemon22/commands/erigon_system.go b/cmd/rpcdaemon22/commands/erigon_system.go deleted file mode 100644 index 67f4190fc3d..00000000000 --- a/cmd/rpcdaemon22/commands/erigon_system.go +++ /dev/null @@ -1,31 +0,0 @@ -package commands - -import ( - "context" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/forkid" -) - -// Forks is a data type to record a list of forks passed by this node -type Forks struct { - GenesisHash common.Hash `json:"genesis"` - Forks []uint64 `json:"forks"` -} - -// Forks implements erigon_forks. 
Returns the genesis block hash and a sorted list of all forks block numbers -func (api *ErigonImpl) Forks(ctx context.Context) (Forks, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return Forks{}, err - } - defer tx.Rollback() - - chainConfig, genesis, err := api.chainConfigWithGenesis(tx) - if err != nil { - return Forks{}, err - } - forksBlocks := forkid.GatherForks(chainConfig) - - return Forks{genesis.Hash(), forksBlocks}, nil -} diff --git a/cmd/rpcdaemon22/commands/error_messages.go b/cmd/rpcdaemon22/commands/error_messages.go deleted file mode 100644 index b593ea59516..00000000000 --- a/cmd/rpcdaemon22/commands/error_messages.go +++ /dev/null @@ -1,10 +0,0 @@ -package commands - -// NotImplemented is the URI prefix for smartcard wallets. -const NotImplemented = "the method is currently not implemented: %s" - -// NotAvailableChainData x -const NotAvailableChainData = "the function %s is not available, please use --private.api.addr option instead of --datadir option" - -// NotAvailableDeprecated x -const NotAvailableDeprecated = "the method has been deprecated: %s" diff --git a/cmd/rpcdaemon22/commands/eth_accounts.go b/cmd/rpcdaemon22/commands/eth_accounts.go deleted file mode 100644 index c55869d3999..00000000000 --- a/cmd/rpcdaemon22/commands/eth_accounts.go +++ /dev/null @@ -1,121 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "math/big" - - "github.com/ledgerwatch/erigon-lib/gointerfaces" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "google.golang.org/grpc" - - txpool_proto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/rpc" -) - -// GetBalance implements eth_getBalance. Returns the balance of an account for a given address. 
-func (api *APIImpl) GetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Big, error) { - tx, err1 := api.db.BeginRo(ctx) - if err1 != nil { - return nil, fmt.Errorf("getBalance cannot open tx: %w", err1) - } - defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) - if err != nil { - return nil, err - } - - acc, err := reader.ReadAccountData(address) - if err != nil { - return nil, fmt.Errorf("cant get a balance for account %x: %w", address.String(), err) - } - if acc == nil { - // Special case - non-existent account is assumed to have zero balance - return (*hexutil.Big)(big.NewInt(0)), nil - } - - return (*hexutil.Big)(acc.Balance.ToBig()), nil -} - -// GetTransactionCount implements eth_getTransactionCount. Returns the number of transactions sent from an address (the nonce). -func (api *APIImpl) GetTransactionCount(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Uint64, error) { - if blockNrOrHash.BlockNumber != nil && *blockNrOrHash.BlockNumber == rpc.PendingBlockNumber { - reply, err := api.txPool.Nonce(ctx, &txpool_proto.NonceRequest{ - Address: gointerfaces.ConvertAddressToH160(address), - }, &grpc.EmptyCallOption{}) - if err != nil { - return nil, err - } - if reply.Found { - reply.Nonce++ - return (*hexutil.Uint64)(&reply.Nonce), nil - } - } - tx, err1 := api.db.BeginRo(ctx) - if err1 != nil { - return nil, fmt.Errorf("getTransactionCount cannot open tx: %w", err1) - } - defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) - if err != nil { - return nil, err - } - nonce := hexutil.Uint64(0) - acc, err := reader.ReadAccountData(address) - if acc == nil || err != nil { - return &nonce, err - } - return (*hexutil.Uint64)(&acc.Nonce), err -} - -// GetCode implements eth_getCode. 
Returns the byte code at a given address (if it's a smart contract). -func (api *APIImpl) GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { - tx, err1 := api.db.BeginRo(ctx) - if err1 != nil { - return nil, fmt.Errorf("getCode cannot open tx: %w", err1) - } - defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) - if err != nil { - return nil, err - } - - acc, err := reader.ReadAccountData(address) - if acc == nil || err != nil { - return hexutil.Bytes(""), nil - } - res, _ := reader.ReadAccountCode(address, acc.Incarnation, acc.CodeHash) - if res == nil { - return hexutil.Bytes(""), nil - } - return res, nil -} - -// GetStorageAt implements eth_getStorageAt. Returns the value from a storage position at a given address. -func (api *APIImpl) GetStorageAt(ctx context.Context, address common.Address, index string, blockNrOrHash rpc.BlockNumberOrHash) (string, error) { - var empty []byte - - tx, err1 := api.db.BeginRo(ctx) - if err1 != nil { - return hexutil.Encode(common.LeftPadBytes(empty, 32)), err1 - } - defer tx.Rollback() - - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) - if err != nil { - return hexutil.Encode(common.LeftPadBytes(empty, 32)), err - } - acc, err := reader.ReadAccountData(address) - if acc == nil || err != nil { - return hexutil.Encode(common.LeftPadBytes(empty, 32)), err - } - - location := common.HexToHash(index) - res, err := reader.ReadAccountStorage(address, acc.Incarnation, &location) - if err != nil { - res = empty - } - return hexutil.Encode(common.LeftPadBytes(res, 32)), err -} diff --git a/cmd/rpcdaemon22/commands/eth_api.go b/cmd/rpcdaemon22/commands/eth_api.go deleted file mode 100644 index 0ad9730cb4f..00000000000 --- a/cmd/rpcdaemon22/commands/eth_api.go +++ /dev/null @@ -1,359 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "math/big" - 
"sync" - - lru "github.com/hashicorp/golang-lru" - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - libstate "github.com/ledgerwatch/erigon-lib/state" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/common/math" - "github.com/ledgerwatch/erigon/consensus/misc" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/types" - ethFilters "github.com/ledgerwatch/erigon/eth/filters" - "github.com/ledgerwatch/erigon/internal/ethapi" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/services" -) - -// EthAPI is a collection of functions that are exposed in the -type EthAPI interface { - // Block related (proposed file: ./eth_blocks.go) - GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) - GetBlockByHash(ctx context.Context, hash rpc.BlockNumberOrHash, fullTx bool) (map[string]interface{}, error) - GetBlockTransactionCountByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*hexutil.Uint, error) - GetBlockTransactionCountByHash(ctx context.Context, blockHash common.Hash) (*hexutil.Uint, error) - - // Transaction related (see ./eth_txs.go) - GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) - GetTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, txIndex hexutil.Uint64) (*RPCTransaction, error) - GetTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, txIndex hexutil.Uint) (*RPCTransaction, error) - GetRawTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) (hexutil.Bytes, error) - GetRawTransactionByBlockHashAndIndex(ctx context.Context, 
blockHash common.Hash, index hexutil.Uint) (hexutil.Bytes, error) - GetRawTransactionByHash(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) - - // Receipt related (see ./eth_receipts.go) - GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) - GetLogs(ctx context.Context, crit ethFilters.FilterCriteria) ([]*types.Log, error) - GetBlockReceipts(ctx context.Context, number rpc.BlockNumber) ([]map[string]interface{}, error) - - // Uncle related (see ./eth_uncles.go) - GetUncleByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) (map[string]interface{}, error) - GetUncleByBlockHashAndIndex(ctx context.Context, hash common.Hash, index hexutil.Uint) (map[string]interface{}, error) - GetUncleCountByBlockNumber(ctx context.Context, number rpc.BlockNumber) (*hexutil.Uint, error) - GetUncleCountByBlockHash(ctx context.Context, hash common.Hash) (*hexutil.Uint, error) - - // Filter related (see ./eth_filters.go) - NewPendingTransactionFilter(_ context.Context) (string, error) - NewBlockFilter(_ context.Context) (string, error) - NewFilter(_ context.Context, crit ethFilters.FilterCriteria) (string, error) - UninstallFilter(_ context.Context, index string) (bool, error) - GetFilterChanges(_ context.Context, index string) ([]interface{}, error) - - // Account related (see ./eth_accounts.go) - Accounts(ctx context.Context) ([]common.Address, error) - GetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Big, error) - GetTransactionCount(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Uint64, error) - GetStorageAt(ctx context.Context, address common.Address, index string, blockNrOrHash rpc.BlockNumberOrHash) (string, error) - GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) - - // System related (see ./eth_system.go) - BlockNumber(ctx 
context.Context) (hexutil.Uint64, error) - Syncing(ctx context.Context) (interface{}, error) - ChainId(ctx context.Context) (hexutil.Uint64, error) /* called eth_protocolVersion elsewhere */ - ProtocolVersion(_ context.Context) (hexutil.Uint, error) - GasPrice(_ context.Context) (*hexutil.Big, error) - - // Sending related (see ./eth_call.go) - Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *ethapi.StateOverrides) (hexutil.Bytes, error) - EstimateGas(ctx context.Context, argsOrNil *ethapi.CallArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) - SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) - SendTransaction(_ context.Context, txObject interface{}) (common.Hash, error) - Sign(ctx context.Context, _ common.Address, _ hexutil.Bytes) (hexutil.Bytes, error) - SignTransaction(_ context.Context, txObject interface{}) (common.Hash, error) - GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNr rpc.BlockNumber) (*interface{}, error) - CreateAccessList(ctx context.Context, args ethapi.CallArgs, blockNrOrHash *rpc.BlockNumberOrHash, optimizeGas *bool) (*accessListResult, error) - - // Mining related (see ./eth_mining.go) - Coinbase(ctx context.Context) (common.Address, error) - Hashrate(ctx context.Context) (uint64, error) - Mining(ctx context.Context) (bool, error) - GetWork(ctx context.Context) ([4]string, error) - SubmitWork(ctx context.Context, nonce types.BlockNonce, powHash, digest common.Hash) (bool, error) - SubmitHashrate(ctx context.Context, hashRate hexutil.Uint64, id common.Hash) (bool, error) -} - -type BaseAPI struct { - stateCache kvcache.Cache // thread-safe - blocksLRU *lru.Cache // thread-safe - filters *rpchelper.Filters - _chainConfig *params.ChainConfig - _genesis *types.Block - _genesisLock sync.RWMutex - - _blockReader services.FullBlockReader - _txnReader services.TxnReader - _agg *libstate.Aggregator - _txNums []uint64 
- TevmEnabled bool // experiment -} - -func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, agg *libstate.Aggregator, txNums []uint64, singleNodeMode bool) *BaseAPI { - blocksLRUSize := 128 // ~32Mb - if !singleNodeMode { - blocksLRUSize = 512 - } - blocksLRU, err := lru.New(blocksLRUSize) - if err != nil { - panic(err) - } - - return &BaseAPI{filters: f, stateCache: stateCache, blocksLRU: blocksLRU, _blockReader: blockReader, _txnReader: blockReader, _agg: agg, _txNums: txNums} -} - -func (api *BaseAPI) chainConfig(tx kv.Tx) (*params.ChainConfig, error) { - cfg, _, err := api.chainConfigWithGenesis(tx) - return cfg, err -} - -func (api *BaseAPI) EnableTevmExperiment() { api.TevmEnabled = true } - -// nolint:unused -func (api *BaseAPI) genesis(tx kv.Tx) (*types.Block, error) { - _, genesis, err := api.chainConfigWithGenesis(tx) - return genesis, err -} - -func (api *BaseAPI) txnLookup(ctx context.Context, tx kv.Tx, txnHash common.Hash) (uint64, bool, error) { - return api._txnReader.TxnLookup(ctx, tx, txnHash) -} - -func (api *BaseAPI) blockByNumberWithSenders(tx kv.Tx, number uint64) (*types.Block, error) { - hash, hashErr := rawdb.ReadCanonicalHash(tx, number) - if hashErr != nil { - return nil, hashErr - } - return api.blockWithSenders(tx, hash, number) -} -func (api *BaseAPI) blockByHashWithSenders(tx kv.Tx, hash common.Hash) (*types.Block, error) { - if api.blocksLRU != nil { - if it, ok := api.blocksLRU.Get(hash); ok && it != nil { - return it.(*types.Block), nil - } - } - number := rawdb.ReadHeaderNumber(tx, hash) - if number == nil { - return nil, nil - } - return api.blockWithSenders(tx, hash, *number) -} -func (api *BaseAPI) blockWithSenders(tx kv.Tx, hash common.Hash, number uint64) (*types.Block, error) { - if api.blocksLRU != nil { - if it, ok := api.blocksLRU.Get(hash); ok && it != nil { - return it.(*types.Block), nil - } - } - block, _, err := api._blockReader.BlockWithSenders(context.Background(), 
tx, hash, number) - if err != nil { - return nil, err - } - if block == nil { // don't save nil's to cache - return nil, nil - } - // don't save empty blocks to cache, because in Erigon - // if block become non-canonical - we remove it's transactions, but block can become canonical in future - if block.Transactions().Len() == 0 { - return block, nil - } - if api.blocksLRU != nil { - // calc fields before put to cache - for _, txn := range block.Transactions() { - txn.Hash() - } - block.Hash() - api.blocksLRU.Add(hash, block) - } - return block, nil -} - -func (api *BaseAPI) chainConfigWithGenesis(tx kv.Tx) (*params.ChainConfig, *types.Block, error) { - api._genesisLock.RLock() - cc, genesisBlock := api._chainConfig, api._genesis - api._genesisLock.RUnlock() - - if cc != nil { - return cc, genesisBlock, nil - } - genesisBlock, err := rawdb.ReadBlockByNumber(tx, 0) - if err != nil { - return nil, nil, err - } - cc, err = rawdb.ReadChainConfig(tx, genesisBlock.Hash()) - if err != nil { - return nil, nil, err - } - if cc != nil && genesisBlock != nil { - api._genesisLock.Lock() - api._genesis = genesisBlock - api._chainConfig = cc - api._genesisLock.Unlock() - } - return cc, genesisBlock, nil -} - -func (api *BaseAPI) pendingBlock() *types.Block { - return api.filters.LastPendingBlock() -} - -func (api *BaseAPI) blockByRPCNumber(number rpc.BlockNumber, tx kv.Tx) (*types.Block, error) { - if number == rpc.PendingBlockNumber { - return api.pendingBlock(), nil - } - - n, err := getBlockNumber(number, tx) - if err != nil { - return nil, err - } - - block, err := api.blockByNumberWithSenders(tx, n) - return block, err -} - -// APIImpl is implementation of the EthAPI interface based on remote Db access -type APIImpl struct { - *BaseAPI - ethBackend rpchelper.ApiBackend - txPool txpool.TxpoolClient - mining txpool.MiningClient - db kv.RoDB - GasCap uint64 -} - -// NewEthAPI returns APIImpl instance -func NewEthAPI(base *BaseAPI, db kv.RoDB, eth rpchelper.ApiBackend, txPool 
txpool.TxpoolClient, mining txpool.MiningClient, gascap uint64) *APIImpl { - if gascap == 0 { - gascap = uint64(math.MaxUint64 / 2) - } - - return &APIImpl{ - BaseAPI: base, - db: db, - ethBackend: eth, - txPool: txPool, - mining: mining, - GasCap: gascap, - } -} - -// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction -type RPCTransaction struct { - BlockHash *common.Hash `json:"blockHash"` - BlockNumber *hexutil.Big `json:"blockNumber"` - From common.Address `json:"from"` - Gas hexutil.Uint64 `json:"gas"` - GasPrice *hexutil.Big `json:"gasPrice,omitempty"` - Tip *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"` - FeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"` - Hash common.Hash `json:"hash"` - Input hexutil.Bytes `json:"input"` - Nonce hexutil.Uint64 `json:"nonce"` - To *common.Address `json:"to"` - TransactionIndex *hexutil.Uint64 `json:"transactionIndex"` - Value *hexutil.Big `json:"value"` - Type hexutil.Uint64 `json:"type"` - Accesses *types.AccessList `json:"accessList,omitempty"` - ChainID *hexutil.Big `json:"chainId,omitempty"` - V *hexutil.Big `json:"v"` - R *hexutil.Big `json:"r"` - S *hexutil.Big `json:"s"` -} - -// newRPCTransaction returns a transaction that will serialize to the RPC -// representation, with the given location metadata set (if available). -func newRPCTransaction(tx types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64, baseFee *big.Int) *RPCTransaction { - // Determine the signer. For replay-protected transactions, use the most permissive - // signer, because we assume that signers are backwards-compatible with old - // transactions. For non-protected transactions, the homestead signer signer is used - // because the return value of ChainId is zero for those transactions. 
- var chainId *big.Int - result := &RPCTransaction{ - Type: hexutil.Uint64(tx.Type()), - Gas: hexutil.Uint64(tx.GetGas()), - Hash: tx.Hash(), - Input: hexutil.Bytes(tx.GetData()), - Nonce: hexutil.Uint64(tx.GetNonce()), - To: tx.GetTo(), - Value: (*hexutil.Big)(tx.GetValue().ToBig()), - } - switch t := tx.(type) { - case *types.LegacyTx: - chainId = types.DeriveChainId(&t.V).ToBig() - result.GasPrice = (*hexutil.Big)(t.GasPrice.ToBig()) - result.V = (*hexutil.Big)(t.V.ToBig()) - result.R = (*hexutil.Big)(t.R.ToBig()) - result.S = (*hexutil.Big)(t.S.ToBig()) - case *types.AccessListTx: - chainId = t.ChainID.ToBig() - result.ChainID = (*hexutil.Big)(chainId) - result.GasPrice = (*hexutil.Big)(t.GasPrice.ToBig()) - result.V = (*hexutil.Big)(t.V.ToBig()) - result.R = (*hexutil.Big)(t.R.ToBig()) - result.S = (*hexutil.Big)(t.S.ToBig()) - result.Accesses = &t.AccessList - case *types.DynamicFeeTransaction: - chainId = t.ChainID.ToBig() - result.ChainID = (*hexutil.Big)(chainId) - result.Tip = (*hexutil.Big)(t.Tip.ToBig()) - result.FeeCap = (*hexutil.Big)(t.FeeCap.ToBig()) - result.V = (*hexutil.Big)(t.V.ToBig()) - result.R = (*hexutil.Big)(t.R.ToBig()) - result.S = (*hexutil.Big)(t.S.ToBig()) - result.Accesses = &t.AccessList - baseFee, overflow := uint256.FromBig(baseFee) - if baseFee != nil && !overflow && blockHash != (common.Hash{}) { - // price = min(tip + baseFee, gasFeeCap) - price := math.Min256(new(uint256.Int).Add(tx.GetTip(), baseFee), tx.GetFeeCap()) - result.GasPrice = (*hexutil.Big)(price.ToBig()) - } else { - result.GasPrice = nil - } - } - signer := types.LatestSignerForChainID(chainId) - result.From, _ = tx.Sender(*signer) - if blockHash != (common.Hash{}) { - result.BlockHash = &blockHash - result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) - result.TransactionIndex = (*hexutil.Uint64)(&index) - } - return result -} - -// newRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation -func 
newRPCPendingTransaction(tx types.Transaction, current *types.Header, config *params.ChainConfig) *RPCTransaction { - var baseFee *big.Int - if current != nil { - baseFee = misc.CalcBaseFee(config, current) - } - return newRPCTransaction(tx, common.Hash{}, 0, 0, baseFee) -} - -// newRPCRawTransactionFromBlockIndex returns the bytes of a transaction given a block and a transaction index. -func newRPCRawTransactionFromBlockIndex(b *types.Block, index uint64) (hexutil.Bytes, error) { - txs := b.Transactions() - if index >= uint64(len(txs)) { - return nil, nil - } - var buf bytes.Buffer - err := txs[index].MarshalBinary(&buf) - return buf.Bytes(), err -} diff --git a/cmd/rpcdaemon22/commands/eth_api_test.go b/cmd/rpcdaemon22/commands/eth_api_test.go deleted file mode 100644 index ffee9fe095c..00000000000 --- a/cmd/rpcdaemon22/commands/eth_api_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "testing" - - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/internal/ethapi" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/stretchr/testify/assert" - - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" - "github.com/ledgerwatch/erigon/common" -) - -func TestGetTransactionReceipt(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - // Call GetTransactionReceipt for transaction which is not in the database - if _, err := api.GetTransactionReceipt(context.Background(), common.Hash{}); err != nil { - t.Errorf("calling GetTransactionReceipt with empty hash: %v", err) - } -} - -func TestGetTransactionReceiptUnprotected(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) - stateCache := 
kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - // Call GetTransactionReceipt for un-protected transaction - if _, err := api.GetTransactionReceipt(context.Background(), common.HexToHash("0x3f3cb8a0e13ed2481f97f53f7095b9cbc78b6ffb779f2d3e565146371a8830ea")); err != nil { - t.Errorf("calling GetTransactionReceipt for unprotected tx: %v", err) - } -} - -// EIP-1898 test cases - -func TestGetStorageAt_ByBlockNumber_WithRequireCanonicalDefault(t *testing.T) { - assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") - - result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithNumber(0)) - if err != nil { - t.Errorf("calling GetStorageAt: %v", err) - } - - assert.Equal(common.HexToHash("0x0").String(), result) -} - -func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault(t *testing.T) { - assert := assert.New(t) - m, _, _ := rpcdaemontest.CreateTestSentry(t) - db := m.DB - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") - - result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(m.Genesis.Hash(), false)) - if err != nil { - t.Errorf("calling GetStorageAt: %v", err) - } - - assert.Equal(common.HexToHash("0x0").String(), result) -} - -func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue(t *testing.T) { - assert := assert.New(t) - m, _, _ := rpcdaemontest.CreateTestSentry(t) - db := m.DB - stateCache := 
kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") - - result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(m.Genesis.Hash(), true)) - if err != nil { - t.Errorf("calling GetStorageAt: %v", err) - } - - assert.Equal(common.HexToHash("0x0").String(), result) -} - -func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_BlockNotFoundError(t *testing.T) { - m, _, _ := rpcdaemontest.CreateTestSentry(t) - db := m.DB - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") - - offChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, block *core.BlockGen) { - }, true) - if err != nil { - t.Fatal(err) - } - offChainBlock := offChain.Blocks[0] - - if _, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(offChainBlock.Hash(), false)); err != nil { - if fmt.Sprintf("%v", err) != fmt.Sprintf("block %s not found", offChainBlock.Hash().String()[2:]) { - t.Errorf("wrong error: %v", err) - } - } else { - t.Error("error expected") - } -} - -func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_BlockNotFoundError(t *testing.T) { - m, _, _ := rpcdaemontest.CreateTestSentry(t) - db := m.DB - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") - - offChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, block *core.BlockGen) { - }, 
true) - if err != nil { - t.Fatal(err) - } - offChainBlock := offChain.Blocks[0] - - if _, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(offChainBlock.Hash(), true)); err != nil { - if fmt.Sprintf("%v", err) != fmt.Sprintf("block %s not found", offChainBlock.Hash().String()[2:]) { - t.Errorf("wrong error: %v", err) - } - } else { - t.Error("error expected") - } -} - -func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(t *testing.T) { - assert := assert.New(t) - m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) - db := m.DB - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") - - orphanedBlock := orphanedChain[0].Blocks[0] - - result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(orphanedBlock.Hash(), false)) - if err != nil { - if fmt.Sprintf("%v", err) != fmt.Sprintf("hash %s is not currently canonical", orphanedBlock.Hash().String()[2:]) { - t.Errorf("wrong error: %v", err) - } - } else { - t.Error("error expected") - } - - assert.Equal(common.HexToHash("0x0").String(), result) -} - -func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *testing.T) { - m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) - db := m.DB - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") - - orphanedBlock := orphanedChain[0].Blocks[0] - - if _, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(orphanedBlock.Hash(), true)); err != nil { - if fmt.Sprintf("%v", err) != fmt.Sprintf("hash %s is 
not currently canonical", orphanedBlock.Hash().String()[2:]) { - t.Errorf("wrong error: %v", err) - } - } else { - t.Error("error expected") - } -} - -func TestCall_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(t *testing.T) { - m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) - db := m.DB - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") - to := common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") - - orphanedBlock := orphanedChain[0].Blocks[0] - - if _, err := api.Call(context.Background(), ethapi.CallArgs{ - From: &from, - To: &to, - }, rpc.BlockNumberOrHashWithHash(orphanedBlock.Hash(), false), nil); err != nil { - if fmt.Sprintf("%v", err) != fmt.Sprintf("hash %s is not currently canonical", orphanedBlock.Hash().String()[2:]) { - /* Not sure. Here https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1898.md it is not explicitly said that - eth_call should only work with canonical blocks. - But since there is no point in changing the state of non-canonical block, it ignores RequireCanonical. 
*/ - t.Errorf("wrong error: %v", err) - } - } else { - t.Error("error expected") - } -} - -func TestCall_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *testing.T) { - m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) - db := m.DB - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") - to := common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") - - orphanedBlock := orphanedChain[0].Blocks[0] - - if _, err := api.Call(context.Background(), ethapi.CallArgs{ - From: &from, - To: &to, - }, rpc.BlockNumberOrHashWithHash(orphanedBlock.Hash(), true), nil); err != nil { - if fmt.Sprintf("%v", err) != fmt.Sprintf("hash %s is not currently canonical", orphanedBlock.Hash().String()[2:]) { - t.Errorf("wrong error: %v", err) - } - } else { - t.Error("error expected") - } -} diff --git a/cmd/rpcdaemon22/commands/eth_block.go b/cmd/rpcdaemon22/commands/eth_block.go deleted file mode 100644 index 811291c9cbf..00000000000 --- a/cmd/rpcdaemon22/commands/eth_block.go +++ /dev/null @@ -1,320 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "math/big" - "time" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/common/math" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/transactions" - "github.com/ledgerwatch/log/v3" - "golang.org/x/crypto/sha3" -) - -func (api *APIImpl) CallBundle(ctx 
context.Context, txHashes []common.Hash, stateBlockNumberOrHash rpc.BlockNumberOrHash, timeoutMilliSecondsPtr *int64) (map[string]interface{}, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - - if len(txHashes) == 0 { - return nil, nil - } - - var txs types.Transactions - - for _, txHash := range txHashes { - blockNum, ok, err := api.txnLookup(ctx, tx, txHash) - if err != nil { - return nil, err - } - if !ok { - return nil, nil - } - block, err := api.blockByNumberWithSenders(tx, blockNum) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil - } - var txn types.Transaction - for _, transaction := range block.Transactions() { - if transaction.Hash() == txHash { - txn = transaction - break - } - } - if txn == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/turbo-geth/issues/1645 - } - txs = append(txs, txn) - } - defer func(start time.Time) { log.Trace("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) - - stateBlockNumber, hash, latest, err := rpchelper.GetBlockNumber(stateBlockNumberOrHash, tx, api.filters) - if err != nil { - return nil, err - } - - var stateReader state.StateReader - if latest { - cacheView, err := api.stateCache.View(ctx, tx) - if err != nil { - return nil, err - } - stateReader = state.NewCachedReader2(cacheView, tx) - } else { - stateReader = state.NewPlainState(tx, stateBlockNumber) - } - st := state.New(stateReader) - - parent := rawdb.ReadHeader(tx, hash, stateBlockNumber) - if parent == nil { - return nil, fmt.Errorf("block %d(%x) not found", stateBlockNumber, hash) - } - - blockNumber := stateBlockNumber + 1 - - timestamp := parent.Time // Dont care about the timestamp - - coinbase := parent.Coinbase - header := &types.Header{ - ParentHash: parent.Hash(), - Number: big.NewInt(int64(blockNumber)), - GasLimit: parent.GasLimit, - 
Time: timestamp, - Difficulty: parent.Difficulty, - Coinbase: coinbase, - } - - // Get a new instance of the EVM - signer := types.MakeSigner(chainConfig, blockNumber) - rules := chainConfig.Rules(blockNumber) - firstMsg, err := txs[0].AsMessage(*signer, nil, rules) - if err != nil { - return nil, err - } - - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - - blockCtx, txCtx := transactions.GetEvmContext(firstMsg, header, stateBlockNumberOrHash.RequireCanonical, tx, contractHasTEVM, api._blockReader) - evm := vm.NewEVM(blockCtx, txCtx, st, chainConfig, vm.Config{Debug: false}) - - timeoutMilliSeconds := int64(5000) - if timeoutMilliSecondsPtr != nil { - timeoutMilliSeconds = *timeoutMilliSecondsPtr - } - timeout := time.Millisecond * time.Duration(timeoutMilliSeconds) - // Setup context so it may be cancelled the call has completed - // or, in case of unmetered gas, setup a context with a timeout. - var cancel context.CancelFunc - if timeout > 0 { - ctx, cancel = context.WithTimeout(ctx, timeout) - } else { - ctx, cancel = context.WithCancel(ctx) - } - // Make sure the context is cancelled when the call has completed - // this makes sure resources are cleaned up. - defer cancel() - - // Wait for the context to be done and cancel the evm. Even if the - // EVM has finished, cancelling may be done (repeatedly) - go func() { - <-ctx.Done() - evm.Cancel() - }() - - // Setup the gas pool (also for unmetered requests) - // and apply the message. 
- gp := new(core.GasPool).AddGas(math.MaxUint64) - - results := []map[string]interface{}{} - - bundleHash := sha3.NewLegacyKeccak256() - for _, txn := range txs { - msg, err := txn.AsMessage(*signer, nil, rules) - if err != nil { - return nil, err - } - // Execute the transaction message - result, err := core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) - if err != nil { - return nil, err - } - // If the timer caused an abort, return an appropriate error message - if evm.Cancelled() { - return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout) - } - - txHash := txn.Hash().String() - jsonResult := map[string]interface{}{ - "txHash": txHash, - "gasUsed": result.UsedGas, - } - bundleHash.Write(txn.Hash().Bytes()) - if result.Err != nil { - jsonResult["error"] = result.Err.Error() - } else { - jsonResult["value"] = common.BytesToHash(result.Return()) - } - - results = append(results, jsonResult) - } - - ret := map[string]interface{}{} - ret["results"] = results - ret["bundleHash"] = hexutil.Encode(bundleHash.Sum(nil)) - return ret, nil -} - -// GetBlockByNumber implements eth_getBlockByNumber. Returns information about a block given the block's number. 
-func (api *APIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - b, err := api.blockByRPCNumber(number, tx) - if err != nil { - return nil, err - } - if b == nil { - return nil, nil - } - additionalFields := make(map[string]interface{}) - td, err := rawdb.ReadTd(tx, b.Hash(), b.NumberU64()) - if err != nil { - return nil, err - } - additionalFields["totalDifficulty"] = (*hexutil.Big)(td) - response, err := ethapi.RPCMarshalBlock(b, true, fullTx, additionalFields) - - if err == nil && number == rpc.PendingBlockNumber { - // Pending blocks need to nil out a few fields - for _, field := range []string{"hash", "nonce", "miner"} { - response[field] = nil - } - } - return response, err -} - -// GetBlockByHash implements eth_getBlockByHash. Returns information about a block given the block's hash. -func (api *APIImpl) GetBlockByHash(ctx context.Context, numberOrHash rpc.BlockNumberOrHash, fullTx bool) (map[string]interface{}, error) { - if numberOrHash.BlockHash == nil { - // some web3.js based apps (like ethstats client) for some reason call - // eth_getBlockByHash with a block number as a parameter - // so no matter how weird that is, we would love to support that. 
- if numberOrHash.BlockNumber == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - return api.GetBlockByNumber(ctx, *numberOrHash.BlockNumber, fullTx) - } - - hash := *numberOrHash.BlockHash - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - additionalFields := make(map[string]interface{}) - - block, err := api.blockByHashWithSenders(tx, hash) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - number := block.NumberU64() - - td, err := rawdb.ReadTd(tx, hash, number) - if err != nil { - return nil, err - } - additionalFields["totalDifficulty"] = (*hexutil.Big)(td) - response, err := ethapi.RPCMarshalBlock(block, true, fullTx, additionalFields) - - if err == nil && int64(number) == rpc.PendingBlockNumber.Int64() { - // Pending blocks need to nil out a few fields - for _, field := range []string{"hash", "nonce", "miner"} { - response[field] = nil - } - } - return response, err -} - -// GetBlockTransactionCountByNumber implements eth_getBlockTransactionCountByNumber. Returns the number of transactions in a block given the block's block number. 
-func (api *APIImpl) GetBlockTransactionCountByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*hexutil.Uint, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - if blockNr == rpc.PendingBlockNumber { - b, err := api.blockByRPCNumber(blockNr, tx) - if err != nil { - return nil, err - } - if b == nil { - return nil, nil - } - n := hexutil.Uint(len(b.Transactions())) - return &n, nil - } - blockNum, err := getBlockNumber(blockNr, tx) - if err != nil { - return nil, err - } - body, _, txAmount, err := rawdb.ReadBodyByNumber(tx, blockNum) - if err != nil { - return nil, err - } - if body == nil { - return nil, nil - } - n := hexutil.Uint(txAmount) - return &n, nil -} - -// GetBlockTransactionCountByHash implements eth_getBlockTransactionCountByHash. Returns the number of transactions in a block given the block's block hash. -func (api *APIImpl) GetBlockTransactionCountByHash(ctx context.Context, blockHash common.Hash) (*hexutil.Uint, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - num := rawdb.ReadHeaderNumber(tx, blockHash) - if num == nil { - return nil, nil - } - body, _, txAmount := rawdb.ReadBody(tx, blockHash, *num) - if body == nil { - return nil, nil - } - n := hexutil.Uint(txAmount) - return &n, nil -} diff --git a/cmd/rpcdaemon22/commands/eth_call.go b/cmd/rpcdaemon22/commands/eth_call.go deleted file mode 100644 index a015b1e94b2..00000000000 --- a/cmd/rpcdaemon22/commands/eth_call.go +++ /dev/null @@ -1,453 +0,0 @@ -package commands - -import ( - "context" - "errors" - "fmt" - "math/big" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/gointerfaces" - txpool_proto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core" - 
"github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/eth/tracers/logger" - "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/internal/ethapi" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/transactions" - "github.com/ledgerwatch/log/v3" - "google.golang.org/grpc" -) - -// Call implements eth_call. Executes a new message call immediately without creating a transaction on the block chain. -func (api *APIImpl) Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *ethapi.StateOverrides) (hexutil.Bytes, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - - if args.Gas == nil || uint64(*args.Gas) == 0 { - args.Gas = (*hexutil.Uint64)(&api.GasCap) - } - - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - - blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber(blockNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks - if err != nil { - return nil, err - } - block, err := api.BaseAPI.blockWithSenders(tx, hash, blockNumber) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil - } - - result, err := transactions.DoCall(ctx, args, tx, blockNrOrHash, block, overrides, api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM, api._blockReader) - if err != nil { - return nil, err - } - - // If the result contains a revert reason, try to unpack and return it. 
- if len(result.Revert()) > 0 { - return nil, ethapi.NewRevertError(result) - } - - return result.Return(), result.Err -} - -func HeaderByNumberOrHash(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { - if blockLabel, ok := blockNrOrHash.Number(); ok { - blockNum, err := getBlockNumber(blockLabel, tx) - if err != nil { - return nil, err - } - return rawdb.ReadHeaderByNumber(tx, blockNum), nil - } - if hash, ok := blockNrOrHash.Hash(); ok { - header, err := rawdb.ReadHeaderByHash(tx, hash) - if err != nil { - return nil, err - } - if header == nil { - return nil, errors.New("header for hash not found") - } - - if blockNrOrHash.RequireCanonical { - can, err := rawdb.ReadCanonicalHash(tx, header.Number.Uint64()) - if err != nil { - return nil, err - } - if can != hash { - return nil, errors.New("hash is not currently canonical") - } - } - - h := rawdb.ReadHeader(tx, hash, header.Number.Uint64()) - if h == nil { - return nil, errors.New("header found, but block body is missing") - } - return h, nil - } - return nil, errors.New("invalid arguments; neither block nor hash specified") -} - -// EstimateGas implements eth_estimateGas. Returns an estimate of how much gas is necessary to allow the transaction to complete. The transaction will not be added to the blockchain. 
-func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi.CallArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) { - var args ethapi.CallArgs - // if we actually get CallArgs here, we use them - if argsOrNil != nil { - args = *argsOrNil - } - - bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) - if blockNrOrHash != nil { - bNrOrHash = *blockNrOrHash - } - - dbtx, err := api.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer dbtx.Rollback() - - // Binary search the gas requirement, as it may be higher than the amount used - var ( - lo = params.TxGas - 1 - hi uint64 - cap uint64 - ) - // Use zero address if sender unspecified. - if args.From == nil { - args.From = new(common.Address) - } - - // Determine the highest gas limit can be used during the estimation. - if args.Gas != nil && uint64(*args.Gas) >= params.TxGas { - hi = uint64(*args.Gas) - } else { - // Retrieve the block to act as the gas ceiling - h, err := HeaderByNumberOrHash(ctx, dbtx, bNrOrHash) - if err != nil { - return 0, err - } - hi = h.GasLimit - } - - var feeCap *big.Int - if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) { - return 0, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") - } else if args.GasPrice != nil { - feeCap = args.GasPrice.ToInt() - } else if args.MaxFeePerGas != nil { - feeCap = args.MaxFeePerGas.ToInt() - } else { - feeCap = common.Big0 - } - // Recap the highest gas limit with account's available balance. 
- if feeCap.Sign() != 0 { - cacheView, err := api.stateCache.View(ctx, dbtx) - if err != nil { - return 0, err - } - stateReader := state.NewCachedReader2(cacheView, dbtx) - state := state.New(stateReader) - if state == nil { - return 0, fmt.Errorf("can't get the current state") - } - - balance := state.GetBalance(*args.From) // from can't be nil - available := balance.ToBig() - if args.Value != nil { - if args.Value.ToInt().Cmp(available) >= 0 { - return 0, errors.New("insufficient funds for transfer") - } - available.Sub(available, args.Value.ToInt()) - } - allowance := new(big.Int).Div(available, feeCap) - - // If the allowance is larger than maximum uint64, skip checking - if allowance.IsUint64() && hi > allowance.Uint64() { - transfer := args.Value - if transfer == nil { - transfer = new(hexutil.Big) - } - log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance, - "sent", transfer.ToInt(), "maxFeePerGas", feeCap, "fundable", allowance) - hi = allowance.Uint64() - } - } - - // Recap the highest gas allowance with specified gascap. 
- if hi > api.GasCap { - log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", api.GasCap) - hi = api.GasCap - } - cap = hi - var lastBlockNum = rpc.LatestBlockNumber - - chainConfig, err := api.chainConfig(dbtx) - if err != nil { - return 0, err - } - - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(dbtx) - } - - // Create a helper to check if a gas allowance results in an executable transaction - executable := func(gas uint64) (bool, *core.ExecutionResult, error) { - args.Gas = (*hexutil.Uint64)(&gas) - - numOrHash := rpc.BlockNumberOrHash{BlockNumber: &lastBlockNum} - blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber(numOrHash, dbtx, api.filters) // DoCall cannot be executed on non-canonical blocks - if err != nil { - return false, nil, err - } - block, err := api.BaseAPI.blockWithSenders(dbtx, hash, blockNumber) - if err != nil { - return false, nil, err - } - if block == nil { - return false, nil, nil - } - - result, err := transactions.DoCall(ctx, args, dbtx, numOrHash, block, nil, - api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM, api._blockReader) - if err != nil { - if errors.Is(err, core.ErrIntrinsicGas) { - // Special case, raise gas limit - return true, nil, nil - } - - // Bail out - return true, nil, err - } - return result.Failed(), result, nil - } - // Execute the binary search and hone in on an executable gas limit - for lo+1 < hi { - mid := (hi + lo) / 2 - failed, _, err := executable(mid) - - // If the error is not nil(consensus error), it means the provided message - // call or transaction will never be accepted no matter how much gas it is - // assigened. Return the error directly, don't struggle any more. 
- if err != nil { - return 0, err - } - if failed { - lo = mid - } else { - hi = mid - } - } - // Reject the transaction as invalid if it still fails at the highest allowance - if hi == cap { - failed, result, err := executable(hi) - if err != nil { - return 0, err - } - if failed { - if result != nil && !errors.Is(result.Err, vm.ErrOutOfGas) { - if len(result.Revert()) > 0 { - return 0, ethapi.NewRevertError(result) - } - return 0, result.Err - } - // Otherwise, the specified gas cap is too low - return 0, fmt.Errorf("gas required exceeds allowance (%d)", cap) - } - } - return hexutil.Uint64(hi), nil -} - -// GetProof not implemented -func (api *APIImpl) GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNr rpc.BlockNumber) (*interface{}, error) { - var stub interface{} - return &stub, fmt.Errorf(NotImplemented, "eth_getProof") -} - -// accessListResult returns an optional accesslist -// Its the result of the `eth_createAccessList` RPC call. -// It contains an error if the transaction itself failed. -type accessListResult struct { - Accesslist *types.AccessList `json:"accessList"` - Error string `json:"error,omitempty"` - GasUsed hexutil.Uint64 `json:"gasUsed"` -} - -// CreateAccessList implements eth_createAccessList. It creates an access list for the given transaction. -// If the accesslist creation fails an error is returned. -// If the transaction itself fails, an vmErr is returned. 
-func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi.CallArgs, blockNrOrHash *rpc.BlockNumberOrHash, optimizeGas *bool) (*accessListResult, error) { - bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) - if blockNrOrHash != nil { - bNrOrHash = *blockNrOrHash - } - - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - blockNumber, hash, latest, err := rpchelper.GetCanonicalBlockNumber(bNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks - if err != nil { - return nil, err - } - block, err := api.BaseAPI.blockWithSenders(tx, hash, blockNumber) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil - } - var stateReader state.StateReader - if latest { - cacheView, err := api.stateCache.View(ctx, tx) - if err != nil { - return nil, err - } - stateReader = state.NewCachedReader2(cacheView, tx) - } else { - stateReader = state.NewPlainState(tx, blockNumber) - } - - header := block.Header() - // If the gas amount is not set, extract this as it will depend on access - // lists and we'll need to reestimate every time - nogas := args.Gas == nil - - var to common.Address - if args.To != nil { - to = *args.To - } else { - // Require nonce to calculate address of created contract - if args.Nonce == nil { - var nonce uint64 - reply, err := api.txPool.Nonce(ctx, &txpool_proto.NonceRequest{ - Address: gointerfaces.ConvertAddressToH160(*args.From), - }, &grpc.EmptyCallOption{}) - if err != nil { - return nil, err - } - if reply.Found { - nonce = reply.Nonce + 1 - } - args.Nonce = (*hexutil.Uint64)(&nonce) - } - to = crypto.CreateAddress(*args.From, uint64(*args.Nonce)) - } - - // Retrieve the precompiles since they 
don't need to be added to the access list - precompiles := vm.ActivePrecompiles(chainConfig.Rules(blockNumber)) - - // Create an initial tracer - prevTracer := logger.NewAccessListTracer(nil, *args.From, to, precompiles) - if args.AccessList != nil { - prevTracer = logger.NewAccessListTracer(*args.AccessList, *args.From, to, precompiles) - } - for { - state := state.New(stateReader) - // Retrieve the current access list to expand - accessList := prevTracer.AccessList() - log.Trace("Creating access list", "input", accessList) - - // If no gas amount was specified, each unique access list needs it's own - // gas calculation. This is quite expensive, but we need to be accurate - // and it's convered by the sender only anyway. - if nogas { - args.Gas = nil - } - // Set the accesslist to the last al - args.AccessList = &accessList - baseFee, _ := uint256.FromBig(header.BaseFee) - msg, err := args.ToMessage(api.GasCap, baseFee) - if err != nil { - return nil, err - } - - // Apply the transaction with the access list tracer - tracer := logger.NewAccessListTracer(accessList, *args.From, to, precompiles) - config := vm.Config{Tracer: tracer, Debug: true, NoBaseFee: true} - blockCtx, txCtx := transactions.GetEvmContext(msg, header, bNrOrHash.RequireCanonical, tx, contractHasTEVM, api._blockReader) - - evm := vm.NewEVM(blockCtx, txCtx, state, chainConfig, config) - gp := new(core.GasPool).AddGas(msg.Gas()) - res, err := core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) - if err != nil { - return nil, err - } - if tracer.Equal(prevTracer) { - var errString string - if res.Err != nil { - errString = res.Err.Error() - } - accessList := &accessListResult{Accesslist: &accessList, Error: errString, GasUsed: hexutil.Uint64(res.UsedGas)} - if optimizeGas != nil && *optimizeGas { - optimizeToInAccessList(accessList, to) - } - return accessList, nil - } - prevTracer = tracer - } -} - -// to address is warm already, so we can save by adding it to the access 
list -// only if we are adding a lot of its storage slots as well -func optimizeToInAccessList(accessList *accessListResult, to common.Address) { - indexToRemove := -1 - - for i := 0; i < len(*accessList.Accesslist); i++ { - entry := (*accessList.Accesslist)[i] - if entry.Address != to { - continue - } - - // https://eips.ethereum.org/EIPS/eip-2930#charging-less-for-accesses-in-the-access-list - accessListSavingPerSlot := params.ColdSloadCostEIP2929 - params.WarmStorageReadCostEIP2929 - params.TxAccessListStorageKeyGas - - numSlots := uint64(len(entry.StorageKeys)) - if numSlots*accessListSavingPerSlot <= params.TxAccessListAddressGas { - indexToRemove = i - } - } - - if indexToRemove >= 0 { - *accessList.Accesslist = removeIndex(*accessList.Accesslist, indexToRemove) - } -} - -func removeIndex(s types.AccessList, index int) types.AccessList { - return append(s[:index], s[index+1:]...) -} diff --git a/cmd/rpcdaemon22/commands/eth_call_test.go b/cmd/rpcdaemon22/commands/eth_call_test.go deleted file mode 100644 index 714b47de951..00000000000 --- a/cmd/rpcdaemon22/commands/eth_call_test.go +++ /dev/null @@ -1,257 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "testing" - - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/internal/ethapi" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" -) - -func TestEstimateGas(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - var from = common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") - var to = common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") - if _, err := 
api.EstimateGas(context.Background(), ðapi.CallArgs{ - From: &from, - To: &to, - }, nil); err != nil { - t.Errorf("calling EstimateGas: %v", err) - } -} - -func TestEthCallNonCanonical(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) - var from = common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") - var to = common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") - if _, err := api.Call(context.Background(), ethapi.CallArgs{ - From: &from, - To: &to, - }, rpc.BlockNumberOrHashWithHash(common.HexToHash("0x3fcb7c0d4569fddc89cbea54b42f163e0c789351d98810a513895ab44b47020b"), true), nil); err != nil { - if fmt.Sprintf("%v", err) != "hash 3fcb7c0d4569fddc89cbea54b42f163e0c789351d98810a513895ab44b47020b is not currently canonical" { - t.Errorf("wrong error: %v", err) - } - } -} - -func TestGetBlockByTimestampLatestTime(t *testing.T) { - ctx := context.Background() - db := rpcdaemontest.CreateTestKV(t) - - tx, err := db.BeginRo(ctx) - if err != nil { - t.Errorf("fail at beginning tx") - } - defer tx.Rollback() - - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil) - - latestBlock := rawdb.ReadCurrentBlock(tx) - response, err := ethapi.RPCMarshalBlock(latestBlock, true, false) - - if err != nil { - t.Error("couldn't get the rpc marshal block") - } - - if err == nil && rpc.BlockNumber(latestBlock.NumberU64()) == rpc.PendingBlockNumber { - // Pending blocks need to nil out a few fields - for _, field := range []string{"hash", "nonce", "miner"} { - response[field] = nil - } - } - - block, err := api.GetBlockByTimestamp(ctx, rpc.Timestamp(latestBlock.Header().Time), false) - if err != nil { - t.Errorf("couldn't retrieve block %v", err) - } - - if 
block["timestamp"] != response["timestamp"] || block["hash"] != response["hash"] { - t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"]) - } -} - -func TestGetBlockByTimestampOldestTime(t *testing.T) { - ctx := context.Background() - db := rpcdaemontest.CreateTestKV(t) - - tx, err := db.BeginRo(ctx) - if err != nil { - t.Errorf("failed at beginning tx") - } - defer tx.Rollback() - - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil) - - oldestBlock, err := rawdb.ReadBlockByNumber(tx, 0) - if err != nil { - t.Error("couldn't retrieve oldest block") - } - - response, err := ethapi.RPCMarshalBlock(oldestBlock, true, false) - - if err != nil { - t.Error("couldn't get the rpc marshal block") - } - - if err == nil && rpc.BlockNumber(oldestBlock.NumberU64()) == rpc.PendingBlockNumber { - // Pending blocks need to nil out a few fields - for _, field := range []string{"hash", "nonce", "miner"} { - response[field] = nil - } - } - - block, err := api.GetBlockByTimestamp(ctx, rpc.Timestamp(oldestBlock.Header().Time), false) - if err != nil { - t.Errorf("couldn't retrieve block %v", err) - } - - if block["timestamp"] != response["timestamp"] || block["hash"] != response["hash"] { - t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"]) - } -} - -func TestGetBlockByTimeHigherThanLatestBlock(t *testing.T) { - ctx := context.Background() - db := rpcdaemontest.CreateTestKV(t) - - tx, err := db.BeginRo(ctx) - if err != nil { - t.Errorf("fail at beginning tx") - } - defer tx.Rollback() - - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := 
NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil) - - latestBlock := rawdb.ReadCurrentBlock(tx) - - response, err := ethapi.RPCMarshalBlock(latestBlock, true, false) - - if err != nil { - t.Error("couldn't get the rpc marshal block") - } - - if err == nil && rpc.BlockNumber(latestBlock.NumberU64()) == rpc.PendingBlockNumber { - // Pending blocks need to nil out a few fields - for _, field := range []string{"hash", "nonce", "miner"} { - response[field] = nil - } - } - - block, err := api.GetBlockByTimestamp(ctx, rpc.Timestamp(latestBlock.Header().Time+999999999999), false) - if err != nil { - t.Errorf("couldn't retrieve block %v", err) - } - - if block["timestamp"] != response["timestamp"] || block["hash"] != response["hash"] { - t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"]) - } -} - -func TestGetBlockByTimeMiddle(t *testing.T) { - ctx := context.Background() - db := rpcdaemontest.CreateTestKV(t) - - tx, err := db.BeginRo(ctx) - if err != nil { - t.Errorf("fail at beginning tx") - } - defer tx.Rollback() - - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil) - - currentHeader := rawdb.ReadCurrentHeader(tx) - oldestHeader, err := api._blockReader.HeaderByNumber(ctx, tx, 0) - if err != nil { - t.Errorf("error getting the oldest header %s", err) - } - if oldestHeader == nil { - t.Error("couldn't find oldest header") - } - - middleNumber := (currentHeader.Number.Uint64() + oldestHeader.Number.Uint64()) / 2 - middleBlock, err := rawdb.ReadBlockByNumber(tx, middleNumber) - if err != nil { - t.Error("couldn't retrieve middle block") - } - - response, err := ethapi.RPCMarshalBlock(middleBlock, true, false) - - if err != nil { - t.Error("couldn't get 
the rpc marshal block") - } - - if err == nil && rpc.BlockNumber(middleBlock.NumberU64()) == rpc.PendingBlockNumber { - // Pending blocks need to nil out a few fields - for _, field := range []string{"hash", "nonce", "miner"} { - response[field] = nil - } - } - - block, err := api.GetBlockByTimestamp(ctx, rpc.Timestamp(middleBlock.Header().Time), false) - if err != nil { - t.Errorf("couldn't retrieve block %v", err) - } - - if block["timestamp"] != response["timestamp"] || block["hash"] != response["hash"] { - t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"]) - } -} - -func TestGetBlockByTimestamp(t *testing.T) { - ctx := context.Background() - db := rpcdaemontest.CreateTestKV(t) - - tx, err := db.BeginRo(ctx) - if err != nil { - t.Errorf("fail at beginning tx") - } - defer tx.Rollback() - - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil) - - highestBlockNumber := rawdb.ReadCurrentHeader(tx).Number - pickedBlock, err := rawdb.ReadBlockByNumber(tx, highestBlockNumber.Uint64()/3) - if err != nil { - t.Errorf("couldn't get block %v", pickedBlock.Number()) - } - - if pickedBlock == nil { - t.Error("couldn't retrieve picked block") - } - response, err := ethapi.RPCMarshalBlock(pickedBlock, true, false) - - if err != nil { - t.Error("couldn't get the rpc marshal block") - } - - if err == nil && rpc.BlockNumber(pickedBlock.NumberU64()) == rpc.PendingBlockNumber { - // Pending blocks need to nil out a few fields - for _, field := range []string{"hash", "nonce", "miner"} { - response[field] = nil - } - } - - block, err := api.GetBlockByTimestamp(ctx, rpc.Timestamp(pickedBlock.Header().Time), false) - if err != nil { - t.Errorf("couldn't retrieve block %v", err) - } - - if block["timestamp"] != 
response["timestamp"] || block["hash"] != response["hash"] { - t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"]) - } -} diff --git a/cmd/rpcdaemon22/commands/eth_deprecated.go b/cmd/rpcdaemon22/commands/eth_deprecated.go deleted file mode 100644 index 46d918f641f..00000000000 --- a/cmd/rpcdaemon22/commands/eth_deprecated.go +++ /dev/null @@ -1,26 +0,0 @@ -package commands - -import ( - "context" - "fmt" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" -) - -// Accounts implements eth_accounts. Returns a list of addresses owned by the client. -// Deprecated: This function will be removed in the future. -func (api *APIImpl) Accounts(ctx context.Context) ([]common.Address, error) { - return []common.Address{}, fmt.Errorf(NotAvailableDeprecated, "eth_accounts") -} - -// Sign implements eth_sign. Calculates an Ethereum specific signature with: sign(keccak256('\\x19Ethereum Signed Message:\\n' + len(message) + message))). -// Deprecated: This function will be removed in the future. 
-func (api *APIImpl) Sign(ctx context.Context, _ common.Address, _ hexutil.Bytes) (hexutil.Bytes, error) { - return hexutil.Bytes(""), fmt.Errorf(NotAvailableDeprecated, "eth_sign") -} - -// SignTransaction deprecated -func (api *APIImpl) SignTransaction(_ context.Context, txObject interface{}) (common.Hash, error) { - return common.Hash{0}, fmt.Errorf(NotAvailableDeprecated, "eth_signTransaction") -} diff --git a/cmd/rpcdaemon22/commands/eth_filters.go b/cmd/rpcdaemon22/commands/eth_filters.go deleted file mode 100644 index b604880b2d5..00000000000 --- a/cmd/rpcdaemon22/commands/eth_filters.go +++ /dev/null @@ -1,262 +0,0 @@ -package commands - -import ( - "context" - - "github.com/ledgerwatch/erigon/common/debug" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/eth/filters" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/log/v3" -) - -// NewPendingTransactionFilter new transaction filter -func (api *APIImpl) NewPendingTransactionFilter(_ context.Context) (string, error) { - if api.filters == nil { - return "", rpc.ErrNotificationsUnsupported - } - txsCh := make(chan []types.Transaction, 1) - id := api.filters.SubscribePendingTxs(txsCh) - go func() { - for { - select { - case txs, ok := <-txsCh: - if !ok { - return - } - api.filters.AddPendingTxs(id, txs) - } - } - }() - return "0x" + string(id), nil -} - -// NewBlockFilter implements eth_newBlockFilter. Creates a filter in the node, to notify when a new block arrives. 
-func (api *APIImpl) NewBlockFilter(_ context.Context) (string, error) { - if api.filters == nil { - return "", rpc.ErrNotificationsUnsupported - } - ch := make(chan *types.Header, 1) - id := api.filters.SubscribeNewHeads(ch) - go func() { - for { - select { - case block, ok := <-ch: - if !ok { - return - } - api.filters.AddPendingBlock(id, block) - } - } - }() - return "0x" + string(id), nil -} - -// NewFilter implements eth_newFilter. Creates an arbitrary filter object, based on filter options, to notify when the state changes (logs). -func (api *APIImpl) NewFilter(_ context.Context, crit filters.FilterCriteria) (string, error) { - if api.filters == nil { - return "", rpc.ErrNotificationsUnsupported - } - logs := make(chan *types.Log, 1) - id := api.filters.SubscribeLogs(logs, crit) - go func() { - for { - select { - case lg, ok := <-logs: - if !ok { - return - } - api.filters.AddLogs(id, lg) - } - } - }() - return hexutil.EncodeUint64(uint64(id)), nil -} - -// UninstallFilter new transaction filter -func (api *APIImpl) UninstallFilter(_ context.Context, index string) (bool, error) { - if api.filters == nil { - return false, rpc.ErrNotificationsUnsupported - } - var isDeleted bool - // remove 0x - cutIndex := index - if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { - cutIndex = index[2:] - } - isDeleted = api.filters.UnsubscribeHeads(rpchelper.HeadsSubID(cutIndex)) || - api.filters.UnsubscribePendingTxs(rpchelper.PendingTxsSubID(cutIndex)) - id, err := hexutil.DecodeUint64(index) - if err == nil { - return isDeleted || api.filters.UnsubscribeLogs(rpchelper.LogsSubID(id)), nil - } - - return isDeleted, nil -} - -// GetFilterChanges implements eth_getFilterChanges. Polling method for a previously-created filter, which returns an array of logs which occurred since last poll. 
-func (api *APIImpl) GetFilterChanges(_ context.Context, index string) ([]interface{}, error) { - if api.filters == nil { - return nil, rpc.ErrNotificationsUnsupported - } - stub := make([]interface{}, 0) - - // remove 0x - cutIndex := index - if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { - cutIndex = index[2:] - } - if blocks, ok := api.filters.ReadPendingBlocks(rpchelper.HeadsSubID(cutIndex)); ok { - for _, v := range blocks { - stub = append(stub, v.Hash()) - } - return stub, nil - } - if txs, ok := api.filters.ReadPendingTxs(rpchelper.PendingTxsSubID(cutIndex)); ok { - for _, v := range txs { - for _, tx := range v { - stub = append(stub, tx.Hash()) - } - return stub, nil - } - return stub, nil - } - id, err := hexutil.DecodeUint64(index) - if err != nil { - return stub, nil - } - if logs, ok := api.filters.ReadLogs(rpchelper.LogsSubID(id)); ok { - for _, v := range logs { - stub = append(stub, v) - } - return stub, nil - } - return stub, nil -} - -// NewHeads send a notification each time a new (header) block is appended to the chain. 
-func (api *APIImpl) NewHeads(ctx context.Context) (*rpc.Subscription, error) { - if api.filters == nil { - return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported - } - notifier, supported := rpc.NotifierFromContext(ctx) - if !supported { - return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported - } - - rpcSub := notifier.CreateSubscription() - - go func() { - defer debug.LogPanic() - headers := make(chan *types.Header, 1) - - id := api.filters.SubscribeNewHeads(headers) - defer api.filters.UnsubscribeHeads(id) - - for { - select { - case h, ok := <-headers: - if h != nil { - err := notifier.Notify(rpcSub.ID, h) - if err != nil { - log.Warn("error while notifying subscription", "err", err) - return - } - } - if !ok { - log.Warn("new heads channel was closed") - return - } - case <-rpcSub.Err(): - return - } - } - }() - - return rpcSub, nil -} - -// NewPendingTransactions send a notification each time a new (header) block is appended to the chain. -func (api *APIImpl) NewPendingTransactions(ctx context.Context) (*rpc.Subscription, error) { - if api.filters == nil { - return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported - } - notifier, supported := rpc.NotifierFromContext(ctx) - if !supported { - return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported - } - - rpcSub := notifier.CreateSubscription() - - go func() { - defer debug.LogPanic() - txsCh := make(chan []types.Transaction, 1) - id := api.filters.SubscribePendingTxs(txsCh) - defer api.filters.UnsubscribePendingTxs(id) - - for { - select { - case txs, ok := <-txsCh: - for _, t := range txs { - if t != nil { - err := notifier.Notify(rpcSub.ID, t.Hash()) - if err != nil { - log.Warn("error while notifying subscription", "err", err) - return - } - } - } - if !ok { - log.Warn("new pending transactions channel was closed") - return - } - case <-rpcSub.Err(): - return - } - } - }() - - return rpcSub, nil -} - -// Logs send a notification each time a new log appears. 
-func (api *APIImpl) Logs(ctx context.Context, crit filters.FilterCriteria) (*rpc.Subscription, error) { - if api.filters == nil { - return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported - } - notifier, supported := rpc.NotifierFromContext(ctx) - if !supported { - return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported - } - - rpcSub := notifier.CreateSubscription() - - go func() { - defer debug.LogPanic() - logs := make(chan *types.Log, 1) - id := api.filters.SubscribeLogs(logs, crit) - defer api.filters.UnsubscribeLogs(id) - for { - select { - case h, ok := <-logs: - if h != nil { - err := notifier.Notify(rpcSub.ID, h) - if err != nil { - log.Warn("error while notifying subscription", "err", err) - return - } - } - if !ok { - log.Warn("log channel was closed") - return - } - case <-rpcSub.Err(): - return - } - } - }() - - return rpcSub, nil -} diff --git a/cmd/rpcdaemon22/commands/eth_filters_test.go b/cmd/rpcdaemon22/commands/eth_filters_test.go deleted file mode 100644 index 329fd4ac2c9..00000000000 --- a/cmd/rpcdaemon22/commands/eth_filters_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package commands - -import ( - "testing" - - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" - "github.com/ledgerwatch/erigon/eth/filters" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/stretchr/testify/assert" -) - -func TestNewFilters(t *testing.T) { - assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) - mining := txpool.NewMiningClient(conn) - ff := rpchelper.New(ctx, nil, nil, mining, func() {}) - api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, 
nil, 5000000) - - ptf, err := api.NewPendingTransactionFilter(ctx) - assert.Nil(err) - - nf, err := api.NewFilter(ctx, filters.FilterCriteria{}) - assert.Nil(err) - - bf, err := api.NewBlockFilter(ctx) - assert.Nil(err) - - ok, err := api.UninstallFilter(ctx, nf) - assert.Nil(err) - assert.Equal(ok, true) - - ok, err = api.UninstallFilter(ctx, bf) - assert.Nil(err) - assert.Equal(ok, true) - - ok, err = api.UninstallFilter(ctx, ptf) - assert.Nil(err) - assert.Equal(ok, true) -} diff --git a/cmd/rpcdaemon22/commands/eth_ming_test.go b/cmd/rpcdaemon22/commands/eth_ming_test.go deleted file mode 100644 index a3d39e9e101..00000000000 --- a/cmd/rpcdaemon22/commands/eth_ming_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package commands - -import ( - "math/big" - "testing" - "time" - - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/stretchr/testify/require" -) - -func TestPendingBlock(t *testing.T) { - ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) - mining := txpool.NewMiningClient(conn) - ff := rpchelper.New(ctx, nil, nil, mining, func() {}) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), nil, nil, nil, mining, 5000000) - expect := uint64(12345) - b, err := rlp.EncodeToBytes(types.NewBlockWithHeader(&types.Header{Number: big.NewInt(int64(expect))})) - require.NoError(t, err) - ch := make(chan *types.Block, 1) - id := ff.SubscribePendingBlock(ch) - defer ff.UnsubscribePendingBlock(id) - - ff.HandlePendingBlock(&txpool.OnPendingBlockReply{RplBlock: b}) - block := api.pendingBlock() - - 
require.Equal(t, block.NumberU64(), expect) - select { - case got := <-ch: - require.Equal(t, expect, got.NumberU64()) - case <-time.After(100 * time.Millisecond): - t.Fatalf("timeout waiting for expected notification") - } -} - -func TestPendingLogs(t *testing.T) { - ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) - mining := txpool.NewMiningClient(conn) - ff := rpchelper.New(ctx, nil, nil, mining, func() {}) - expect := []byte{211} - - ch := make(chan types.Logs, 1) - defer close(ch) - id := ff.SubscribePendingLogs(ch) - defer ff.UnsubscribePendingLogs(id) - - b, err := rlp.EncodeToBytes([]*types.Log{{Data: expect}}) - require.NoError(t, err) - ff.HandlePendingLogs(&txpool.OnPendingLogsReply{RplLogs: b}) - select { - case logs := <-ch: - require.Equal(t, expect, logs[0].Data) - case <-time.After(100 * time.Millisecond): - t.Fatalf("timeout waiting for expected notification") - } -} diff --git a/cmd/rpcdaemon22/commands/eth_mining.go b/cmd/rpcdaemon22/commands/eth_mining.go deleted file mode 100644 index 9f4cf4982e9..00000000000 --- a/cmd/rpcdaemon22/commands/eth_mining.go +++ /dev/null @@ -1,94 +0,0 @@ -package commands - -import ( - "context" - "errors" - - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/types" - "google.golang.org/grpc/status" -) - -// Coinbase implements eth_coinbase. Returns the current client coinbase address. -func (api *APIImpl) Coinbase(ctx context.Context) (common.Address, error) { - return api.ethBackend.Etherbase(ctx) -} - -// Hashrate implements eth_hashrate. Returns the number of hashes per second that the node is mining with. 
-func (api *APIImpl) Hashrate(ctx context.Context) (uint64, error) { - repl, err := api.mining.HashRate(ctx, &txpool.HashRateRequest{}) - if err != nil { - if s, ok := status.FromError(err); ok { - return 0, errors.New(s.Message()) - } - return 0, err - } - return repl.HashRate, err -} - -// Mining returns an indication if this node is currently mining. -func (api *APIImpl) Mining(ctx context.Context) (bool, error) { - repl, err := api.mining.Mining(ctx, &txpool.MiningRequest{}) - if err != nil { - if s, ok := status.FromError(err); ok { - return false, errors.New(s.Message()) - } - return false, err - } - return repl.Enabled && repl.Running, err -} - -// GetWork returns a work package for external miner. -// -// The work package consists of 3 strings: -// result[0] - 32 bytes hex encoded current block header pow-hash -// result[1] - 32 bytes hex encoded seed hash used for DAG -// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty -// result[3] - hex encoded block number -func (api *APIImpl) GetWork(ctx context.Context) ([4]string, error) { - var res [4]string - repl, err := api.mining.GetWork(ctx, &txpool.GetWorkRequest{}) - if err != nil { - if s, ok := status.FromError(err); ok { - return res, errors.New(s.Message()) - } - return res, err - } - res[0] = repl.HeaderHash - res[1] = repl.SeedHash - res[2] = repl.Target - res[3] = repl.BlockNumber - return res, nil -} - -// SubmitWork can be used by external miner to submit their POW solution. -// It returns an indication if the work was accepted. -// Note either an invalid solution, a stale work a non-existent work will return false. 
-func (api *APIImpl) SubmitWork(ctx context.Context, nonce types.BlockNonce, powHash, digest common.Hash) (bool, error) { - repl, err := api.mining.SubmitWork(ctx, &txpool.SubmitWorkRequest{BlockNonce: nonce[:], PowHash: powHash.Bytes(), Digest: digest.Bytes()}) - if err != nil { - if s, ok := status.FromError(err); ok { - return false, errors.New(s.Message()) - } - return false, err - } - return repl.Ok, nil -} - -// SubmitHashrate can be used for remote miners to submit their hash rate. -// This enables the node to report the combined hash rate of all miners -// which submit work through this node. -// -// It accepts the miner hash rate and an identifier which must be unique -func (api *APIImpl) SubmitHashrate(ctx context.Context, hashRate hexutil.Uint64, id common.Hash) (bool, error) { - repl, err := api.mining.SubmitHashRate(ctx, &txpool.SubmitHashRateRequest{Rate: uint64(hashRate), Id: id.Bytes()}) - if err != nil { - if s, ok := status.FromError(err); ok { - return false, errors.New(s.Message()) - } - return false, err - } - return repl.Ok, nil -} diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go b/cmd/rpcdaemon22/commands/eth_receipts.go deleted file mode 100644 index 0dd4d4d6efa..00000000000 --- a/cmd/rpcdaemon22/commands/eth_receipts.go +++ /dev/null @@ -1,455 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "math/big" - "sort" - "time" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/kv" - libstate "github.com/ledgerwatch/erigon-lib/state" - "github.com/ledgerwatch/log/v3" - - "github.com/RoaringBitmap/roaring/roaring64" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/consensus/ethash" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/eth/filters" - 
"github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/transactions" -) - -func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *params.ChainConfig, block *types.Block, senders []common.Address) (types.Receipts, error) { - if cached := rawdb.ReadReceipts(tx, block, senders); cached != nil { - return cached, nil - } - - getHeader := func(hash common.Hash, number uint64) *types.Header { - h, e := api._blockReader.Header(ctx, tx, hash, number) - if e != nil { - log.Error("getHeader error", "number", number, "hash", hash, "err", e) - } - return h - } - contractHasTEVM := ethdb.GetHasTEVM(tx) - _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, block.Hash(), 0) - if err != nil { - return nil, err - } - - usedGas := new(uint64) - gp := new(core.GasPool).AddGas(block.GasLimit()) - - ethashFaker := ethash.NewFaker() - noopWriter := state.NewNoopWriter() - - receipts := make(types.Receipts, len(block.Transactions())) - - for i, txn := range block.Transactions() { - ibs.Prepare(txn.Hash(), block.Hash(), i) - header := block.Header() - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), ethashFaker, nil, gp, ibs, noopWriter, header, txn, usedGas, vm.Config{}, contractHasTEVM) - if err != nil { - return nil, err - } - receipt.BlockHash = block.Hash() - receipts[i] = receipt - } - - return receipts, nil -} - -// GetLogs implements eth_getLogs. Returns an array of logs matching a given filter object. 
-func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([]*types.Log, error) { - start := time.Now() - var begin, end uint64 - logs := []*types.Log{} - - tx, beginErr := api.db.BeginRo(ctx) - if beginErr != nil { - return logs, beginErr - } - defer tx.Rollback() - - if crit.BlockHash != nil { - number := rawdb.ReadHeaderNumber(tx, *crit.BlockHash) - if number == nil { - return nil, fmt.Errorf("block not found: %x", *crit.BlockHash) - } - begin = *number - end = *number - } else { - // Convert the RPC block numbers into internal representations - latest, err := getLatestBlockNumber(tx) - if err != nil { - return nil, err - } - - begin = latest - if crit.FromBlock != nil { - if crit.FromBlock.Sign() >= 0 { - begin = crit.FromBlock.Uint64() - } else if !crit.FromBlock.IsInt64() || crit.FromBlock.Int64() != int64(rpc.LatestBlockNumber) { - return nil, fmt.Errorf("negative value for FromBlock: %v", crit.FromBlock) - } - } - end = latest - if crit.ToBlock != nil { - if crit.ToBlock.Sign() >= 0 { - end = crit.ToBlock.Uint64() - } else if !crit.ToBlock.IsInt64() || crit.ToBlock.Int64() != int64(rpc.LatestBlockNumber) { - return nil, fmt.Errorf("negative value for ToBlock: %v", crit.ToBlock) - } - } - } - if end < begin { - return nil, fmt.Errorf("end (%d) < begin (%d)", end, begin) - } - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - - var fromTxNum, toTxNum uint64 - if begin > 0 { - fromTxNum = api._txNums[begin-1] - } - toTxNum = api._txNums[end] // end is an inclusive bound - - txNumbers := roaring64.New() - txNumbers.AddRange(fromTxNum, toTxNum) // [min,max) - - ac := api._agg.MakeContext() - - topicsBitmap, err := getTopicsBitmap(ac, tx, crit.Topics, fromTxNum, toTxNum) - if err != nil { - return nil, err - } - if topicsBitmap != nil { - txNumbers.And(topicsBitmap) - } - - var addrBitmap *roaring64.Bitmap - for _, addr := range crit.Addresses { - var bitmapForORing roaring64.Bitmap - it := 
ac.LogAddrIterator(addr.Bytes(), fromTxNum, toTxNum, nil) - for it.HasNext() { - bitmapForORing.Add(it.Next()) - } - if addrBitmap == nil { - addrBitmap = &bitmapForORing - continue - } - addrBitmap = roaring64.Or(addrBitmap, &bitmapForORing) - } - - if addrBitmap != nil { - txNumbers.And(addrBitmap) - } - - if txNumbers.GetCardinality() == 0 { - return logs, nil - } - var lastBlockNum uint64 - var lastBlockHash common.Hash - var lastHeader *types.Header - var lastSigner *types.Signer - var lastRules *params.Rules - stateReader := state.NewHistoryReader22(ac, nil /* ReadIndices */) - iter := txNumbers.Iterator() - for iter.HasNext() { - txNum := iter.Next() - // Find block number - blockNum := uint64(sort.Search(len(api._txNums), func(i int) bool { - return api._txNums[i] > txNum - })) - if blockNum > lastBlockNum { - if lastHeader, err = api._blockReader.HeaderByNumber(ctx, nil, blockNum); err != nil { - return nil, err - } - lastBlockNum = blockNum - lastBlockHash = lastHeader.Hash() - lastSigner = types.MakeSigner(chainConfig, blockNum) - lastRules = chainConfig.Rules(blockNum) - } - var startTxNum uint64 - if blockNum > 0 { - startTxNum = api._txNums[blockNum-1] - } - txIndex := txNum - startTxNum - 1 - //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txNum, blockNum, txIndex) - txn, err := api._txnReader.TxnByIdxInBlock(ctx, nil, blockNum, int(txIndex)) - if err != nil { - return nil, err - } - txHash := txn.Hash() - msg, err := txn.AsMessage(*lastSigner, lastHeader.BaseFee, lastRules) - if err != nil { - return nil, err - } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - blockCtx, txCtx := transactions.GetEvmContext(msg, lastHeader, true /* requireCanonical */, tx, contractHasTEVM, api._blockReader) - stateReader.SetTxNum(txNum) - vmConfig := vm.Config{} - vmConfig.SkipAnalysis = core.SkipAnalysis(chainConfig, blockNum) - ibs := state.New(stateReader) - evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vmConfig) 
- - gp := new(core.GasPool).AddGas(msg.Gas()) - ibs.Prepare(txHash, lastBlockHash, int(txIndex)) - _, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) - if err != nil { - return nil, err - } - filtered := filterLogs(ibs.GetLogs(txHash), crit.Addresses, crit.Topics) - for _, log := range filtered { - log.BlockNumber = blockNum - log.BlockHash = lastBlockHash - log.TxHash = txHash - log.Index = 0 - } - logs = append(logs, filtered...) - } - stats := api._agg.GetAndResetStats() - log.Info("Finished", "duration", time.Since(start), "history queries", stats.HistoryQueries, "ef search duration", stats.EfSearchTime) - return logs, nil -} - -// The Topic list restricts matches to particular event topics. Each event has a list -// of topics. Topics matches a prefix of that list. An empty element slice matches any -// topic. Non-empty elements represent an alternative that matches any of the -// contained topics. -// -// Examples: -// {} or nil matches any topic list -// {{A}} matches topic A in first position -// {{}, {B}} matches any topic in first position AND B in second position -// {{A}, {B}} matches topic A in first position AND B in second position -// {{A, B}, {C, D}} matches topic (A OR B) in first position AND (C OR D) in second position -func getTopicsBitmap(ac *libstate.AggregatorContext, c kv.Tx, topics [][]common.Hash, from, to uint64) (*roaring64.Bitmap, error) { - var result *roaring64.Bitmap - for _, sub := range topics { - var bitmapForORing roaring64.Bitmap - for _, topic := range sub { - it := ac.LogTopicIterator(topic.Bytes(), from, to, nil) - for it.HasNext() { - bitmapForORing.Add(it.Next()) - } - } - - if bitmapForORing.GetCardinality() == 0 { - continue - } - if result == nil { - result = &bitmapForORing - continue - } - result = roaring64.And(&bitmapForORing, result) - } - return result, nil -} - -// GetTransactionReceipt implements eth_getTransactionReceipt. 
Returns the receipt of a transaction given the transaction's hash. -func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - var blockNum uint64 - var ok bool - - blockNum, ok, err = api.txnLookup(ctx, tx, hash) - if !ok || blockNum == 0 { - // It is not an ideal solution (ideal solution requires extending TxnLookupReply proto type to include bool flag indicating absense of result), - // but 0 block number is used here to mean that the transaction is not found - return nil, nil - } - if err != nil { - return nil, err - } - - block, err := api.blockByNumberWithSenders(tx, blockNum) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - - cc, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - - var txnIndex uint64 - var txn types.Transaction - for idx, transaction := range block.Transactions() { - if transaction.Hash() == hash { - txn = transaction - txnIndex = uint64(idx) - break - } - } - - if txn == nil { - if cc.Bor == nil { - return nil, nil - } - - borTx, blockHash, _, _, err := rawdb.ReadBorTransactionForBlockNumber(tx, blockNum) - if err != nil { - return nil, err - } - if borTx == nil { - return nil, nil - } - borReceipt := rawdb.ReadBorReceipt(tx, blockHash, blockNum) - return marshalReceipt(borReceipt, borTx, cc, block, hash), nil - } - - receipts, err := api.getReceipts(ctx, tx, cc, block, block.Body().SendersFromTxs()) - if err != nil { - return nil, fmt.Errorf("getReceipts error: %w", err) - } - if len(receipts) <= int(txnIndex) { - return nil, fmt.Errorf("block has less receipts than expected: %d <= %d, block: %d", len(receipts), int(txnIndex), blockNum) - } - return marshalReceipt(receipts[txnIndex], block.Transactions()[txnIndex], cc, block, hash), nil -} - -// GetBlockReceipts - 
receipts for individual block -// func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber) ([]map[string]interface{}, error) { -func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber) ([]map[string]interface{}, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - blockNum, err := getBlockNumber(number, tx) - if err != nil { - return nil, err - } - block, err := api.blockByNumberWithSenders(tx, blockNum) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil - } - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - receipts, err := api.getReceipts(ctx, tx, chainConfig, block, block.Body().SendersFromTxs()) - if err != nil { - return nil, fmt.Errorf("getReceipts error: %w", err) - } - result := make([]map[string]interface{}, 0, len(receipts)) - for _, receipt := range receipts { - txn := block.Transactions()[receipt.TransactionIndex] - result = append(result, marshalReceipt(receipt, txn, chainConfig, block, txn.Hash())) - } - - return result, nil -} - -func marshalReceipt(receipt *types.Receipt, txn types.Transaction, chainConfig *params.ChainConfig, block *types.Block, hash common.Hash) map[string]interface{} { - var chainId *big.Int - switch t := txn.(type) { - case *types.LegacyTx: - if t.Protected() { - chainId = types.DeriveChainId(&t.V).ToBig() - } - case *types.AccessListTx: - chainId = t.ChainID.ToBig() - case *types.DynamicFeeTransaction: - chainId = t.ChainID.ToBig() - } - signer := types.LatestSignerForChainID(chainId) - from, _ := txn.Sender(*signer) - - fields := map[string]interface{}{ - "blockHash": receipt.BlockHash, - "blockNumber": hexutil.Uint64(receipt.BlockNumber.Uint64()), - "transactionHash": hash, - "transactionIndex": hexutil.Uint64(receipt.TransactionIndex), - "from": from, - "to": txn.GetTo(), - "type": hexutil.Uint(txn.Type()), - "gasUsed": hexutil.Uint64(receipt.GasUsed), - 
"cumulativeGasUsed": hexutil.Uint64(receipt.CumulativeGasUsed), - "contractAddress": nil, - "logs": receipt.Logs, - "logsBloom": types.CreateBloom(types.Receipts{receipt}), - } - - if !chainConfig.IsLondon(block.NumberU64()) { - fields["effectiveGasPrice"] = hexutil.Uint64(txn.GetPrice().Uint64()) - } else { - baseFee, _ := uint256.FromBig(block.BaseFee()) - gasPrice := new(big.Int).Add(block.BaseFee(), txn.GetEffectiveGasTip(baseFee).ToBig()) - fields["effectiveGasPrice"] = hexutil.Uint64(gasPrice.Uint64()) - } - // Assign receipt status. - fields["status"] = hexutil.Uint64(receipt.Status) - if receipt.Logs == nil { - fields["logs"] = [][]*types.Log{} - } - // If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation - if receipt.ContractAddress != (common.Address{}) { - fields["contractAddress"] = receipt.ContractAddress - } - return fields -} - -func includes(addresses []common.Address, a common.Address) bool { - for _, addr := range addresses { - if addr == a { - return true - } - } - - return false -} - -// filterLogs creates a slice of logs matching the given criteria. -func filterLogs(logs []*types.Log, addresses []common.Address, topics [][]common.Hash) []*types.Log { - result := make(types.Logs, 0, len(logs)) -Logs: - for _, log := range logs { - - if len(addresses) > 0 && !includes(addresses, log.Address) { - continue - } - // If the to filtered topics is greater than the amount of topics in logs, skip. 
- if len(topics) > len(log.Topics) { - continue Logs - } - for i, sub := range topics { - match := len(sub) == 0 // empty rule set == wildcard - for _, topic := range sub { - if log.Topics[i] == topic { - match = true - break - } - } - if !match { - continue Logs - } - } - result = append(result, log) - } - return result -} diff --git a/cmd/rpcdaemon22/commands/eth_subscribe_test.go b/cmd/rpcdaemon22/commands/eth_subscribe_test.go deleted file mode 100644 index 9a5d220ba65..00000000000 --- a/cmd/rpcdaemon22/commands/eth_subscribe_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package commands - -import ( - "testing" - - "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" - "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcservices" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/eth/protocols/eth" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/stretchr/testify/require" -) - -func TestEthSubscribe(t *testing.T) { - m, require := stages.Mock(t), require.New(t) - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 7, func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - }, false /* intermediateHashes */) - require.NoError(err) - - b, err := rlp.EncodeToBytes(ð.BlockHeadersPacket66{ - RequestId: 1, - BlockHeadersPacket: chain.Headers, - }) - require.NoError(err) - - m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { - require.NoError(err) - } - m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed - - ctx, conn := 
rpcdaemontest.CreateTestGrpcConn(t, m) - backend := rpcservices.NewRemoteBackend(remote.NewETHBACKENDClient(conn), m.DB, snapshotsync.NewBlockReader()) - ff := rpchelper.New(ctx, backend, nil, nil, func() {}) - - newHeads := make(chan *types.Header) - id := ff.SubscribeNewHeads(newHeads) - defer ff.UnsubscribeHeads(id) - - initialCycle := true - highestSeenHeader := chain.TopBlock.NumberU64() - if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { - t.Fatal(err) - } - - for i := uint64(1); i <= highestSeenHeader; i++ { - header := <-newHeads - require.Equal(i, header.Number.Uint64()) - } -} diff --git a/cmd/rpcdaemon22/commands/eth_system.go b/cmd/rpcdaemon22/commands/eth_system.go deleted file mode 100644 index c327893192c..00000000000 --- a/cmd/rpcdaemon22/commands/eth_system.go +++ /dev/null @@ -1,221 +0,0 @@ -package commands - -import ( - "context" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/eth/gasprice" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rpc" -) - -// BlockNumber implements eth_blockNumber. Returns the block number of most recent block. -func (api *APIImpl) BlockNumber(ctx context.Context) (hexutil.Uint64, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer tx.Rollback() - blockNum, err := getLatestBlockNumber(tx) - if err != nil { - return 0, err - } - return hexutil.Uint64(blockNum), nil -} - -// Syncing implements eth_syncing. Returns a data object detailing the status of the sync process or false if not syncing. 
-func (api *APIImpl) Syncing(ctx context.Context) (interface{}, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - highestBlock, err := stages.GetStageProgress(tx, stages.Headers) - if err != nil { - return false, err - } - - currentBlock, err := stages.GetStageProgress(tx, stages.Finish) - if err != nil { - return false, err - } - - if currentBlock > 0 && currentBlock >= highestBlock { // Return not syncing if the synchronisation already completed - return false, nil - } - - // Otherwise gather the block sync stats - type S struct { - StageName string `json:"stage_name"` - BlockNumber hexutil.Uint64 `json:"block_number"` - } - stagesMap := make([]S, len(stages.AllStages)) - for i, stage := range stages.AllStages { - progress, err := stages.GetStageProgress(tx, stage) - if err != nil { - return nil, err - } - stagesMap[i].StageName = string(stage) - stagesMap[i].BlockNumber = hexutil.Uint64(progress) - } - - return map[string]interface{}{ - "currentBlock": hexutil.Uint64(currentBlock), - "highestBlock": hexutil.Uint64(highestBlock), - "stages": stagesMap, - }, nil -} - -// ChainId implements eth_chainId. Returns the current ethereum chainId. -func (api *APIImpl) ChainId(ctx context.Context) (hexutil.Uint64, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer tx.Rollback() - - chainConfig, err := api.chainConfig(tx) - if err != nil { - return 0, err - } - return hexutil.Uint64(chainConfig.ChainID.Uint64()), nil -} - -// ChainID alias of ChainId - just for convenience -func (api *APIImpl) ChainID(ctx context.Context) (hexutil.Uint64, error) { - return api.ChainId(ctx) -} - -// ProtocolVersion implements eth_protocolVersion. Returns the current ethereum protocol version. 
-func (api *APIImpl) ProtocolVersion(ctx context.Context) (hexutil.Uint, error) { - ver, err := api.ethBackend.ProtocolVersion(ctx) - if err != nil { - return 0, err - } - return hexutil.Uint(ver), nil -} - -// GasPrice implements eth_gasPrice. Returns the current price per gas in wei. -func (api *APIImpl) GasPrice(ctx context.Context) (*hexutil.Big, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - cc, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - oracle := gasprice.NewOracle(NewGasPriceOracleBackend(tx, cc, api.BaseAPI), ethconfig.Defaults.GPO) - tipcap, err := oracle.SuggestTipCap(ctx) - if err != nil { - return nil, err - } - if head := rawdb.ReadCurrentHeader(tx); head != nil && head.BaseFee != nil { - tipcap.Add(tipcap, head.BaseFee) - } - return (*hexutil.Big)(tipcap), err -} - -// MaxPriorityFeePerGas returns a suggestion for a gas tip cap for dynamic fee transactions. -func (api *APIImpl) MaxPriorityFeePerGas(ctx context.Context) (*hexutil.Big, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - cc, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - oracle := gasprice.NewOracle(NewGasPriceOracleBackend(tx, cc, api.BaseAPI), ethconfig.Defaults.GPO) - tipcap, err := oracle.SuggestTipCap(ctx) - if err != nil { - return nil, err - } - return (*hexutil.Big)(tipcap), err -} - -type feeHistoryResult struct { - OldestBlock *hexutil.Big `json:"oldestBlock"` - Reward [][]*hexutil.Big `json:"reward,omitempty"` - BaseFee []*hexutil.Big `json:"baseFeePerGas,omitempty"` - GasUsedRatio []float64 `json:"gasUsedRatio"` -} - -func (api *APIImpl) FeeHistory(ctx context.Context, blockCount rpc.DecimalOrHex, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*feeHistoryResult, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - cc, err := api.chainConfig(tx) - if err != 
nil { - return nil, err - } - oracle := gasprice.NewOracle(NewGasPriceOracleBackend(tx, cc, api.BaseAPI), ethconfig.Defaults.GPO) - - oldest, reward, baseFee, gasUsed, err := oracle.FeeHistory(ctx, int(blockCount), lastBlock, rewardPercentiles) - if err != nil { - return nil, err - } - results := &feeHistoryResult{ - OldestBlock: (*hexutil.Big)(oldest), - GasUsedRatio: gasUsed, - } - if reward != nil { - results.Reward = make([][]*hexutil.Big, len(reward)) - for i, w := range reward { - results.Reward[i] = make([]*hexutil.Big, len(w)) - for j, v := range w { - results.Reward[i][j] = (*hexutil.Big)(v) - } - } - } - if baseFee != nil { - results.BaseFee = make([]*hexutil.Big, len(baseFee)) - for i, v := range baseFee { - results.BaseFee[i] = (*hexutil.Big)(v) - } - } - return results, nil -} - -type GasPriceOracleBackend struct { - tx kv.Tx - cc *params.ChainConfig - baseApi *BaseAPI -} - -func NewGasPriceOracleBackend(tx kv.Tx, cc *params.ChainConfig, baseApi *BaseAPI) *GasPriceOracleBackend { - return &GasPriceOracleBackend{tx: tx, cc: cc, baseApi: baseApi} -} - -func (b *GasPriceOracleBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { - header, err := b.baseApi._blockReader.HeaderByNumber(ctx, b.tx, uint64(number.Int64())) - if err != nil { - return nil, err - } - if header == nil { - return nil, nil - } - return header, nil -} -func (b *GasPriceOracleBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { - return b.baseApi.blockByRPCNumber(number, b.tx) -} -func (b *GasPriceOracleBackend) ChainConfig() *params.ChainConfig { - return b.cc -} -func (b *GasPriceOracleBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { - return rawdb.ReadReceiptsByHash(b.tx, hash) -} -func (b *GasPriceOracleBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { - return nil, nil -} diff --git a/cmd/rpcdaemon22/commands/eth_txs.go 
b/cmd/rpcdaemon22/commands/eth_txs.go deleted file mode 100644 index 2cf26862d70..00000000000 --- a/cmd/rpcdaemon22/commands/eth_txs.go +++ /dev/null @@ -1,240 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "math/big" - - "github.com/ledgerwatch/erigon-lib/gointerfaces" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/gointerfaces/types" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/rawdb" - types2 "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/rpc" -) - -// GetTransactionByHash implements eth_getTransactionByHash. Returns information about a transaction given the transaction's hash. -func (api *APIImpl) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - - // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByHash - blockNum, ok, err := api.txnLookup(ctx, tx, hash) - if err != nil { - return nil, err - } - if ok { - block, err := api.blockByNumberWithSenders(tx, blockNum) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil - } - blockHash := block.Hash() - var txnIndex uint64 - var txn types2.Transaction - for i, transaction := range block.Transactions() { - if transaction.Hash() == hash { - txn = transaction - txnIndex = uint64(i) - break - } - } - - // Add GasPrice for the DynamicFeeTransaction - var baseFee *big.Int - if chainConfig.IsLondon(blockNum) && blockHash != (common.Hash{}) { - baseFee = block.BaseFee() - } - - // if no transaction was found then we return nil - if txn == nil { - return nil, nil - - } - - return newRPCTransaction(txn, blockHash, blockNum, txnIndex, baseFee), nil - } - - curHeader := 
rawdb.ReadCurrentHeader(tx) - if curHeader == nil { - return nil, nil - } - - // No finalized transaction, try to retrieve it from the pool - reply, err := api.txPool.Transactions(ctx, &txpool.TransactionsRequest{Hashes: []*types.H256{gointerfaces.ConvertHashToH256(hash)}}) - if err != nil { - return nil, err - } - if len(reply.RlpTxs[0]) > 0 { - s := rlp.NewStream(bytes.NewReader(reply.RlpTxs[0]), uint64(len(reply.RlpTxs[0]))) - txn, err := types2.DecodeTransaction(s) - if err != nil { - return nil, err - } - - // if no transaction was found in the txpool then we return nil and an error warning that we didn't find the transaction by the hash - if txn == nil { - return nil, nil - } - - return newRPCPendingTransaction(txn, curHeader, chainConfig), nil - } - - // Transaction unknown, return as such - return nil, nil -} - -// GetRawTransactionByHash returns the bytes of the transaction for the given hash. -func (api *APIImpl) GetRawTransactionByHash(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByHash - blockNum, ok, err := api.txnLookup(ctx, tx, hash) - if err != nil { - return nil, err - } - if !ok { - return nil, nil - } - block, err := api.blockByNumberWithSenders(tx, blockNum) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil - } - var txn types2.Transaction - for _, transaction := range block.Transactions() { - if transaction.Hash() == hash { - txn = transaction - break - } - } - - if txn != nil { - var buf bytes.Buffer - err = txn.MarshalBinary(&buf) - return buf.Bytes(), err - } - - // No finalized transaction, try to retrieve it from the pool - reply, err := api.txPool.Transactions(ctx, &txpool.TransactionsRequest{Hashes: []*types.H256{gointerfaces.ConvertHashToH256(hash)}}) - if err != nil { - return nil, err - } - if len(reply.RlpTxs[0]) > 0 { - return 
reply.RlpTxs[0], nil - } - return nil, nil -} - -// GetTransactionByBlockHashAndIndex implements eth_getTransactionByBlockHashAndIndex. Returns information about a transaction given the block's hash and a transaction index. -func (api *APIImpl) GetTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, txIndex hexutil.Uint64) (*RPCTransaction, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByBlockHashAndIndex - block, err := api.blockByHashWithSenders(tx, blockHash) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - - txs := block.Transactions() - if uint64(txIndex) >= uint64(len(txs)) { - return nil, nil // not error - } - - return newRPCTransaction(txs[txIndex], block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil -} - -// GetRawTransactionByBlockHashAndIndex returns the bytes of the transaction for the given block hash and index. -func (api *APIImpl) GetRawTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) (hexutil.Bytes, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - // https://infura.io/docs/ethereum/json-rpc/eth-getRawTransactionByBlockHashAndIndex - block, err := api.blockByHashWithSenders(tx, blockHash) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - - return newRPCRawTransactionFromBlockIndex(block, uint64(index)) -} - -// GetTransactionByBlockNumberAndIndex implements eth_getTransactionByBlockNumberAndIndex. Returns information about a transaction given a block number and transaction index. 
-func (api *APIImpl) GetTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, txIndex hexutil.Uint) (*RPCTransaction, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByBlockNumberAndIndex - blockNum, err := getBlockNumber(blockNr, tx) - if err != nil { - return nil, err - } - - block, err := api.blockByNumberWithSenders(tx, blockNum) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - - txs := block.Transactions() - if uint64(txIndex) >= uint64(len(txs)) { - return nil, nil // not error - } - - return newRPCTransaction(txs[txIndex], block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil -} - -// GetRawTransactionByBlockNumberAndIndex returns the bytes of the transaction for the given block number and index. -func (api *APIImpl) GetRawTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) (hexutil.Bytes, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - // https://infura.io/docs/ethereum/json-rpc/eth-getRawTransactionByBlockNumberAndIndex - block, err := api.blockByRPCNumber(blockNr, tx) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - - return newRPCRawTransactionFromBlockIndex(block, uint64(index)) -} diff --git a/cmd/rpcdaemon22/commands/eth_uncles.go b/cmd/rpcdaemon22/commands/eth_uncles.go deleted file mode 100644 index 4d21345ffc0..00000000000 --- a/cmd/rpcdaemon22/commands/eth_uncles.go +++ /dev/null @@ -1,133 +0,0 @@ -package commands - -import ( - "context" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/rawdb" - 
"github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" - "github.com/ledgerwatch/log/v3" -) - -// GetUncleByBlockNumberAndIndex implements eth_getUncleByBlockNumberAndIndex. Returns information about an uncle given a block's number and the index of the uncle. -func (api *APIImpl) GetUncleByBlockNumberAndIndex(ctx context.Context, number rpc.BlockNumber, index hexutil.Uint) (map[string]interface{}, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - blockNum, err := getBlockNumber(number, tx) - if err != nil { - return nil, err - } - block, err := api.blockByNumberWithSenders(tx, blockNum) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - hash := block.Hash() - additionalFields := make(map[string]interface{}) - td, err := rawdb.ReadTd(tx, block.Hash(), blockNum) - if err != nil { - return nil, err - } - additionalFields["totalDifficulty"] = (*hexutil.Big)(td) - - uncles := block.Uncles() - if index >= hexutil.Uint(len(uncles)) { - log.Trace("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index) - return nil, nil - } - uncle := types.NewBlockWithHeader(uncles[index]) - return ethapi.RPCMarshalBlock(uncle, false, false, additionalFields) -} - -// GetUncleByBlockHashAndIndex implements eth_getUncleByBlockHashAndIndex. Returns information about an uncle given a block's hash and the index of the uncle. 
-func (api *APIImpl) GetUncleByBlockHashAndIndex(ctx context.Context, hash common.Hash, index hexutil.Uint) (map[string]interface{}, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - block, err := api.blockByHashWithSenders(tx, hash) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - number := block.NumberU64() - additionalFields := make(map[string]interface{}) - td, err := rawdb.ReadTd(tx, hash, number) - if err != nil { - return nil, err - } - additionalFields["totalDifficulty"] = (*hexutil.Big)(td) - - uncles := block.Uncles() - if index >= hexutil.Uint(len(uncles)) { - log.Trace("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index) - return nil, nil - } - uncle := types.NewBlockWithHeader(uncles[index]) - - return ethapi.RPCMarshalBlock(uncle, false, false, additionalFields) -} - -// GetUncleCountByBlockNumber implements eth_getUncleCountByBlockNumber. Returns the number of uncles in the block, if any. -func (api *APIImpl) GetUncleCountByBlockNumber(ctx context.Context, number rpc.BlockNumber) (*hexutil.Uint, error) { - n := hexutil.Uint(0) - - tx, err := api.db.BeginRo(ctx) - if err != nil { - return &n, err - } - defer tx.Rollback() - - blockNum, err := getBlockNumber(number, tx) - if err != nil { - return &n, err - } - - block, err := api.blockByNumberWithSenders(tx, blockNum) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - n = hexutil.Uint(len(block.Uncles())) - return &n, nil -} - -// GetUncleCountByBlockHash implements eth_getUncleCountByBlockHash. Returns the number of uncles in the block, if any. 
-func (api *APIImpl) GetUncleCountByBlockHash(ctx context.Context, hash common.Hash) (*hexutil.Uint, error) { - n := hexutil.Uint(0) - tx, err := api.db.BeginRo(ctx) - if err != nil { - return &n, err - } - defer tx.Rollback() - - number := rawdb.ReadHeaderNumber(tx, hash) - if number == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - - block, err := api.blockWithSenders(tx, hash, *number) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - n = hexutil.Uint(len(block.Uncles())) - return &n, nil -} diff --git a/cmd/rpcdaemon22/commands/get_chain_config_test.go b/cmd/rpcdaemon22/commands/get_chain_config_test.go deleted file mode 100644 index a4ed4fdad65..00000000000 --- a/cmd/rpcdaemon22/commands/get_chain_config_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package commands - -import ( - "context" - "testing" - - "github.com/ledgerwatch/erigon-lib/kv/memdb" - "github.com/ledgerwatch/erigon/core" -) - -func TestGetChainConfig(t *testing.T) { - db := memdb.NewTestDB(t) - config, _, err := core.CommitGenesisBlock(db, core.DefaultGenesisBlock()) - if err != nil { - t.Fatalf("setting up genensis block: %v", err) - } - - tx, txErr := db.BeginRo(context.Background()) - if txErr != nil { - t.Fatalf("error starting tx: %v", txErr) - } - defer tx.Rollback() - - api := &BaseAPI{} - config1, err1 := api.chainConfig(tx) - if err1 != nil { - t.Fatalf("reading chain config: %v", err1) - } - if config.String() != config1.String() { - t.Fatalf("read different config: %s, expected %s", config1.String(), config.String()) - } - config2, err2 := api.chainConfig(tx) - if err2 != nil { - t.Fatalf("reading chain config: %v", err2) - } - if config.String() != config2.String() { - t.Fatalf("read different config: %s, expected %s", config2.String(), config.String()) - } -} diff --git a/cmd/rpcdaemon22/commands/net_api.go 
b/cmd/rpcdaemon22/commands/net_api.go deleted file mode 100644 index 2a094aa2ee7..00000000000 --- a/cmd/rpcdaemon22/commands/net_api.go +++ /dev/null @@ -1,66 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "strconv" - - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/turbo/rpchelper" -) - -// NetAPI the interface for the net_ RPC commands -type NetAPI interface { - Listening(_ context.Context) (bool, error) - Version(_ context.Context) (string, error) - PeerCount(_ context.Context) (hexutil.Uint, error) -} - -// NetAPIImpl data structure to store things needed for net_ commands -type NetAPIImpl struct { - ethBackend rpchelper.ApiBackend -} - -// NewNetAPIImpl returns NetAPIImplImpl instance -func NewNetAPIImpl(eth rpchelper.ApiBackend) *NetAPIImpl { - return &NetAPIImpl{ - ethBackend: eth, - } -} - -// Listening implements net_listening. Returns true if client is actively listening for network connections. -// TODO: Remove hard coded value -func (api *NetAPIImpl) Listening(_ context.Context) (bool, error) { - return true, nil -} - -// Version implements net_version. Returns the current network id. -func (api *NetAPIImpl) Version(ctx context.Context) (string, error) { - if api.ethBackend == nil { - // We're running in --datadir mode or otherwise cannot get the backend - return "", fmt.Errorf(NotAvailableChainData, "net_version") - } - - res, err := api.ethBackend.NetVersion(ctx) - if err != nil { - return "", err - } - - return strconv.FormatUint(res, 10), nil -} - -// PeerCount implements net_peerCount. Returns number of peers currently -// connected to the first sentry server. 
-func (api *NetAPIImpl) PeerCount(ctx context.Context) (hexutil.Uint, error) { - if api.ethBackend == nil { - // We're running in --datadir mode or otherwise cannot get the backend - return 0, fmt.Errorf(NotAvailableChainData, "net_peerCount") - } - - res, err := api.ethBackend.NetPeerCount(ctx) - if err != nil { - return 0, err - } - - return hexutil.Uint(res), nil -} diff --git a/cmd/rpcdaemon22/commands/parity_api.go b/cmd/rpcdaemon22/commands/parity_api.go deleted file mode 100644 index 5e6d7bac23b..00000000000 --- a/cmd/rpcdaemon22/commands/parity_api.go +++ /dev/null @@ -1,89 +0,0 @@ -package commands - -import ( - "context" - "encoding/binary" - "fmt" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/rpc" -) - -var latestTag = common.BytesToHash([]byte("latest")) - -var ErrWrongTag = fmt.Errorf("listStorageKeys wrong block tag or number: must be '%s' ('latest')", latestTag) - -// ParityAPI the interface for the parity_ RPC commands -type ParityAPI interface { - ListStorageKeys(ctx context.Context, account common.Address, quantity int, offset *hexutil.Bytes, blockNumber rpc.BlockNumberOrHash) ([]hexutil.Bytes, error) -} - -// ParityAPIImpl data structure to store things needed for parity_ commands -type ParityAPIImpl struct { - db kv.RoDB -} - -// NewParityAPIImpl returns ParityAPIImpl instance -func NewParityAPIImpl(db kv.RoDB) *ParityAPIImpl { - return &ParityAPIImpl{ - db: db, - } -} - -// ListStorageKeys implements parity_listStorageKeys. 
Returns all storage keys of the given address -func (api *ParityAPIImpl) ListStorageKeys(ctx context.Context, account common.Address, quantity int, offset *hexutil.Bytes, blockNumberOrTag rpc.BlockNumberOrHash) ([]hexutil.Bytes, error) { - if err := api.checkBlockNumber(blockNumberOrTag); err != nil { - return nil, err - } - - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, fmt.Errorf("listStorageKeys cannot open tx: %w", err) - } - defer tx.Rollback() - a, err := state.NewPlainStateReader(tx).ReadAccountData(account) - if err != nil { - return nil, err - } else if a == nil { - return nil, fmt.Errorf("acc not found") - } - - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, a.GetIncarnation()) - seekBytes := append(account.Bytes(), b...) - - c, err := tx.CursorDupSort(kv.PlainState) - if err != nil { - return nil, err - } - defer c.Close() - keys := make([]hexutil.Bytes, 0) - var v []byte - var seekVal []byte - if offset != nil { - seekVal = *offset - } - - for v, err = c.SeekBothRange(seekBytes, seekVal); v != nil && len(keys) != quantity && err == nil; _, v, err = c.NextDup() { - if len(v) > common.HashLength { - keys = append(keys, v[:common.HashLength]) - } else { - keys = append(keys, v) - } - } - if err != nil { - return nil, err - } - return keys, nil -} - -func (api *ParityAPIImpl) checkBlockNumber(blockNumber rpc.BlockNumberOrHash) error { - num, isNum := blockNumber.Number() - if isNum && rpc.LatestBlockNumber == num { - return nil - } - return ErrWrongTag -} diff --git a/cmd/rpcdaemon22/commands/parity_api_test.go b/cmd/rpcdaemon22/commands/parity_api_test.go deleted file mode 100644 index 0117eddfab8..00000000000 --- a/cmd/rpcdaemon22/commands/parity_api_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "testing" - - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - 
"github.com/ledgerwatch/erigon/rpc" - "github.com/stretchr/testify/assert" -) - -var latestBlock = rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) - -func TestParityAPIImpl_ListStorageKeys_NoOffset(t *testing.T) { - assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) - api := NewParityAPIImpl(db) - answers := []string{ - "0000000000000000000000000000000000000000000000000000000000000000", - "0000000000000000000000000000000000000000000000000000000000000002", - "0a2127994676ca91e4eb3d2a1e46ec9dcee074dc2643bb5ebd4e9ac6541a3148", - "0fe673b4bc06161f39bc26f4e8831c810a72ffe69e5c8cb26f7f54752618e696", - "120e23dcb7e4437386073613853db77b10011a2404eefc716b97c7767e37f8eb", - } - addr := common.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcae5") - result, err := api.ListStorageKeys(context.Background(), addr, 5, nil, latestBlock) - if err != nil { - t.Errorf("calling ListStorageKeys: %v", err) - } - assert.Equal(len(answers), len(result)) - for k, v := range result { - assert.Equal(answers[k], common.Bytes2Hex(v)) - } -} - -func TestParityAPIImpl_ListStorageKeys_WithOffset_ExistingPrefix(t *testing.T) { - assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) - api := NewParityAPIImpl(db) - answers := []string{ - "29d05770ca9ee7088a64e18c8e5160fc62c3c2179dc8ef9b4dbc970c9e51b4d8", - "29edc84535d98b29835079d685b97b41ee8e831e343cc80793057e462353a26d", - "2c05ac60f9aa2df5e64ef977f271e4b9a2d13951f123a2cb5f5d4ad5eb344f1a", - "4644be453c81744b6842ddf615d7fca0e14a23b09734be63d44c23452de95631", - "4974416255391052161ba8184fe652f3bf8c915592c65f7de127af8e637dce5d", - } - addr := common.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcae5") - offset := common.Hex2Bytes("29") - b := hexutil.Bytes(offset) - result, err := api.ListStorageKeys(context.Background(), addr, 5, &b, latestBlock) - if err != nil { - t.Errorf("calling ListStorageKeys: %v", err) - } - assert.Equal(len(answers), len(result)) - for k, v := range result { - assert.Equal(answers[k], 
common.Bytes2Hex(v)) - } -} - -func TestParityAPIImpl_ListStorageKeys_WithOffset_NonExistingPrefix(t *testing.T) { - assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) - api := NewParityAPIImpl(db) - answers := []string{ - "4644be453c81744b6842ddf615d7fca0e14a23b09734be63d44c23452de95631", - "4974416255391052161ba8184fe652f3bf8c915592c65f7de127af8e637dce5d", - } - addr := common.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcae5") - offset := common.Hex2Bytes("30") - b := hexutil.Bytes(offset) - result, err := api.ListStorageKeys(context.Background(), addr, 2, &b, latestBlock) - if err != nil { - t.Errorf("calling ListStorageKeys: %v", err) - } - assert.Equal(len(answers), len(result)) - for k, v := range result { - assert.Equal(answers[k], common.Bytes2Hex(v)) - } -} - -func TestParityAPIImpl_ListStorageKeys_WithOffset_EmptyResponse(t *testing.T) { - assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) - api := NewParityAPIImpl(db) - addr := common.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcae5") - offset := common.Hex2Bytes("ff") - b := hexutil.Bytes(offset) - result, err := api.ListStorageKeys(context.Background(), addr, 2, &b, latestBlock) - if err != nil { - t.Errorf("calling ListStorageKeys: %v", err) - } - assert.Equal(0, len(result)) -} - -func TestParityAPIImpl_ListStorageKeys_AccNotFound(t *testing.T) { - assert := assert.New(t) - db := rpcdaemontest.CreateTestKV(t) - api := NewParityAPIImpl(db) - addr := common.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcaef") - _, err := api.ListStorageKeys(context.Background(), addr, 2, nil, latestBlock) - assert.Error(err, fmt.Errorf("acc not found")) -} diff --git a/cmd/rpcdaemon22/commands/rpc_block.go b/cmd/rpcdaemon22/commands/rpc_block.go deleted file mode 100644 index 9c001ba8ac7..00000000000 --- a/cmd/rpcdaemon22/commands/rpc_block.go +++ /dev/null @@ -1,45 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/ledgerwatch/erigon-lib/kv" - 
"github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/rpc" -) - -func getBlockNumber(number rpc.BlockNumber, tx kv.Tx) (uint64, error) { - var blockNum uint64 - var err error - if number == rpc.LatestBlockNumber || number == rpc.PendingBlockNumber { - blockNum, err = getLatestBlockNumber(tx) - if err != nil { - return 0, err - } - } else if number == rpc.EarliestBlockNumber { - blockNum = 0 - } else { - blockNum = uint64(number.Int64()) - } - - return blockNum, nil -} - -func getLatestBlockNumber(tx kv.Tx) (uint64, error) { - forkchoiceHeadHash := rawdb.ReadForkchoiceHead(tx) - if forkchoiceHeadHash != (common.Hash{}) { - forkchoiceHeadNum := rawdb.ReadHeaderNumber(tx, forkchoiceHeadHash) - if forkchoiceHeadNum != nil { - return *forkchoiceHeadNum, nil - } - } - - blockNum, err := stages.GetStageProgress(tx, stages.Execution) - if err != nil { - return 0, fmt.Errorf("getting latest block number: %w", err) - } - - return blockNum, nil -} diff --git a/cmd/rpcdaemon22/commands/send_transaction.go b/cmd/rpcdaemon22/commands/send_transaction.go deleted file mode 100644 index 7ce01f31395..00000000000 --- a/cmd/rpcdaemon22/commands/send_transaction.go +++ /dev/null @@ -1,96 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "errors" - "fmt" - "math/big" - - txPoolProto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/log/v3" -) - -// SendRawTransaction implements eth_sendRawTransaction. 
Creates new message call transaction or a contract creation for previously-signed transactions. -func (api *APIImpl) SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) { - txn, err := types.DecodeTransaction(rlp.NewStream(bytes.NewReader(encodedTx), uint64(len(encodedTx)))) - if err != nil { - return common.Hash{}, err - } - - // If the transaction fee cap is already specified, ensure the - // fee of the given transaction is _reasonable_. - if err := checkTxFee(txn.GetPrice().ToBig(), txn.GetGas(), ethconfig.Defaults.RPCTxFeeCap); err != nil { - return common.Hash{}, err - } - if !txn.Protected() { - return common.Hash{}, errors.New("only replay-protected (EIP-155) transactions allowed over RPC") - } - hash := txn.Hash() - res, err := api.txPool.Add(ctx, &txPoolProto.AddRequest{RlpTxs: [][]byte{encodedTx}}) - if err != nil { - return common.Hash{}, err - } - - if res.Imported[0] != txPoolProto.ImportResult_SUCCESS { - return hash, fmt.Errorf("%s: %s", txPoolProto.ImportResult_name[int32(res.Imported[0])], res.Errors[0]) - } - - tx, err := api.db.BeginRo(ctx) - if err != nil { - return common.Hash{}, err - } - defer tx.Rollback() - - // Print a log with full txn details for manual investigations and interventions - blockNum := rawdb.ReadCurrentBlockNumber(tx) - if blockNum == nil { - return common.Hash{}, err - } - cc, err := api.chainConfig(tx) - if err != nil { - return common.Hash{}, err - } - signer := types.MakeSigner(cc, *blockNum) - from, err := txn.Sender(*signer) - if err != nil { - return common.Hash{}, err - } - - if txn.GetTo() == nil { - addr := crypto.CreateAddress(from, txn.GetNonce()) - log.Info("Submitted contract creation", "hash", txn.Hash().Hex(), "from", from, "nonce", txn.GetNonce(), "contract", addr.Hex(), "value", txn.GetValue()) - } else { - log.Info("Submitted transaction", "hash", txn.Hash().Hex(), "from", from, "nonce", txn.GetNonce(), "recipient", txn.GetTo(), "value", txn.GetValue()) - } - - return 
txn.Hash(), nil -} - -// SendTransaction implements eth_sendTransaction. Creates new message call transaction or a contract creation if the data field contains code. -func (api *APIImpl) SendTransaction(_ context.Context, txObject interface{}) (common.Hash, error) { - return common.Hash{0}, fmt.Errorf(NotImplemented, "eth_sendTransaction") -} - -// checkTxFee is an internal function used to check whether the fee of -// the given transaction is _reasonable_(under the cap). -func checkTxFee(gasPrice *big.Int, gas uint64, cap float64) error { - // Short circuit if there is no cap for transaction fee at all. - if cap == 0 { - return nil - } - feeEth := new(big.Float).Quo(new(big.Float).SetInt(new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(gas))), new(big.Float).SetInt(big.NewInt(params.Ether))) - feeFloat, _ := feeEth.Float64() - if feeFloat > cap { - return fmt.Errorf("tx fee (%.2f ether) exceeds the configured cap (%.2f ether)", feeFloat, cap) - } - return nil -} diff --git a/cmd/rpcdaemon22/commands/send_transaction_test.go b/cmd/rpcdaemon22/commands/send_transaction_test.go deleted file mode 100644 index 87c6ef411b3..00000000000 --- a/cmd/rpcdaemon22/commands/send_transaction_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package commands_test - -import ( - "bytes" - "crypto/ecdsa" - "math/big" - "testing" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/commands" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/u256" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/eth/protocols/eth" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - 
"github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/stretchr/testify/require" -) - -func TestSendRawTransaction(t *testing.T) { - t.Skip("Flaky test") - m, require := stages.Mock(t), require.New(t) - - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - }, false /* intermediateHashes */) - require.NoError(err) - { // Do 1 step to start txPool - - // Send NewBlock message - b, err := rlp.EncodeToBytes(ð.NewBlockPacket{ - Block: chain.TopBlock, - TD: big.NewInt(1), // This is ignored anyway - }) - require.NoError(err) - m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { - require.NoError(err) - } - // Send all the headers - b, err = rlp.EncodeToBytes(ð.BlockHeadersPacket66{ - RequestId: 1, - BlockHeadersPacket: chain.Headers, - }) - require.NoError(err) - m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { - require.NoError(err) - } - m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed - - initialCycle := true - highestSeenHeader := chain.TopBlock.NumberU64() - if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { - t.Fatal(err) - } - } - - expectValue := uint64(1234) - txn, err := types.SignTx(types.NewTransaction(0, common.Address{1}, uint256.NewInt(expectValue), params.TxGas, uint256.NewInt(10*params.GWei), nil), *types.LatestSignerForChainID(m.ChainConfig.ChainID), m.Key) - require.NoError(err) - - ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) - txPool := txpool.NewTxpoolClient(conn) - ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api 
:= commands.NewEthAPI(commands.NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), m.DB, nil, txPool, nil, 5000000) - - buf := bytes.NewBuffer(nil) - err = txn.MarshalBinary(buf) - require.NoError(err) - - txsCh := make(chan []types.Transaction, 1) - id := ff.SubscribePendingTxs(txsCh) - defer ff.UnsubscribePendingTxs(id) - - _, err = api.SendRawTransaction(ctx, buf.Bytes()) - require.NoError(err) - - got := <-txsCh - require.Equal(expectValue, got[0].GetValue().Uint64()) - - //send same tx second time and expect error - _, err = api.SendRawTransaction(ctx, buf.Bytes()) - require.NotNil(err) - require.Equal("ALREADY_EXISTS: already known", err.Error()) - m.ReceiveWg.Wait() - - //TODO: make propagation easy to test - now race - //time.Sleep(time.Second) - //sent := m.SentMessage(0) - //require.Equal(eth.ToProto[m.MultiClient.Protocol()][eth.NewPooledTransactionHashesMsg], sent.Id) -} - -func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) types.Transaction { - return pricedTransaction(nonce, gaslimit, u256.Num1, key) -} - -func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *uint256.Int, key *ecdsa.PrivateKey) types.Transaction { - tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, uint256.NewInt(100), gaslimit, gasprice, nil), *types.LatestSignerForChainID(big.NewInt(1337)), key) - return tx -} diff --git a/cmd/rpcdaemon22/commands/starknet_accounts.go b/cmd/rpcdaemon22/commands/starknet_accounts.go deleted file mode 100644 index abe0e5c7903..00000000000 --- a/cmd/rpcdaemon22/commands/starknet_accounts.go +++ /dev/null @@ -1,39 +0,0 @@ -package commands - -import ( - "context" - "fmt" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/adapter" - "github.com/ledgerwatch/erigon/turbo/rpchelper" -) - -// GetCode implements starknet_getCode. 
Returns the byte code at a given address (if it's a smart contract). -func (api *StarknetImpl) GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { - tx, err1 := api.db.BeginRo(ctx) - if err1 != nil { - return nil, fmt.Errorf("getCode cannot open tx: %w", err1) - } - defer tx.Rollback() - blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) - if err != nil { - return nil, err - } - - reader := adapter.NewStateReader(tx, blockNumber) - acc, err := reader.ReadAccountData(address) - if acc == nil || err != nil { - return hexutil.Bytes(""), nil - } - res, err := reader.ReadAccountCode(address, acc.Incarnation, acc.CodeHash) - if res == nil || err != nil { - return hexutil.Bytes(""), nil - } - if res == nil { - return hexutil.Bytes(""), nil - } - return res, nil -} diff --git a/cmd/rpcdaemon22/commands/starknet_api.go b/cmd/rpcdaemon22/commands/starknet_api.go deleted file mode 100644 index 0423e31e725..00000000000 --- a/cmd/rpcdaemon22/commands/starknet_api.go +++ /dev/null @@ -1,34 +0,0 @@ -package commands - -import ( - "context" - "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/rpc" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" -) - -type StarknetAPI interface { - SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) - GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) - Call(ctx context.Context, request StarknetCallRequest, blockNrOrHash rpc.BlockNumberOrHash) ([]string, error) -} - -type StarknetImpl struct { - *BaseAPI - db kv.RoDB - client starknet.CAIROVMClient - txPool txpool.TxpoolClient -} - -func NewStarknetAPI(base *BaseAPI, db kv.RoDB, client starknet.CAIROVMClient, txPool 
txpool.TxpoolClient) *StarknetImpl { - return &StarknetImpl{ - BaseAPI: base, - db: db, - client: client, - txPool: txPool, - } -} diff --git a/cmd/rpcdaemon22/commands/starknet_call.go b/cmd/rpcdaemon22/commands/starknet_call.go deleted file mode 100644 index 4b68eac9c39..00000000000 --- a/cmd/rpcdaemon22/commands/starknet_call.go +++ /dev/null @@ -1,96 +0,0 @@ -package commands - -import ( - "context" - "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/rpc" - "google.golang.org/protobuf/types/known/anypb" - "google.golang.org/protobuf/types/known/wrapperspb" - "reflect" - "strings" -) - -type StarknetGrpcCallArgs struct { - Inputs string - Address string - Function string - Code string - BlockHash string - BlockNumber int64 - Network string -} - -type StarknetCallRequest struct { - ContractAddress common.Address32 - EntryPointSelector string - CallData []string -} - -func (s StarknetGrpcCallArgs) ToMapAny() (result map[string]*anypb.Any) { - result = make(map[string]*anypb.Any) - - v := reflect.ValueOf(s) - typeOfS := v.Type() - - for i := 0; i < v.NumField(); i++ { - fieldName := strings.ToLower(typeOfS.Field(i).Name) - switch v.Field(i).Kind() { - case reflect.Int64: - result[fieldName], _ = anypb.New(wrapperspb.Int64(v.Field(i).Interface().(int64))) - default: - result[fieldName], _ = anypb.New(wrapperspb.String(v.Field(i).Interface().(string))) - } - } - return result -} - -// Call implements starknet_call. 
-func (api *StarknetImpl) Call(ctx context.Context, request StarknetCallRequest, blockNrOrHash rpc.BlockNumberOrHash) ([]string, error) { - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - code, err := api.GetCode(ctx, request.ContractAddress.ToCommonAddress(), blockNrOrHash) - if err != nil { - return nil, err - } - - requestParams := &StarknetGrpcCallArgs{ - Inputs: strings.Join(request.CallData, ","), - Address: request.ContractAddress.String(), - Function: request.EntryPointSelector, - Code: code.String(), - } - - if blockNrOrHash.BlockHash != nil { - requestParams.BlockHash = blockNrOrHash.BlockHash.String() - } - - if blockNrOrHash.BlockNumber != nil { - requestParams.BlockNumber = blockNrOrHash.BlockNumber.Int64() - } - - requestParamsMap := requestParams.ToMapAny() - - grpcRequest := &starknet.CallRequest{ - Method: "starknet_call", - Params: requestParamsMap, - } - - response, err := api.client.Call(ctx, grpcRequest) - if err != nil { - return nil, err - } - - var result []string - for _, v := range response.Result { - s := wrapperspb.String("") - v.UnmarshalTo(s) - result = append(result, s.GetValue()) - } - - return result, nil -} diff --git a/cmd/rpcdaemon22/commands/starknet_send_transaction.go b/cmd/rpcdaemon22/commands/starknet_send_transaction.go deleted file mode 100644 index 7bb90ea3bf0..00000000000 --- a/cmd/rpcdaemon22/commands/starknet_send_transaction.go +++ /dev/null @@ -1,50 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "errors" - "fmt" - txPoolProto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/log/v3" -) - -var ( - ErrOnlyStarknetTx = errors.New("only support starknet transactions") - ErrOnlyContractDeploy = errors.New("only support contract creation") -) - -// 
SendRawTransaction deploy new cairo contract -func (api *StarknetImpl) SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) { - txn, err := types.DecodeTransaction(rlp.NewStream(bytes.NewReader(encodedTx), uint64(len(encodedTx)))) - - if err != nil { - return common.Hash{}, err - } - - if !txn.IsStarkNet() { - return common.Hash{}, ErrOnlyStarknetTx - } - - if !txn.IsContractDeploy() { - return common.Hash{}, ErrOnlyContractDeploy - } - - hash := txn.Hash() - res, err := api.txPool.Add(ctx, &txPoolProto.AddRequest{RlpTxs: [][]byte{encodedTx}}) - if err != nil { - return common.Hash{}, err - } - - if res.Imported[0] != txPoolProto.ImportResult_SUCCESS { - return hash, fmt.Errorf("%s: %s", txPoolProto.ImportResult_name[int32(res.Imported[0])], res.Errors[0]) - } - - log.Info("Submitted contract creation", "hash", txn.Hash().Hex(), "nonce", txn.GetNonce(), "value", txn.GetValue()) - - return txn.Hash(), nil -} diff --git a/cmd/rpcdaemon22/commands/starknet_send_transaction_test.go b/cmd/rpcdaemon22/commands/starknet_send_transaction_test.go deleted file mode 100644 index 4e8eb3979e5..00000000000 --- a/cmd/rpcdaemon22/commands/starknet_send_transaction_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package commands_test - -import ( - "bytes" - "testing" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/commands" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/stretchr/testify/require" -) - -func TestErrorStarknetSendRawTransaction(t 
*testing.T) { - var cases = []struct { - name string - tx string - error error - }{ - {name: "wrong tx type", tx: generateDynamicFeeTransaction(), error: commands.ErrOnlyStarknetTx}, - {name: "not contract creation", tx: generateStarknetTransaction(), error: commands.ErrOnlyContractDeploy}, - } - - m, require := stages.MockWithTxPool(t), require.New(t) - ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) - txPool := txpool.NewTxpoolClient(conn) - starknetClient := starknet.NewCAIROVMClient(conn) - ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - - for _, tt := range cases { - api := commands.NewStarknetAPI(commands.NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), m.DB, starknetClient, txPool) - - t.Run(tt.name, func(t *testing.T) { - hex, _ := hexutil.Decode(tt.tx) - - _, err := api.SendRawTransaction(ctx, hex) - - require.ErrorIs(err, tt.error) - }) - } -} - -func generateDynamicFeeTransaction() string { - buf := bytes.NewBuffer(nil) - types.DynamicFeeTransaction{ - CommonTx: types.CommonTx{ - ChainID: new(uint256.Int), - Nonce: 1, - Value: uint256.NewInt(1), - Gas: 1, - }, - Tip: new(uint256.Int), - FeeCap: new(uint256.Int), - }.MarshalBinary(buf) - - return hexutil.Encode(buf.Bytes()) -} - -func generateStarknetTransaction() string { - buf := bytes.NewBuffer(nil) - types.StarknetTransaction{ - CommonTx: types.CommonTx{ - ChainID: new(uint256.Int), - Nonce: 1, - Value: uint256.NewInt(1), - Gas: 1, - To: &common.Address{}, - }, - Tip: new(uint256.Int), - FeeCap: new(uint256.Int), - }.MarshalBinary(buf) - - return hexutil.Encode(buf.Bytes()) -} diff --git a/cmd/rpcdaemon22/commands/storage_range.go b/cmd/rpcdaemon22/commands/storage_range.go deleted file mode 100644 index 141be618bbc..00000000000 --- a/cmd/rpcdaemon22/commands/storage_range.go +++ /dev/null @@ -1,42 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/holiman/uint256" - 
"github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/state" -) - -// StorageRangeResult is the result of a debug_storageRangeAt API call. -type StorageRangeResult struct { - Storage StorageMap `json:"storage"` - NextKey *common.Hash `json:"nextKey"` // nil if Storage includes the last key in the trie. -} - -// StorageMap a map from storage locations to StorageEntry items -type StorageMap map[common.Hash]StorageEntry - -// StorageEntry an entry in storage of the account -type StorageEntry struct { - Key *common.Hash `json:"key"` - Value common.Hash `json:"value"` -} - -func StorageRangeAt(stateReader *state.PlainState, contractAddress common.Address, start []byte, maxResult int) (StorageRangeResult, error) { - result := StorageRangeResult{Storage: StorageMap{}} - resultCount := 0 - - if err := stateReader.ForEachStorage(contractAddress, common.BytesToHash(start), func(key, seckey common.Hash, value uint256.Int) bool { - if resultCount < maxResult { - result.Storage[seckey] = StorageEntry{Key: &key, Value: value.Bytes32()} - } else { - result.NextKey = &key - } - resultCount++ - return resultCount <= maxResult - }, maxResult+1); err != nil { - return StorageRangeResult{}, fmt.Errorf("error walking over storage: %w", err) - } - return result, nil -} diff --git a/cmd/rpcdaemon22/commands/trace_adhoc.go b/cmd/rpcdaemon22/commands/trace_adhoc.go deleted file mode 100644 index 2f3c2bb743e..00000000000 --- a/cmd/rpcdaemon22/commands/trace_adhoc.go +++ /dev/null @@ -1,1224 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "math" - "math/big" - "strings" - "time" - - "github.com/holiman/uint256" - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - math2 "github.com/ledgerwatch/erigon/common/math" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/state" - 
"github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/shards" - "github.com/ledgerwatch/erigon/turbo/transactions" - "github.com/ledgerwatch/log/v3" -) - -const callTimeout = 5 * time.Minute - -const ( - CALL = "call" - CALLCODE = "callcode" - DELEGATECALL = "delegatecall" - STATICCALL = "staticcall" - CREATE = "create" - SUICIDE = "suicide" - REWARD = "reward" - TraceTypeTrace = "trace" - TraceTypeStateDiff = "stateDiff" - TraceTypeVmTrace = "vmTrace" -) - -// TraceCallParam (see SendTxArgs -- this allows optional prams plus don't use MixedcaseAddress -type TraceCallParam struct { - From *common.Address `json:"from"` - To *common.Address `json:"to"` - Gas *hexutil.Uint64 `json:"gas"` - GasPrice *hexutil.Big `json:"gasPrice"` - MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"` - MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"` - Value *hexutil.Big `json:"value"` - Data hexutil.Bytes `json:"data"` - AccessList *types.AccessList `json:"accessList"` - txHash *common.Hash - traceTypes []string -} - -// TraceCallResult is the response to `trace_call` method -type TraceCallResult struct { - Output hexutil.Bytes `json:"output"` - StateDiff map[common.Address]*StateDiffAccount `json:"stateDiff"` - Trace []*ParityTrace `json:"trace"` - VmTrace *VmTrace `json:"vmTrace"` - TransactionHash *common.Hash `json:"transactionHash,omitempty"` -} - -// StateDiffAccount is the part of `trace_call` response that is under "stateDiff" tag -type StateDiffAccount struct { - Balance interface{} `json:"balance"` // Can be either string "=" or mapping "*" => {"from": "hex", "to": "hex"} - Code interface{} `json:"code"` - Nonce interface{} `json:"nonce"` - Storage map[common.Hash]map[string]interface{} `json:"storage"` -} - -type 
StateDiffBalance struct { - From *hexutil.Big `json:"from"` - To *hexutil.Big `json:"to"` -} - -type StateDiffCode struct { - From hexutil.Bytes `json:"from"` - To hexutil.Bytes `json:"to"` -} - -type StateDiffNonce struct { - From hexutil.Uint64 `json:"from"` - To hexutil.Uint64 `json:"to"` -} - -type StateDiffStorage struct { - From common.Hash `json:"from"` - To common.Hash `json:"to"` -} - -// VmTrace is the part of `trace_call` response that is under "vmTrace" tag -type VmTrace struct { - Code hexutil.Bytes `json:"code"` - Ops []*VmTraceOp `json:"ops"` -} - -// VmTraceOp is one element of the vmTrace ops trace -type VmTraceOp struct { - Cost int `json:"cost"` - Ex *VmTraceEx `json:"ex"` - Pc int `json:"pc"` - Sub *VmTrace `json:"sub"` - Op string `json:"op,omitempty"` - Idx string `json:"idx,omitempty"` -} - -type VmTraceEx struct { - Mem *VmTraceMem `json:"mem"` - Push []string `json:"push"` - Store *VmTraceStore `json:"store"` - Used int `json:"used"` -} - -type VmTraceMem struct { - Data string `json:"data"` - Off int `json:"off"` -} - -type VmTraceStore struct { - Key string `json:"key"` - Val string `json:"val"` -} - -// ToMessage converts CallArgs to the Message type used by the core evm -func (args *TraceCallParam) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (types.Message, error) { - // Set sender address or use zero address if none specified. 
- var addr common.Address - if args.From != nil { - addr = *args.From - } - - // Set default gas & gas price if none were set - gas := globalGasCap - if gas == 0 { - gas = uint64(math.MaxUint64 / 2) - } - if args.Gas != nil { - gas = uint64(*args.Gas) - } - if globalGasCap != 0 && globalGasCap < gas { - log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) - gas = globalGasCap - } - var ( - gasPrice *uint256.Int - gasFeeCap *uint256.Int - gasTipCap *uint256.Int - ) - if baseFee == nil { - // If there's no basefee, then it must be a non-1559 execution - gasPrice = new(uint256.Int) - if args.GasPrice != nil { - overflow := gasPrice.SetFromBig(args.GasPrice.ToInt()) - if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") - } - } - gasFeeCap, gasTipCap = gasPrice, gasPrice - } else { - // A basefee is provided, necessitating 1559-type execution - if args.GasPrice != nil { - var overflow bool - // User specified the legacy gas field, convert to 1559 gas typing - gasPrice, overflow = uint256.FromBig(args.GasPrice.ToInt()) - if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") - } - gasFeeCap, gasTipCap = gasPrice, gasPrice - } else { - // User specified 1559 gas feilds (or none), use those - gasFeeCap = new(uint256.Int) - if args.MaxFeePerGas != nil { - overflow := gasFeeCap.SetFromBig(args.MaxFeePerGas.ToInt()) - if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") - } - } - gasTipCap = new(uint256.Int) - if args.MaxPriorityFeePerGas != nil { - overflow := gasTipCap.SetFromBig(args.MaxPriorityFeePerGas.ToInt()) - if overflow { - return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") - } - } - // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes - gasPrice = new(uint256.Int) - if !gasFeeCap.IsZero() || !gasTipCap.IsZero() { - gasPrice = math2.U256Min(new(uint256.Int).Add(gasTipCap, baseFee), 
gasFeeCap) - } else { - // This means gasFeeCap == 0, gasTipCap == 0 - gasPrice.Set(baseFee) - gasFeeCap, gasTipCap = gasPrice, gasPrice - } - } - } - value := new(uint256.Int) - if args.Value != nil { - overflow := value.SetFromBig(args.Value.ToInt()) - if overflow { - return types.Message{}, fmt.Errorf("args.Value higher than 2^256-1") - } - } - var data []byte - if args.Data != nil { - data = args.Data - } - var accessList types.AccessList - if args.AccessList != nil { - accessList = *args.AccessList - } - msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, gasFeeCap, gasTipCap, data, accessList, false /* checkNonce */) - return msg, nil -} - -// OpenEthereum-style tracer -type OeTracer struct { - r *TraceCallResult - traceAddr []int - traceStack []*ParityTrace - precompile bool // Whether the last CaptureStart was called with `precompile = true` - compat bool // Bug for bug compatibility mode - lastVmOp *VmTraceOp - lastOp vm.OpCode - lastMemOff uint64 - lastMemLen uint64 - memOffStack []uint64 - memLenStack []uint64 - lastOffStack *VmTraceOp - vmOpStack []*VmTraceOp // Stack of vmTrace operations as call depth increases - idx []string // Prefix for the "idx" inside operations, for easier navigation -} - -func (ot *OeTracer) CaptureStart(env *vm.EVM, depth int, from common.Address, to common.Address, precompile bool, create bool, calltype vm.CallType, input []byte, gas uint64, value *big.Int, code []byte) { - //fmt.Printf("CaptureStart depth %d, from %x, to %x, create %t, input %x, gas %d, value %d, precompile %t\n", depth, from, to, create, input, gas, value, precompile) - if ot.r.VmTrace != nil { - var vmTrace *VmTrace - if depth > 0 { - var vmT *VmTrace - if len(ot.vmOpStack) > 0 { - vmT = ot.vmOpStack[len(ot.vmOpStack)-1].Sub - } else { - vmT = ot.r.VmTrace - } - if !ot.compat { - ot.idx = append(ot.idx, fmt.Sprintf("%d-", len(vmT.Ops)-1)) - } - } - if ot.lastVmOp != nil { - vmTrace = &VmTrace{Ops: []*VmTraceOp{}} - ot.lastVmOp.Sub = vmTrace - 
ot.vmOpStack = append(ot.vmOpStack, ot.lastVmOp) - } else { - vmTrace = ot.r.VmTrace - } - if create { - vmTrace.Code = common.CopyBytes(input) - if ot.lastVmOp != nil { - ot.lastVmOp.Cost += int(gas) - } - } else { - vmTrace.Code = code - } - } - if precompile && depth > 0 && value.Sign() <= 0 { - ot.precompile = true - return - } - if gas > 500000000 { - gas = 500000001 - (0x8000000000000000 - gas) - } - trace := &ParityTrace{} - if create { - trResult := &CreateTraceResult{} - trace.Type = CREATE - trResult.Address = new(common.Address) - copy(trResult.Address[:], to.Bytes()) - trace.Result = trResult - } else { - trace.Result = &TraceResult{} - trace.Type = CALL - } - if depth > 0 { - topTrace := ot.traceStack[len(ot.traceStack)-1] - traceIdx := topTrace.Subtraces - ot.traceAddr = append(ot.traceAddr, traceIdx) - topTrace.Subtraces++ - if calltype == vm.DELEGATECALLT { - switch action := topTrace.Action.(type) { - case *CreateTraceAction: - value = action.Value.ToInt() - case *CallTraceAction: - value = action.Value.ToInt() - } - } - if calltype == vm.STATICCALLT { - value = big.NewInt(0) - } - } - trace.TraceAddress = make([]int, len(ot.traceAddr)) - copy(trace.TraceAddress, ot.traceAddr) - if create { - action := CreateTraceAction{} - action.From = from - action.Gas.ToInt().SetUint64(gas) - action.Init = common.CopyBytes(input) - action.Value.ToInt().Set(value) - trace.Action = &action - } else { - action := CallTraceAction{} - switch calltype { - case vm.CALLT: - action.CallType = CALL - case vm.CALLCODET: - action.CallType = CALLCODE - case vm.DELEGATECALLT: - action.CallType = DELEGATECALL - case vm.STATICCALLT: - action.CallType = STATICCALL - } - action.From = from - action.To = to - action.Gas.ToInt().SetUint64(gas) - action.Input = common.CopyBytes(input) - action.Value.ToInt().Set(value) - trace.Action = &action - } - ot.r.Trace = append(ot.r.Trace, trace) - ot.traceStack = append(ot.traceStack, trace) -} - -func (ot *OeTracer) CaptureEnd(depth int, 
output []byte, startGas, endGas uint64, t time.Duration, err error) { - if ot.r.VmTrace != nil { - if len(ot.vmOpStack) > 0 { - ot.lastOffStack = ot.vmOpStack[len(ot.vmOpStack)-1] - ot.vmOpStack = ot.vmOpStack[:len(ot.vmOpStack)-1] - } - if !ot.compat && depth > 0 { - ot.idx = ot.idx[:len(ot.idx)-1] - } - if depth > 0 { - ot.lastMemOff = ot.memOffStack[len(ot.memOffStack)-1] - ot.memOffStack = ot.memOffStack[:len(ot.memOffStack)-1] - ot.lastMemLen = ot.memLenStack[len(ot.memLenStack)-1] - ot.memLenStack = ot.memLenStack[:len(ot.memLenStack)-1] - } - } - if ot.precompile { - ot.precompile = false - return - } - if depth == 0 { - ot.r.Output = common.CopyBytes(output) - } - ignoreError := false - topTrace := ot.traceStack[len(ot.traceStack)-1] - if ot.compat { - ignoreError = depth == 0 && topTrace.Type == CREATE - } - if err != nil && !ignoreError { - switch err { - case vm.ErrInvalidJump: - topTrace.Error = "Bad jump destination" - case vm.ErrContractAddressCollision, vm.ErrCodeStoreOutOfGas, vm.ErrOutOfGas, vm.ErrGasUintOverflow: - topTrace.Error = "Out of gas" - case vm.ErrExecutionReverted: - topTrace.Error = "Reverted" - case vm.ErrWriteProtection: - topTrace.Error = "Mutable Call In Static Context" - default: - switch err.(type) { - case *vm.ErrStackUnderflow: - topTrace.Error = "Stack underflow" - case *vm.ErrInvalidOpCode: - topTrace.Error = "Bad instruction" - default: - topTrace.Error = err.Error() - } - } - topTrace.Result = nil - } else { - if len(output) > 0 { - switch topTrace.Type { - case CALL: - topTrace.Result.(*TraceResult).Output = common.CopyBytes(output) - case CREATE: - topTrace.Result.(*CreateTraceResult).Code = common.CopyBytes(output) - } - } - switch topTrace.Type { - case CALL: - topTrace.Result.(*TraceResult).GasUsed = new(hexutil.Big) - topTrace.Result.(*TraceResult).GasUsed.ToInt().SetUint64(startGas - endGas) - case CREATE: - topTrace.Result.(*CreateTraceResult).GasUsed = new(hexutil.Big) - 
topTrace.Result.(*CreateTraceResult).GasUsed.ToInt().SetUint64(startGas - endGas) - } - } - ot.traceStack = ot.traceStack[:len(ot.traceStack)-1] - if depth > 0 { - ot.traceAddr = ot.traceAddr[:len(ot.traceAddr)-1] - } -} - -func (ot *OeTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, opDepth int, err error) { - memory := scope.Memory - st := scope.Stack - - if ot.r.VmTrace != nil { - var vmTrace *VmTrace - if len(ot.vmOpStack) > 0 { - vmTrace = ot.vmOpStack[len(ot.vmOpStack)-1].Sub - } else { - vmTrace = ot.r.VmTrace - } - if ot.lastVmOp != nil && ot.lastVmOp.Ex != nil { - // Set the "push" of the last operation - var showStack int - switch { - case ot.lastOp >= vm.PUSH1 && ot.lastOp <= vm.PUSH32: - showStack = 1 - case ot.lastOp >= vm.SWAP1 && ot.lastOp <= vm.SWAP16: - showStack = int(ot.lastOp-vm.SWAP1) + 2 - case ot.lastOp >= vm.DUP1 && ot.lastOp <= vm.DUP16: - showStack = int(ot.lastOp-vm.DUP1) + 2 - } - switch ot.lastOp { - case vm.CALLDATALOAD, vm.SLOAD, vm.MLOAD, vm.CALLDATASIZE, vm.LT, vm.GT, vm.DIV, vm.SDIV, vm.SAR, vm.AND, vm.EQ, vm.CALLVALUE, vm.ISZERO, - vm.ADD, vm.EXP, vm.CALLER, vm.SHA3, vm.SUB, vm.ADDRESS, vm.GAS, vm.MUL, vm.RETURNDATASIZE, vm.NOT, vm.SHR, vm.SHL, - vm.EXTCODESIZE, vm.SLT, vm.OR, vm.NUMBER, vm.PC, vm.TIMESTAMP, vm.BALANCE, vm.SELFBALANCE, vm.MULMOD, vm.ADDMOD, vm.BASEFEE, - vm.BLOCKHASH, vm.BYTE, vm.XOR, vm.ORIGIN, vm.CODESIZE, vm.MOD, vm.SIGNEXTEND, vm.GASLIMIT, vm.DIFFICULTY, vm.SGT, vm.GASPRICE, - vm.MSIZE, vm.EXTCODEHASH: - showStack = 1 - } - for i := showStack - 1; i >= 0; i-- { - ot.lastVmOp.Ex.Push = append(ot.lastVmOp.Ex.Push, st.Back(i).String()) - } - // Set the "mem" of the last operation - var setMem bool - switch ot.lastOp { - case vm.MSTORE, vm.MSTORE8, vm.MLOAD, vm.RETURNDATACOPY, vm.CALLDATACOPY, vm.CODECOPY: - setMem = true - } - if setMem && ot.lastMemLen > 0 { - cpy := memory.GetCopy(ot.lastMemOff, ot.lastMemLen) - if len(cpy) == 0 { - cpy = 
make([]byte, ot.lastMemLen) - } - ot.lastVmOp.Ex.Mem = &VmTraceMem{Data: fmt.Sprintf("0x%0x", cpy), Off: int(ot.lastMemOff)} - } - } - if ot.lastOffStack != nil { - ot.lastOffStack.Ex.Used = int(gas) - ot.lastOffStack.Ex.Push = []string{st.Back(0).String()} - if ot.lastMemLen > 0 && memory != nil { - cpy := memory.GetCopy(ot.lastMemOff, ot.lastMemLen) - if len(cpy) == 0 { - cpy = make([]byte, ot.lastMemLen) - } - ot.lastOffStack.Ex.Mem = &VmTraceMem{Data: fmt.Sprintf("0x%0x", cpy), Off: int(ot.lastMemOff)} - } - ot.lastOffStack = nil - } - if ot.lastOp == vm.STOP && op == vm.STOP && len(ot.vmOpStack) == 0 { - // Looks like OE is "optimising away" the second STOP - return - } - ot.lastVmOp = &VmTraceOp{Ex: &VmTraceEx{}} - vmTrace.Ops = append(vmTrace.Ops, ot.lastVmOp) - if !ot.compat { - var sb strings.Builder - for _, idx := range ot.idx { - sb.WriteString(idx) - } - ot.lastVmOp.Idx = fmt.Sprintf("%s%d", sb.String(), len(vmTrace.Ops)-1) - } - ot.lastOp = op - ot.lastVmOp.Cost = int(cost) - ot.lastVmOp.Pc = int(pc) - ot.lastVmOp.Ex.Push = []string{} - ot.lastVmOp.Ex.Used = int(gas) - int(cost) - if !ot.compat { - ot.lastVmOp.Op = op.String() - } - switch op { - case vm.MSTORE, vm.MLOAD: - ot.lastMemOff = st.Back(0).Uint64() - ot.lastMemLen = 32 - case vm.MSTORE8: - ot.lastMemOff = st.Back(0).Uint64() - ot.lastMemLen = 1 - case vm.RETURNDATACOPY, vm.CALLDATACOPY, vm.CODECOPY: - ot.lastMemOff = st.Back(0).Uint64() - ot.lastMemLen = st.Back(2).Uint64() - case vm.STATICCALL, vm.DELEGATECALL: - ot.memOffStack = append(ot.memOffStack, st.Back(4).Uint64()) - ot.memLenStack = append(ot.memLenStack, st.Back(5).Uint64()) - case vm.CALL, vm.CALLCODE: - ot.memOffStack = append(ot.memOffStack, st.Back(5).Uint64()) - ot.memLenStack = append(ot.memLenStack, st.Back(6).Uint64()) - case vm.CREATE, vm.CREATE2: - // Effectively disable memory output - ot.memOffStack = append(ot.memOffStack, 0) - ot.memLenStack = append(ot.memLenStack, 0) - case vm.SSTORE: - ot.lastVmOp.Ex.Store = 
&VmTraceStore{Key: st.Back(0).String(), Val: st.Back(1).String()} - } - if ot.lastVmOp.Ex.Used < 0 { - ot.lastVmOp.Ex = nil - } - } -} - -func (ot *OeTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, opDepth int, err error) { -} - -func (ot *OeTracer) CaptureSelfDestruct(from common.Address, to common.Address, value *big.Int) { - trace := &ParityTrace{} - trace.Type = SUICIDE - action := &SuicideTraceAction{} - action.Address = from - action.RefundAddress = to - action.Balance.ToInt().Set(value) - trace.Action = action - topTrace := ot.traceStack[len(ot.traceStack)-1] - traceIdx := topTrace.Subtraces - ot.traceAddr = append(ot.traceAddr, traceIdx) - topTrace.Subtraces++ - trace.TraceAddress = make([]int, len(ot.traceAddr)) - copy(trace.TraceAddress, ot.traceAddr) - ot.traceAddr = ot.traceAddr[:len(ot.traceAddr)-1] - ot.r.Trace = append(ot.r.Trace, trace) -} - -func (ot *OeTracer) CaptureAccountRead(account common.Address) error { - return nil -} -func (ot *OeTracer) CaptureAccountWrite(account common.Address) error { - return nil -} - -// Implements core/state/StateWriter to provide state diffs -type StateDiff struct { - sdMap map[common.Address]*StateDiffAccount -} - -func (sd *StateDiff) UpdateAccountData(address common.Address, original, account *accounts.Account) error { - if _, ok := sd.sdMap[address]; !ok { - sd.sdMap[address] = &StateDiffAccount{Storage: make(map[common.Hash]map[string]interface{})} - } - return nil -} - -func (sd *StateDiff) UpdateAccountCode(address common.Address, incarnation uint64, codeHash common.Hash, code []byte) error { - if _, ok := sd.sdMap[address]; !ok { - sd.sdMap[address] = &StateDiffAccount{Storage: make(map[common.Hash]map[string]interface{})} - } - return nil -} - -func (sd *StateDiff) DeleteAccount(address common.Address, original *accounts.Account) error { - if _, ok := sd.sdMap[address]; !ok { - sd.sdMap[address] = &StateDiffAccount{Storage: 
make(map[common.Hash]map[string]interface{})} - } - return nil -} - -func (sd *StateDiff) WriteAccountStorage(address common.Address, incarnation uint64, key *common.Hash, original, value *uint256.Int) error { - if *original == *value { - return nil - } - accountDiff := sd.sdMap[address] - if accountDiff == nil { - accountDiff = &StateDiffAccount{Storage: make(map[common.Hash]map[string]interface{})} - sd.sdMap[address] = accountDiff - } - m := make(map[string]interface{}) - m["*"] = &StateDiffStorage{From: common.BytesToHash(original.Bytes()), To: common.BytesToHash(value.Bytes())} - accountDiff.Storage[*key] = m - return nil -} - -func (sd *StateDiff) CreateContract(address common.Address) error { - if _, ok := sd.sdMap[address]; !ok { - sd.sdMap[address] = &StateDiffAccount{Storage: make(map[common.Hash]map[string]interface{})} - } - return nil -} - -// CompareStates uses the addresses accumulated in the sdMap and compares balances, nonces, and codes of the accounts, and fills the rest of the sdMap -func (sd *StateDiff) CompareStates(initialIbs, ibs *state.IntraBlockState) { - var toRemove []common.Address - for addr, accountDiff := range sd.sdMap { - initialExist := initialIbs.Exist(addr) - exist := ibs.Exist(addr) - if initialExist { - if exist { - var allEqual = len(accountDiff.Storage) == 0 - fromBalance := initialIbs.GetBalance(addr).ToBig() - toBalance := ibs.GetBalance(addr).ToBig() - if fromBalance.Cmp(toBalance) == 0 { - accountDiff.Balance = "=" - } else { - m := make(map[string]*StateDiffBalance) - m["*"] = &StateDiffBalance{From: (*hexutil.Big)(fromBalance), To: (*hexutil.Big)(toBalance)} - accountDiff.Balance = m - allEqual = false - } - fromCode := initialIbs.GetCode(addr) - toCode := ibs.GetCode(addr) - if bytes.Equal(fromCode, toCode) { - accountDiff.Code = "=" - } else { - m := make(map[string]*StateDiffCode) - m["*"] = &StateDiffCode{From: fromCode, To: toCode} - accountDiff.Code = m - allEqual = false - } - fromNonce := 
initialIbs.GetNonce(addr) - toNonce := ibs.GetNonce(addr) - if fromNonce == toNonce { - accountDiff.Nonce = "=" - } else { - m := make(map[string]*StateDiffNonce) - m["*"] = &StateDiffNonce{From: hexutil.Uint64(fromNonce), To: hexutil.Uint64(toNonce)} - accountDiff.Nonce = m - allEqual = false - } - if allEqual { - toRemove = append(toRemove, addr) - } - } else { - { - m := make(map[string]*hexutil.Big) - m["-"] = (*hexutil.Big)(initialIbs.GetBalance(addr).ToBig()) - accountDiff.Balance = m - } - { - m := make(map[string]hexutil.Bytes) - m["-"] = initialIbs.GetCode(addr) - accountDiff.Code = m - } - { - m := make(map[string]hexutil.Uint64) - m["-"] = hexutil.Uint64(initialIbs.GetNonce(addr)) - accountDiff.Nonce = m - } - } - } else if exist { - { - m := make(map[string]*hexutil.Big) - m["+"] = (*hexutil.Big)(ibs.GetBalance(addr).ToBig()) - accountDiff.Balance = m - } - { - m := make(map[string]hexutil.Bytes) - m["+"] = ibs.GetCode(addr) - accountDiff.Code = m - } - { - m := make(map[string]hexutil.Uint64) - m["+"] = hexutil.Uint64(ibs.GetNonce(addr)) - accountDiff.Nonce = m - } - // Transform storage - for _, sm := range accountDiff.Storage { - str := sm["*"].(*StateDiffStorage) - delete(sm, "*") - sm["+"] = &str.To - } - } else { - toRemove = append(toRemove, addr) - } - } - for _, addr := range toRemove { - delete(sd.sdMap, addr) - } -} - -func (api *TraceAPIImpl) ReplayTransaction(ctx context.Context, txHash common.Hash, traceTypes []string) (*TraceCallResult, error) { - tx, err := api.kv.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - - blockNum, ok, err := api.txnLookup(ctx, tx, txHash) - if err != nil { - return nil, err - } - if !ok { - return nil, nil - } - block, err := api.blockByNumberWithSenders(tx, blockNum) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil - } - var txnIndex uint64 - for i, transaction := range 
block.Transactions() { - if transaction.Hash() == txHash { - txnIndex = uint64(i) - break - } - } - - bn := hexutil.Uint64(blockNum) - - parentNr := bn - if parentNr > 0 { - parentNr -= 1 - } - - // Returns an array of trace arrays, one trace array for each transaction - traces, err := api.callManyTransactions(ctx, tx, block.Transactions(), traceTypes, block.ParentHash(), rpc.BlockNumber(parentNr), block.Header(), int(txnIndex), types.MakeSigner(chainConfig, blockNum), chainConfig.Rules(blockNum)) - if err != nil { - return nil, err - } - - var traceTypeTrace, traceTypeStateDiff, traceTypeVmTrace bool - for _, traceType := range traceTypes { - switch traceType { - case TraceTypeTrace: - traceTypeTrace = true - case TraceTypeStateDiff: - traceTypeStateDiff = true - case TraceTypeVmTrace: - traceTypeVmTrace = true - default: - return nil, fmt.Errorf("unrecognized trace type: %s", traceType) - } - } - result := &TraceCallResult{} - - for txno, trace := range traces { - // We're only looking for a specific transaction - if txno == int(txnIndex) { - result.Output = trace.Output - if traceTypeTrace { - result.Trace = trace.Trace - } - if traceTypeStateDiff { - result.StateDiff = trace.StateDiff - } - if traceTypeVmTrace { - result.VmTrace = trace.VmTrace - } - - return trace, nil - } - } - return result, nil - -} - -func (api *TraceAPIImpl) ReplayBlockTransactions(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, traceTypes []string) ([]*TraceCallResult, error) { - tx, err := api.kv.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - - blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) - if err != nil { - return nil, err - } - - parentNr := blockNumber - if parentNr > 0 { - parentNr -= 1 - } - // Extract transactions from block - block, bErr := api.blockByNumberWithSenders(tx, blockNumber) - if bErr != nil { - return nil, bErr 
- } - if block == nil { - return nil, fmt.Errorf("could not find block %d", blockNumber) - } - var traceTypeTrace, traceTypeStateDiff, traceTypeVmTrace bool - for _, traceType := range traceTypes { - switch traceType { - case TraceTypeTrace: - traceTypeTrace = true - case TraceTypeStateDiff: - traceTypeStateDiff = true - case TraceTypeVmTrace: - traceTypeVmTrace = true - default: - return nil, fmt.Errorf("unrecognized trace type: %s", traceType) - } - } - - // Returns an array of trace arrays, one trace array for each transaction - traces, err := api.callManyTransactions(ctx, tx, block.Transactions(), traceTypes, block.ParentHash(), rpc.BlockNumber(parentNr), block.Header(), -1 /* all tx indices */, types.MakeSigner(chainConfig, blockNumber), chainConfig.Rules(blockNumber)) - if err != nil { - return nil, err - } - - result := make([]*TraceCallResult, len(traces)) - for i, trace := range traces { - tr := &TraceCallResult{} - tr.Output = trace.Output - if traceTypeTrace { - tr.Trace = trace.Trace - } else { - tr.Trace = []*ParityTrace{} - } - if traceTypeStateDiff { - tr.StateDiff = trace.StateDiff - } - if traceTypeVmTrace { - tr.VmTrace = trace.VmTrace - } - result[i] = tr - txhash := block.Transactions()[i].Hash() - tr.TransactionHash = &txhash - } - - return result, nil -} - -// Call implements trace_call. 
-func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTypes []string, blockNrOrHash *rpc.BlockNumberOrHash) (*TraceCallResult, error) { - tx, err := api.kv.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - - if blockNrOrHash == nil { - var num = rpc.LatestBlockNumber - blockNrOrHash = &rpc.BlockNumberOrHash{BlockNumber: &num} - } - - blockNumber, hash, latest, err := rpchelper.GetBlockNumber(*blockNrOrHash, tx, api.filters) - if err != nil { - return nil, err - } - var stateReader state.StateReader - if latest { - cacheView, err := api.stateCache.View(ctx, tx) - if err != nil { - return nil, err - } - stateReader = state.NewCachedReader2(cacheView, tx) - } else { - stateReader = state.NewPlainState(tx, blockNumber) - } - ibs := state.New(stateReader) - - block, err := api.blockWithSenders(tx, hash, blockNumber) - if err != nil { - return nil, err - } - if block == nil { - return nil, fmt.Errorf("block %d(%x) not found", blockNumber, hash) - } - header := block.Header() - - // Setup context so it may be cancelled the call has completed - // or, in case of unmetered gas, setup a context with a timeout. - var cancel context.CancelFunc - if callTimeout > 0 { - ctx, cancel = context.WithTimeout(ctx, callTimeout) - } else { - ctx, cancel = context.WithCancel(ctx) - } - - // Make sure the context is cancelled when the call has completed - // this makes sure resources are cleaned up. 
- defer cancel() - - traceResult := &TraceCallResult{Trace: []*ParityTrace{}} - var traceTypeTrace, traceTypeStateDiff, traceTypeVmTrace bool - for _, traceType := range traceTypes { - switch traceType { - case TraceTypeTrace: - traceTypeTrace = true - case TraceTypeStateDiff: - traceTypeStateDiff = true - case TraceTypeVmTrace: - traceTypeVmTrace = true - default: - return nil, fmt.Errorf("unrecognized trace type: %s", traceType) - } - } - if traceTypeVmTrace { - traceResult.VmTrace = &VmTrace{Ops: []*VmTraceOp{}} - } - var ot OeTracer - ot.compat = api.compatibility - if traceTypeTrace || traceTypeVmTrace { - ot.r = traceResult - ot.traceAddr = []int{} - } - - // Get a new instance of the EVM. - var baseFee *uint256.Int - if header != nil && header.BaseFee != nil { - var overflow bool - baseFee, overflow = uint256.FromBig(header.BaseFee) - if overflow { - return nil, fmt.Errorf("header.BaseFee uint256 overflow") - } - } - msg, err := args.ToMessage(api.gasCap, baseFee) - if err != nil { - return nil, err - } - - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, tx, contractHasTEVM, api._blockReader) - blockCtx.GasLimit = math.MaxUint64 - blockCtx.MaxGasLimit = true - - evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vm.Config{Debug: traceTypeTrace, Tracer: &ot}) - - // Wait for the context to be done and cancel the evm. 
Even if the - // EVM has finished, cancelling may be done (repeatedly) - go func() { - <-ctx.Done() - evm.Cancel() - }() - - gp := new(core.GasPool).AddGas(msg.Gas()) - var execResult *core.ExecutionResult - ibs.Prepare(common.Hash{}, common.Hash{}, 0) - execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, true /* gasBailout */) - if err != nil { - return nil, err - } - traceResult.Output = common.CopyBytes(execResult.ReturnData) - if traceTypeStateDiff { - sdMap := make(map[common.Address]*StateDiffAccount) - traceResult.StateDiff = sdMap - sd := &StateDiff{sdMap: sdMap} - if err = ibs.FinalizeTx(evm.ChainRules(), sd); err != nil { - return nil, err - } - // Create initial IntraBlockState, we will compare it with ibs (IntraBlockState after the transaction) - initialIbs := state.New(stateReader) - sd.CompareStates(initialIbs, ibs) - } - - // If the timer caused an abort, return an appropriate error message - if evm.Cancelled() { - return nil, fmt.Errorf("execution aborted (timeout = %v)", callTimeout) - } - - return traceResult, nil -} - -// CallMany implements trace_callMany. 
-func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, parentNrOrHash *rpc.BlockNumberOrHash) ([]*TraceCallResult, error) { - dbtx, err := api.kv.BeginRo(ctx) - if err != nil { - return nil, err - } - defer dbtx.Rollback() - - var callParams []TraceCallParam - dec := json.NewDecoder(bytes.NewReader(calls)) - tok, err := dec.Token() - if err != nil { - return nil, err - } - if tok != json.Delim('[') { - return nil, fmt.Errorf("expected array of [callparam, tracetypes]") - } - for dec.More() { - tok, err = dec.Token() - if err != nil { - return nil, err - } - if tok != json.Delim('[') { - return nil, fmt.Errorf("expected [callparam, tracetypes]") - } - callParams = append(callParams, TraceCallParam{}) - args := &callParams[len(callParams)-1] - if err = dec.Decode(args); err != nil { - return nil, err - } - if err = dec.Decode(&args.traceTypes); err != nil { - return nil, err - } - tok, err = dec.Token() - if err != nil { - return nil, err - } - if tok != json.Delim(']') { - return nil, fmt.Errorf("expected end of [callparam, tracetypes]") - } - } - tok, err = dec.Token() - if err != nil { - return nil, err - } - if tok != json.Delim(']') { - return nil, fmt.Errorf("expected end of array of [callparam, tracetypes]") - } - var baseFee *uint256.Int - if parentNrOrHash == nil { - var num = rpc.LatestBlockNumber - parentNrOrHash = &rpc.BlockNumberOrHash{BlockNumber: &num} - } - blockNumber, hash, _, err := rpchelper.GetBlockNumber(*parentNrOrHash, dbtx, api.filters) - if err != nil { - return nil, err - } - - // TODO: can read here only parent header - parentBlock, err := api.blockWithSenders(dbtx, hash, blockNumber) - if err != nil { - return nil, err - } - parentHeader := parentBlock.Header() - if parentHeader == nil { - return nil, fmt.Errorf("parent header %d(%x) not found", blockNumber, hash) - } - if parentHeader != nil && parentHeader.BaseFee != nil { - var overflow bool - baseFee, overflow = uint256.FromBig(parentHeader.BaseFee) - if 
overflow { - return nil, fmt.Errorf("header.BaseFee uint256 overflow") - } - } - msgs := make([]types.Message, len(callParams)) - for i, args := range callParams { - msgs[i], err = args.ToMessage(api.gasCap, baseFee) - if err != nil { - return nil, fmt.Errorf("convert callParam to msg: %w", err) - } - } - return api.doCallMany(ctx, dbtx, msgs, callParams, parentNrOrHash, nil, true /* gasBailout */, -1 /* all tx indices */) -} - -func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []types.Message, callParams []TraceCallParam, parentNrOrHash *rpc.BlockNumberOrHash, header *types.Header, - gasBailout bool, txIndexNeeded int) ([]*TraceCallResult, error) { - chainConfig, err := api.chainConfig(dbtx) - if err != nil { - return nil, err - } - - if parentNrOrHash == nil { - var num = rpc.LatestBlockNumber - parentNrOrHash = &rpc.BlockNumberOrHash{BlockNumber: &num} - } - blockNumber, hash, latest, err := rpchelper.GetBlockNumber(*parentNrOrHash, dbtx, api.filters) - if err != nil { - return nil, err - } - var stateReader state.StateReader - if latest { - cacheView, err := api.stateCache.View(ctx, dbtx) - if err != nil { - return nil, err - } - stateReader = state.NewCachedReader2(cacheView, dbtx) // this cache stays between RPC calls - } else { - stateReader = state.NewPlainState(dbtx, blockNumber+1) - } - stateCache := shards.NewStateCache(32, 0 /* no limit */) // this cache living only during current RPC call, but required to store state writes - cachedReader := state.NewCachedReader(stateReader, stateCache) - noop := state.NewNoopWriter() - cachedWriter := state.NewCachedWriter(noop, stateCache) - - // TODO: can read here only parent header - parentBlock, err := api.blockWithSenders(dbtx, hash, blockNumber) - if err != nil { - return nil, err - } - parentHeader := parentBlock.Header() - if parentHeader == nil { - return nil, fmt.Errorf("parent header %d(%x) not found", blockNumber, hash) - } - - // Setup context so it may be cancelled the call has 
completed - // or, in case of unmetered gas, setup a context with a timeout. - var cancel context.CancelFunc - if callTimeout > 0 { - ctx, cancel = context.WithTimeout(ctx, callTimeout) - } else { - ctx, cancel = context.WithCancel(ctx) - } - - // Make sure the context is cancelled when the call has completed - // this makes sure resources are cleaned up. - defer cancel() - results := []*TraceCallResult{} - - useParent := false - if header == nil { - header = parentHeader - useParent = true - } - - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(dbtx) - } - - for txIndex, msg := range msgs { - if err := libcommon.Stopped(ctx.Done()); err != nil { - return nil, err - } - traceResult := &TraceCallResult{Trace: []*ParityTrace{}} - var traceTypeTrace, traceTypeStateDiff, traceTypeVmTrace bool - args := callParams[txIndex] - for _, traceType := range args.traceTypes { - switch traceType { - case TraceTypeTrace: - traceTypeTrace = true - case TraceTypeStateDiff: - traceTypeStateDiff = true - case TraceTypeVmTrace: - traceTypeVmTrace = true - default: - return nil, fmt.Errorf("unrecognized trace type: %s", traceType) - } - } - vmConfig := vm.Config{} - if (traceTypeTrace && (txIndexNeeded == -1 || txIndex == txIndexNeeded)) || traceTypeVmTrace { - var ot OeTracer - ot.compat = api.compatibility - ot.r = traceResult - ot.idx = []string{fmt.Sprintf("%d-", txIndex)} - if traceTypeTrace && (txIndexNeeded == -1 || txIndex == txIndexNeeded) { - ot.traceAddr = []int{} - } - if traceTypeVmTrace { - traceResult.VmTrace = &VmTrace{Ops: []*VmTraceOp{}} - } - vmConfig.Debug = true - vmConfig.Tracer = &ot - } - - // Get a new instance of the EVM. 
- blockCtx, txCtx := transactions.GetEvmContext(msg, header, parentNrOrHash.RequireCanonical, dbtx, contractHasTEVM, api._blockReader) - if useParent { - blockCtx.GasLimit = math.MaxUint64 - blockCtx.MaxGasLimit = true - } - ibs := state.New(cachedReader) - // Create initial IntraBlockState, we will compare it with ibs (IntraBlockState after the transaction) - - evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vmConfig) - - gp := new(core.GasPool).AddGas(msg.Gas()) - var execResult *core.ExecutionResult - // Clone the state cache before applying the changes, clone is discarded - var cloneReader state.StateReader - if traceTypeStateDiff { - cloneCache := stateCache.Clone() - cloneReader = state.NewCachedReader(stateReader, cloneCache) - } - if args.txHash != nil { - ibs.Prepare(*args.txHash, header.Hash(), txIndex) - } else { - ibs.Prepare(common.Hash{}, header.Hash(), txIndex) - } - execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, gasBailout /* gasBailout */) - if err != nil { - return nil, fmt.Errorf("first run for txIndex %d error: %w", txIndex, err) - } - traceResult.Output = common.CopyBytes(execResult.ReturnData) - if traceTypeStateDiff { - initialIbs := state.New(cloneReader) - sdMap := make(map[common.Address]*StateDiffAccount) - traceResult.StateDiff = sdMap - sd := &StateDiff{sdMap: sdMap} - if err = ibs.FinalizeTx(evm.ChainRules(), sd); err != nil { - return nil, err - } - sd.CompareStates(initialIbs, ibs) - if err = ibs.CommitBlock(evm.ChainRules(), cachedWriter); err != nil { - return nil, err - } - } else { - if err = ibs.FinalizeTx(evm.ChainRules(), noop); err != nil { - return nil, err - } - if err = ibs.CommitBlock(evm.ChainRules(), cachedWriter); err != nil { - return nil, err - } - } - if !traceTypeTrace { - traceResult.Trace = []*ParityTrace{} - } - results = append(results, traceResult) - } - return results, nil -} - -// RawTransaction implements trace_rawTransaction. 
-func (api *TraceAPIImpl) RawTransaction(ctx context.Context, txHash common.Hash, traceTypes []string) ([]interface{}, error) { - var stub []interface{} - return stub, fmt.Errorf(NotImplemented, "trace_rawTransaction") -} diff --git a/cmd/rpcdaemon22/commands/trace_adhoc_test.go b/cmd/rpcdaemon22/commands/trace_adhoc_test.go deleted file mode 100644 index abd446c1a78..00000000000 --- a/cmd/rpcdaemon22/commands/trace_adhoc_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package commands - -import ( - "context" - "encoding/json" - "testing" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/stretchr/testify/require" -) - -func TestEmptyQuery(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, &httpcfg.HttpCfg{}) - // Call GetTransactionReceipt for transaction which is not in the database - var latest = rpc.LatestBlockNumber - results, err := api.CallMany(context.Background(), json.RawMessage("[]"), &rpc.BlockNumberOrHash{BlockNumber: &latest}) - if err != nil { - t.Errorf("calling CallMany: %v", err) - } - if results == nil { - t.Errorf("expected empty array, got nil") - } - if len(results) > 0 { - t.Errorf("expected empty array, got %d elements", len(results)) - } -} -func TestCoinbaseBalance(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, 
&httpcfg.HttpCfg{}) - // Call GetTransactionReceipt for transaction which is not in the database - var latest = rpc.LatestBlockNumber - results, err := api.CallMany(context.Background(), json.RawMessage(` -[ - [{"from":"0x71562b71999873db5b286df957af199ec94617f7","to":"0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e","gas":"0x15f90","gasPrice":"0x4a817c800","value":"0x1"},["trace", "stateDiff"]], - [{"from":"0x71562b71999873db5b286df957af199ec94617f7","to":"0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e","gas":"0x15f90","gasPrice":"0x4a817c800","value":"0x1"},["trace", "stateDiff"]] -] -`), &rpc.BlockNumberOrHash{BlockNumber: &latest}) - if err != nil { - t.Errorf("calling CallMany: %v", err) - } - if results == nil { - t.Errorf("expected empty array, got nil") - } - if len(results) != 2 { - t.Errorf("expected array with 2 elements, got %d elements", len(results)) - } - // Expect balance increase of the coinbase (zero address) - if _, ok := results[1].StateDiff[common.Address{}]; !ok { - t.Errorf("expected balance increase for coinbase (zero address)") - } -} - -func TestReplayTransaction(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, &httpcfg.HttpCfg{}) - var txnHash common.Hash - if err := db.View(context.Background(), func(tx kv.Tx) error { - b, err := rawdb.ReadBlockByNumber(tx, 6) - if err != nil { - return err - } - txnHash = b.Transactions()[5].Hash() - return nil - }); err != nil { - t.Fatal(err) - } - - // Call GetTransactionReceipt for transaction which is not in the database - results, err := api.ReplayTransaction(context.Background(), txnHash, []string{"stateDiff"}) - if err != nil { - t.Errorf("calling ReplayTransaction: %v", err) - } - require.NotNil(t, results) - require.NotNil(t, results.StateDiff) - addrDiff := 
results.StateDiff[common.HexToAddress("0x0000000000000006000000000000000000000000")] - v := addrDiff.Balance.(map[string]*hexutil.Big)["+"].ToInt().Uint64() - require.Equal(t, uint64(1_000_000_000_000_000), v) -} - -func TestReplayBlockTransactions(t *testing.T) { - db := rpcdaemontest.CreateTestKV(t) - stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, &httpcfg.HttpCfg{}) - - // Call GetTransactionReceipt for transaction which is not in the database - n := rpc.BlockNumber(6) - results, err := api.ReplayBlockTransactions(context.Background(), rpc.BlockNumberOrHash{BlockNumber: &n}, []string{"stateDiff"}) - if err != nil { - t.Errorf("calling ReplayBlockTransactions: %v", err) - } - require.NotNil(t, results) - require.NotNil(t, results[0].StateDiff) - addrDiff := results[0].StateDiff[common.HexToAddress("0x0000000000000001000000000000000000000000")] - v := addrDiff.Balance.(map[string]*hexutil.Big)["+"].ToInt().Uint64() - require.Equal(t, uint64(1_000_000_000_000_000), v) -} diff --git a/cmd/rpcdaemon22/commands/trace_api.go b/cmd/rpcdaemon22/commands/trace_api.go deleted file mode 100644 index c8a34cc2576..00000000000 --- a/cmd/rpcdaemon22/commands/trace_api.go +++ /dev/null @@ -1,49 +0,0 @@ -package commands - -import ( - "context" - "encoding/json" - - jsoniter "github.com/json-iterator/go" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/rpc" -) - -// TraceAPI RPC interface into tracing API -type TraceAPI interface { - // Ad-hoc (see ./trace_adhoc.go) - ReplayBlockTransactions(ctx context.Context, blockNr rpc.BlockNumberOrHash, traceTypes []string) ([]*TraceCallResult, error) - ReplayTransaction(ctx context.Context, txHash common.Hash, traceTypes []string) (*TraceCallResult, error) 
- Call(ctx context.Context, call TraceCallParam, types []string, blockNr *rpc.BlockNumberOrHash) (*TraceCallResult, error) - CallMany(ctx context.Context, calls json.RawMessage, blockNr *rpc.BlockNumberOrHash) ([]*TraceCallResult, error) - RawTransaction(ctx context.Context, txHash common.Hash, traceTypes []string) ([]interface{}, error) - - // Filtering (see ./trace_filtering.go) - Transaction(ctx context.Context, txHash common.Hash) (ParityTraces, error) - Get(ctx context.Context, txHash common.Hash, txIndicies []hexutil.Uint64) (*ParityTrace, error) - Block(ctx context.Context, blockNr rpc.BlockNumber) (ParityTraces, error) - Filter(ctx context.Context, req TraceFilterRequest, stream *jsoniter.Stream) error -} - -// TraceAPIImpl is implementation of the TraceAPI interface based on remote Db access -type TraceAPIImpl struct { - *BaseAPI - kv kv.RoDB - maxTraces uint64 - gasCap uint64 - compatibility bool // Bug for bug compatiblity with OpenEthereum -} - -// NewTraceAPI returns NewTraceAPI instance -func NewTraceAPI(base *BaseAPI, kv kv.RoDB, cfg *httpcfg.HttpCfg) *TraceAPIImpl { - return &TraceAPIImpl{ - BaseAPI: base, - kv: kv, - maxTraces: cfg.MaxTraces, - gasCap: cfg.Gascap, - compatibility: cfg.TraceCompatibility, - } -} diff --git a/cmd/rpcdaemon22/commands/trace_filtering.go b/cmd/rpcdaemon22/commands/trace_filtering.go deleted file mode 100644 index fc456811666..00000000000 --- a/cmd/rpcdaemon22/commands/trace_filtering.go +++ /dev/null @@ -1,572 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "sort" - - "github.com/RoaringBitmap/roaring/roaring64" - jsoniter "github.com/json-iterator/go" - "github.com/ledgerwatch/erigon-lib/kv" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/consensus/ethash" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/types" - 
"github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/shards" - "github.com/ledgerwatch/erigon/turbo/transactions" -) - -// Transaction implements trace_transaction -func (api *TraceAPIImpl) Transaction(ctx context.Context, txHash common.Hash) (ParityTraces, error) { - tx, err := api.kv.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - - blockNumber, ok, err := api.txnLookup(ctx, tx, txHash) - if err != nil { - return nil, err - } - if !ok { - return nil, nil - } - block, err := api.blockByNumberWithSenders(tx, blockNumber) - if err != nil { - return nil, err - } - if block == nil { - return nil, nil - } - - // Extract transactions from block - block, bErr := api.blockByNumberWithSenders(tx, blockNumber) - if bErr != nil { - return nil, bErr - } - if block == nil { - return nil, fmt.Errorf("could not find block %d", blockNumber) - } - var txIndex int - for idx, txn := range block.Transactions() { - if txn.Hash() == txHash { - txIndex = idx - break - } - } - bn := hexutil.Uint64(blockNumber) - - parentNr := bn - if parentNr > 0 { - parentNr -= 1 - } - hash := block.Hash() - - // Returns an array of trace arrays, one trace array for each transaction - traces, err := api.callManyTransactions(ctx, tx, block.Transactions(), []string{TraceTypeTrace}, block.ParentHash(), rpc.BlockNumber(parentNr), block.Header(), txIndex, types.MakeSigner(chainConfig, blockNumber), chainConfig.Rules(blockNumber)) - if err != nil { - return nil, err - } - - out := make([]ParityTrace, 0, len(traces)) - blockno := uint64(bn) - for txno, trace := range traces { - txhash := block.Transactions()[txno].Hash() - // We're only looking for a specific transaction - if txno == txIndex { - for _, pt := range trace.Trace { - pt.BlockHash = &hash - pt.BlockNumber = &blockno - pt.TransactionHash 
= &txhash - txpos := uint64(txno) - pt.TransactionPosition = &txpos - out = append(out, *pt) - } - } - } - - return out, err -} - -// Get implements trace_get -func (api *TraceAPIImpl) Get(ctx context.Context, txHash common.Hash, indicies []hexutil.Uint64) (*ParityTrace, error) { - // Parity fails if it gets more than a single index. It returns nothing in this case. Must we? - if len(indicies) > 1 { - return nil, nil - } - - traces, err := api.Transaction(ctx, txHash) - if err != nil { - return nil, err - } - - // 'trace_get' index starts at one (oddly) - firstIndex := int(indicies[0]) + 1 - for i, trace := range traces { - if i == firstIndex { - return &trace, nil - } - } - return nil, err -} - -// Block implements trace_block -func (api *TraceAPIImpl) Block(ctx context.Context, blockNr rpc.BlockNumber) (ParityTraces, error) { - tx, err := api.kv.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - blockNum, err := getBlockNumber(blockNr, tx) - if err != nil { - return nil, err - } - if blockNum == 0 { - return []ParityTrace{}, nil - } - bn := hexutil.Uint64(blockNum) - - // Extract transactions from block - block, bErr := api.blockByNumberWithSenders(tx, blockNum) - if bErr != nil { - return nil, bErr - } - if block == nil { - return nil, fmt.Errorf("could not find block %d", uint64(bn)) - } - hash := block.Hash() - - parentNr := bn - if parentNr > 0 { - parentNr -= 1 - } - - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - traces, err := api.callManyTransactions(ctx, tx, block.Transactions(), []string{TraceTypeTrace}, block.ParentHash(), rpc.BlockNumber(parentNr), block.Header(), -1 /* all tx indices */, types.MakeSigner(chainConfig, blockNum), chainConfig.Rules(blockNum)) - if err != nil { - return nil, err - } - - out := make([]ParityTrace, 0, len(traces)) - blockno := uint64(bn) - for txno, trace := range traces { - txhash := block.Transactions()[txno].Hash() - txpos := uint64(txno) - for _, pt := range 
trace.Trace { - pt.BlockHash = &hash - pt.BlockNumber = &blockno - pt.TransactionHash = &txhash - pt.TransactionPosition = &txpos - out = append(out, *pt) - } - } - minerReward, uncleRewards := ethash.AccumulateRewards(chainConfig, block.Header(), block.Uncles()) - var tr ParityTrace - var rewardAction = &RewardTraceAction{} - rewardAction.Author = block.Coinbase() - rewardAction.RewardType = "block" // nolint: goconst - rewardAction.Value.ToInt().Set(minerReward.ToBig()) - tr.Action = rewardAction - tr.BlockHash = &common.Hash{} - copy(tr.BlockHash[:], block.Hash().Bytes()) - tr.BlockNumber = new(uint64) - *tr.BlockNumber = block.NumberU64() - tr.Type = "reward" // nolint: goconst - tr.TraceAddress = []int{} - out = append(out, tr) - for i, uncle := range block.Uncles() { - if i < len(uncleRewards) { - var tr ParityTrace - rewardAction = &RewardTraceAction{} - rewardAction.Author = uncle.Coinbase - rewardAction.RewardType = "uncle" // nolint: goconst - rewardAction.Value.ToInt().Set(uncleRewards[i].ToBig()) - tr.Action = rewardAction - tr.BlockHash = &common.Hash{} - copy(tr.BlockHash[:], block.Hash().Bytes()) - tr.BlockNumber = new(uint64) - *tr.BlockNumber = block.NumberU64() - tr.Type = "reward" // nolint: goconst - tr.TraceAddress = []int{} - out = append(out, tr) - } - } - - return out, err -} - -// Filter implements trace_filter -// NOTE: We do not store full traces - we just store index for each address -// Pull blocks which have txs with matching address -func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, stream *jsoniter.Stream) error { - dbtx, err1 := api.kv.BeginRo(ctx) - if err1 != nil { - stream.WriteNil() - return fmt.Errorf("traceFilter cannot open tx: %w", err1) - } - defer dbtx.Rollback() - - var fromBlock uint64 - var toBlock uint64 - if req.FromBlock == nil { - fromBlock = 0 - } else { - fromBlock = uint64(*req.FromBlock) - } - - if req.ToBlock == nil { - headNumber := rawdb.ReadHeaderNumber(dbtx, 
rawdb.ReadHeadHeaderHash(dbtx)) - toBlock = *headNumber - } else { - toBlock = uint64(*req.ToBlock) - } - - var fromTxNum, toTxNum uint64 - if fromBlock > 0 { - fromTxNum = api._txNums[fromBlock-1] - } - toTxNum = api._txNums[toBlock] // toBlock is an inclusive bound - - if fromBlock > toBlock { - stream.WriteNil() - return fmt.Errorf("invalid parameters: fromBlock cannot be greater than toBlock") - } - - fromAddresses := make(map[common.Address]struct{}, len(req.FromAddress)) - toAddresses := make(map[common.Address]struct{}, len(req.ToAddress)) - - var ( - allTxs roaring64.Bitmap - txsTo roaring64.Bitmap - ) - ac := api._agg.MakeContext() - - for _, addr := range req.FromAddress { - if addr != nil { - it := ac.TraceFromIterator(addr.Bytes(), fromTxNum, toTxNum, nil) - for it.HasNext() { - allTxs.Add(it.Next()) - } - fromAddresses[*addr] = struct{}{} - } - } - - for _, addr := range req.ToAddress { - if addr != nil { - it := ac.TraceToIterator(addr.Bytes(), fromTxNum, toTxNum, nil) - for it.HasNext() { - txsTo.Add(it.Next()) - } - toAddresses[*addr] = struct{}{} - } - } - - switch req.Mode { - case TraceFilterModeIntersection: - allTxs.And(&txsTo) - case TraceFilterModeUnion: - fallthrough - default: - allTxs.Or(&txsTo) - } - - // Special case - if no addresses specified, take all traces - if len(req.FromAddress) == 0 && len(req.ToAddress) == 0 { - allTxs.AddRange(fromTxNum, toTxNum+1) - } else { - allTxs.RemoveRange(0, fromTxNum) - allTxs.RemoveRange(toTxNum, uint64(0x1000000000000)) - } - - chainConfig, err := api.chainConfig(dbtx) - if err != nil { - stream.WriteNil() - return err - } - - var json = jsoniter.ConfigCompatibleWithStandardLibrary - stream.WriteArrayStart() - first := true - // Execute all transactions in picked blocks - - count := uint64(^uint(0)) // this just makes it easier to use below - if req.Count != nil { - count = *req.Count - } - after := uint64(0) // this just makes it easier to use below - if req.After != nil { - after = *req.After - } 
- nSeen := uint64(0) - nExported := uint64(0) - includeAll := len(fromAddresses) == 0 && len(toAddresses) == 0 - it := allTxs.Iterator() - var lastBlockNum uint64 - var lastBlockHash common.Hash - var lastHeader *types.Header - var lastSigner *types.Signer - var lastRules *params.Rules - stateReader := state.NewHistoryReader22(ac, nil /* ReadIndices */) - noop := state.NewNoopWriter() - for it.HasNext() { - txNum := uint64(it.Next()) - // Find block number - blockNum := uint64(sort.Search(len(api._txNums), func(i int) bool { - return api._txNums[i] > txNum - })) - if blockNum > lastBlockNum { - if lastHeader, err = api._blockReader.HeaderByNumber(ctx, nil, blockNum); err != nil { - stream.WriteNil() - return err - } - lastBlockNum = blockNum - lastBlockHash = lastHeader.Hash() - lastSigner = types.MakeSigner(chainConfig, blockNum) - lastRules = chainConfig.Rules(blockNum) - } - if txNum+1 == api._txNums[blockNum] { - body, err := api._blockReader.Body(ctx, nil, lastBlockHash, blockNum) - if err != nil { - stream.WriteNil() - return err - } - // Block reward section, handle specially - minerReward, uncleRewards := ethash.AccumulateRewards(chainConfig, lastHeader, body.Uncles) - if _, ok := toAddresses[lastHeader.Coinbase]; ok || includeAll { - nSeen++ - var tr ParityTrace - var rewardAction = &RewardTraceAction{} - rewardAction.Author = lastHeader.Coinbase - rewardAction.RewardType = "block" // nolint: goconst - rewardAction.Value.ToInt().Set(minerReward.ToBig()) - tr.Action = rewardAction - tr.BlockHash = &common.Hash{} - copy(tr.BlockHash[:], lastBlockHash.Bytes()) - tr.BlockNumber = new(uint64) - *tr.BlockNumber = blockNum - tr.Type = "reward" // nolint: goconst - tr.TraceAddress = []int{} - b, err := json.Marshal(tr) - if err != nil { - stream.WriteNil() - return err - } - if nSeen > after && nExported < count { - if first { - first = false - } else { - stream.WriteMore() - } - stream.Write(b) - nExported++ - } - } - for i, uncle := range body.Uncles { - if _, 
ok := toAddresses[uncle.Coinbase]; ok || includeAll { - if i < len(uncleRewards) { - nSeen++ - var tr ParityTrace - rewardAction := &RewardTraceAction{} - rewardAction.Author = uncle.Coinbase - rewardAction.RewardType = "uncle" // nolint: goconst - rewardAction.Value.ToInt().Set(uncleRewards[i].ToBig()) - tr.Action = rewardAction - tr.BlockHash = &common.Hash{} - copy(tr.BlockHash[:], lastBlockHash[:]) - tr.BlockNumber = new(uint64) - *tr.BlockNumber = blockNum - tr.Type = "reward" // nolint: goconst - tr.TraceAddress = []int{} - b, err := json.Marshal(tr) - if err != nil { - stream.WriteNil() - return err - } - if nSeen > after && nExported < count { - if first { - first = false - } else { - stream.WriteMore() - } - stream.Write(b) - nExported++ - } - } - } - } - continue - } - var startTxNum uint64 - if blockNum > 0 { - startTxNum = api._txNums[blockNum-1] - } - txIndex := txNum - startTxNum - 1 - //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txNum, blockNum, txIndex) - txn, err := api._txnReader.TxnByIdxInBlock(ctx, nil, blockNum, int(txIndex)) - if err != nil { - stream.WriteNil() - return err - } - txHash := txn.Hash() - msg, err := txn.AsMessage(*lastSigner, lastHeader.BaseFee, lastRules) - if err != nil { - stream.WriteNil() - return err - } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - blockCtx, txCtx := transactions.GetEvmContext(msg, lastHeader, true /* requireCanonical */, dbtx, contractHasTEVM, api._blockReader) - stateReader.SetTxNum(txNum) - stateCache := shards.NewStateCache(32, 0 /* no limit */) // this cache living only during current RPC call, but required to store state writes - cachedReader := state.NewCachedReader(stateReader, stateCache) - cachedWriter := state.NewCachedWriter(noop, stateCache) - vmConfig := vm.Config{} - vmConfig.SkipAnalysis = core.SkipAnalysis(chainConfig, blockNum) - traceResult := &TraceCallResult{Trace: []*ParityTrace{}} - var ot OeTracer - ot.compat = api.compatibility - 
ot.r = traceResult - ot.idx = []string{fmt.Sprintf("%d-", txIndex)} - ot.traceAddr = []int{} - vmConfig.Debug = true - vmConfig.Tracer = &ot - ibs := state.New(cachedReader) - evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vmConfig) - - gp := new(core.GasPool).AddGas(msg.Gas()) - ibs.Prepare(txHash, lastBlockHash, int(txIndex)) - var execResult *core.ExecutionResult - execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) - if err != nil { - stream.WriteNil() - return err - } - traceResult.Output = common.CopyBytes(execResult.ReturnData) - if err = ibs.FinalizeTx(evm.ChainRules(), noop); err != nil { - stream.WriteNil() - return err - } - if err = ibs.CommitBlock(evm.ChainRules(), cachedWriter); err != nil { - stream.WriteNil() - return err - } - for _, pt := range traceResult.Trace { - if includeAll || filter_trace(pt, fromAddresses, toAddresses) { - nSeen++ - pt.BlockHash = &lastBlockHash - pt.BlockNumber = &blockNum - pt.TransactionHash = &txHash - pt.TransactionPosition = &txIndex - b, err := json.Marshal(pt) - if err != nil { - stream.WriteNil() - return err - } - if nSeen > after && nExported < count { - if first { - first = false - } else { - stream.WriteMore() - } - stream.Write(b) - nExported++ - } - } - } - } - stream.WriteArrayEnd() - return stream.Flush() -} - -func filter_trace(pt *ParityTrace, fromAddresses map[common.Address]struct{}, toAddresses map[common.Address]struct{}) bool { - switch action := pt.Action.(type) { - case *CallTraceAction: - _, f := fromAddresses[action.From] - _, t := toAddresses[action.To] - if f || t { - return true - } - case *CreateTraceAction: - _, f := fromAddresses[action.From] - if f { - return true - } - - if res, ok := pt.Result.(*CreateTraceResult); ok { - if res.Address != nil { - if _, t := toAddresses[*res.Address]; t { - return true - } - } - } - case *SuicideTraceAction: - _, f := fromAddresses[action.Address] - _, t := toAddresses[action.RefundAddress] - if f || t { - 
return true - } - } - - return false -} - -func (api *TraceAPIImpl) callManyTransactions(ctx context.Context, dbtx kv.Tx, txs []types.Transaction, traceTypes []string, parentHash common.Hash, parentNo rpc.BlockNumber, header *types.Header, txIndex int, signer *types.Signer, rules *params.Rules) ([]*TraceCallResult, error) { - callParams := make([]TraceCallParam, 0, len(txs)) - msgs := make([]types.Message, len(txs)) - for i, tx := range txs { - hash := tx.Hash() - callParams = append(callParams, TraceCallParam{ - txHash: &hash, - traceTypes: traceTypes, - }) - var err error - if msgs[i], err = tx.AsMessage(*signer, header.BaseFee, rules); err != nil { - return nil, fmt.Errorf("convert tx into msg: %w", err) - } - } - - traces, cmErr := api.doCallMany(ctx, dbtx, msgs, callParams, &rpc.BlockNumberOrHash{ - BlockNumber: &parentNo, - BlockHash: &parentHash, - RequireCanonical: true, - }, header, false /* gasBailout */, txIndex) - - if cmErr != nil { - return nil, cmErr - } - - return traces, nil -} - -// TraceFilterRequest represents the arguments for trace_filter -type TraceFilterRequest struct { - FromBlock *hexutil.Uint64 `json:"fromBlock"` - ToBlock *hexutil.Uint64 `json:"toBlock"` - FromAddress []*common.Address `json:"fromAddress"` - ToAddress []*common.Address `json:"toAddress"` - Mode TraceFilterMode `json:"mode"` - After *uint64 `json:"after"` - Count *uint64 `json:"count"` -} - -type TraceFilterMode string - -const ( - // Default mode for TraceFilter. 
Unions results referred to addresses from FromAddress or ToAddress - TraceFilterModeUnion = "union" - // IntersectionMode retrives results referred to addresses provided both in FromAddress and ToAddress - TraceFilterModeIntersection = "intersection" -) diff --git a/cmd/rpcdaemon22/commands/trace_types.go b/cmd/rpcdaemon22/commands/trace_types.go deleted file mode 100644 index 2b98c70bdb0..00000000000 --- a/cmd/rpcdaemon22/commands/trace_types.go +++ /dev/null @@ -1,160 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/types" -) - -// TODO:(tjayrush) -// Implementation Notes: -// -- Many of these fields are of string type. I chose to do this for ease of debugging / clarity of code (less -// conversions, etc.).Once we start optimizing this code, many of these fields will be made into their native -// types (Addresses, uint64, etc.) -// -- The ordering of the fields in the Parity types should not be changed. 
This allows us to compare output -// directly with existing Parity tests - -// GethTrace The trace as received from the existing Geth javascript tracer 'callTracer' -type GethTrace struct { - Type string `json:"type"` - Error string `json:"error"` - From string `json:"from"` - To string `json:"to"` - Value string `json:"value"` - Gas string `json:"gas"` - GasUsed string `json:"gasUsed"` - Input string `json:"input"` - Output string `json:"output"` - Time string `json:"time"` - Calls GethTraces `json:"calls"` -} - -// GethTraces an array of GethTraces -type GethTraces []*GethTrace - -// ParityTrace A trace in the desired format (Parity/OpenEtherum) See: https://openethereum.github.io/wiki/JSONRPC-trace-module -type ParityTrace struct { - // Do not change the ordering of these fields -- allows for easier comparison with other clients - Action interface{} `json:"action"` // Can be either CallTraceAction or CreateTraceAction - BlockHash *common.Hash `json:"blockHash,omitempty"` - BlockNumber *uint64 `json:"blockNumber,omitempty"` - Error string `json:"error,omitempty"` - Result interface{} `json:"result"` - Subtraces int `json:"subtraces"` - TraceAddress []int `json:"traceAddress"` - TransactionHash *common.Hash `json:"transactionHash,omitempty"` - TransactionPosition *uint64 `json:"transactionPosition,omitempty"` - Type string `json:"type"` -} - -// ParityTraces An array of parity traces -type ParityTraces []ParityTrace - -// TraceAction A parity formatted trace action -type TraceAction struct { - // Do not change the ordering of these fields -- allows for easier comparison with other clients - Author string `json:"author,omitempty"` - RewardType string `json:"rewardType,omitempty"` - SelfDestructed string `json:"address,omitempty"` - Balance string `json:"balance,omitempty"` - CallType string `json:"callType,omitempty"` - From common.Address `json:"from"` - Gas hexutil.Big `json:"gas"` - Init hexutil.Bytes `json:"init,omitempty"` - Input hexutil.Bytes 
`json:"input,omitempty"` - RefundAddress string `json:"refundAddress,omitempty"` - To string `json:"to,omitempty"` - Value string `json:"value,omitempty"` -} - -type CallTraceAction struct { - From common.Address `json:"from"` - CallType string `json:"callType"` - Gas hexutil.Big `json:"gas"` - Input hexutil.Bytes `json:"input"` - To common.Address `json:"to"` - Value hexutil.Big `json:"value"` -} - -type CreateTraceAction struct { - From common.Address `json:"from"` - Gas hexutil.Big `json:"gas"` - Init hexutil.Bytes `json:"init"` - Value hexutil.Big `json:"value"` -} - -type SuicideTraceAction struct { - Address common.Address `json:"address"` - RefundAddress common.Address `json:"refundAddress"` - Balance hexutil.Big `json:"balance"` -} - -type RewardTraceAction struct { - Author common.Address `json:"author"` - RewardType string `json:"rewardType"` - Value hexutil.Big `json:"value"` -} - -type CreateTraceResult struct { - // Do not change the ordering of these fields -- allows for easier comparison with other clients - Address *common.Address `json:"address,omitempty"` - Code hexutil.Bytes `json:"code"` - GasUsed *hexutil.Big `json:"gasUsed"` -} - -// TraceResult A parity formatted trace result -type TraceResult struct { - // Do not change the ordering of these fields -- allows for easier comparison with other clients - GasUsed *hexutil.Big `json:"gasUsed"` - Output hexutil.Bytes `json:"output"` -} - -// Allows for easy printing of a geth trace for debugging -func (p GethTrace) String() string { - var ret string - ret += fmt.Sprintf("Type: %s\n", p.Type) - ret += fmt.Sprintf("From: %s\n", p.From) - ret += fmt.Sprintf("To: %s\n", p.To) - ret += fmt.Sprintf("Value: %s\n", p.Value) - ret += fmt.Sprintf("Gas: %s\n", p.Gas) - ret += fmt.Sprintf("GasUsed: %s\n", p.GasUsed) - ret += fmt.Sprintf("Input: %s\n", p.Input) - ret += fmt.Sprintf("Output: %s\n", p.Output) - return ret -} - -// Allows for easy printing of a parity trace for debugging -func (t ParityTrace) 
String() string { - var ret string - //ret += fmt.Sprintf("Action.SelfDestructed: %s\n", t.Action.SelfDestructed) - //ret += fmt.Sprintf("Action.Balance: %s\n", t.Action.Balance) - //ret += fmt.Sprintf("Action.CallType: %s\n", t.Action.CallType) - //ret += fmt.Sprintf("Action.From: %s\n", t.Action.From) - //ret += fmt.Sprintf("Action.Gas: %d\n", t.Action.Gas.ToInt()) - //ret += fmt.Sprintf("Action.Init: %s\n", t.Action.Init) - //ret += fmt.Sprintf("Action.Input: %s\n", t.Action.Input) - //ret += fmt.Sprintf("Action.RefundAddress: %s\n", t.Action.RefundAddress) - //ret += fmt.Sprintf("Action.To: %s\n", t.Action.To) - //ret += fmt.Sprintf("Action.Value: %s\n", t.Action.Value) - ret += fmt.Sprintf("BlockHash: %v\n", t.BlockHash) - ret += fmt.Sprintf("BlockNumber: %d\n", t.BlockNumber) - //ret += fmt.Sprintf("Result.Address: %s\n", t.Result.Address) - //ret += fmt.Sprintf("Result.Code: %s\n", t.Result.Code) - //ret += fmt.Sprintf("Result.GasUsed: %s\n", t.Result.GasUsed) - //ret += fmt.Sprintf("Result.Output: %s\n", t.Result.Output) - ret += fmt.Sprintf("Subtraces: %d\n", t.Subtraces) - ret += fmt.Sprintf("TraceAddress: %v\n", t.TraceAddress) - ret += fmt.Sprintf("TransactionHash: %v\n", t.TransactionHash) - ret += fmt.Sprintf("TransactionPosition: %d\n", t.TransactionPosition) - ret += fmt.Sprintf("Type: %s\n", t.Type) - return ret -} - -// Takes a hierarchical Geth trace with fields of different meaning stored in the same named fields depending on 'type'. 
Parity traces -// are flattened depth first and each field is put in its proper place -func (api *TraceAPIImpl) convertToParityTrace(gethTrace GethTrace, blockHash common.Hash, blockNumber uint64, tx types.Transaction, txIndex uint64, depth []int) ParityTraces { //nolint: unused - var traces ParityTraces // nolint prealloc - return traces -} diff --git a/cmd/rpcdaemon22/commands/tracing.go b/cmd/rpcdaemon22/commands/tracing.go deleted file mode 100644 index 02b56214acd..00000000000 --- a/cmd/rpcdaemon22/commands/tracing.go +++ /dev/null @@ -1,245 +0,0 @@ -package commands - -import ( - "context" - "fmt" - - "github.com/holiman/uint256" - jsoniter "github.com/json-iterator/go" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/consensus/ethash" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/eth/tracers" - "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/internal/ethapi" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/transactions" - "github.com/ledgerwatch/log/v3" -) - -// TraceBlockByNumber implements debug_traceBlockByNumber. Returns Geth style block traces. -func (api *PrivateDebugAPIImpl) TraceBlockByNumber(ctx context.Context, blockNum rpc.BlockNumber, config *tracers.TraceConfig, stream *jsoniter.Stream) error { - return api.traceBlock(ctx, rpc.BlockNumberOrHashWithNumber(blockNum), config, stream) -} - -// TraceBlockByHash implements debug_traceBlockByHash. Returns Geth style block traces. 
-func (api *PrivateDebugAPIImpl) TraceBlockByHash(ctx context.Context, hash common.Hash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { - return api.traceBlock(ctx, rpc.BlockNumberOrHashWithHash(hash, true), config, stream) -} - -func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { - tx, err := api.db.BeginRo(ctx) - if err != nil { - stream.WriteNil() - return err - } - defer tx.Rollback() - var block *types.Block - if number, ok := blockNrOrHash.Number(); ok { - block, err = api.blockByRPCNumber(number, tx) - } else if hash, ok := blockNrOrHash.Hash(); ok { - block, err = api.blockByHashWithSenders(tx, hash) - } else { - return fmt.Errorf("invalid arguments; neither block nor hash specified") - } - - if err != nil { - stream.WriteNil() - return err - } - - chainConfig, err := api.chainConfig(tx) - if err != nil { - stream.WriteNil() - return err - } - - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - - getHeader := func(hash common.Hash, number uint64) *types.Header { - h, e := api._blockReader.Header(ctx, tx, hash, number) - if e != nil { - log.Error("getHeader error", "number", number, "hash", hash, "err", e) - } - return h - } - - _, blockCtx, _, ibs, reader, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, block.Hash(), 0) - if err != nil { - stream.WriteNil() - return err - } - - signer := types.MakeSigner(chainConfig, block.NumberU64()) - rules := chainConfig.Rules(block.NumberU64()) - stream.WriteArrayStart() - for idx, tx := range block.Transactions() { - select { - default: - case <-ctx.Done(): - stream.WriteNil() - return ctx.Err() - } - ibs.Prepare(tx.Hash(), block.Hash(), idx) - msg, _ := tx.AsMessage(*signer, block.BaseFee(), rules) - txCtx := vm.TxContext{ - 
TxHash: tx.Hash(), - Origin: msg.From(), - GasPrice: msg.GasPrice().ToBig(), - } - - transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream) - _ = ibs.FinalizeTx(rules, reader) - if idx != len(block.Transactions())-1 { - stream.WriteMore() - } - stream.Flush() - } - stream.WriteArrayEnd() - stream.Flush() - return nil -} - -// TraceTransaction implements debug_traceTransaction. Returns Geth style transaction traces. -func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash common.Hash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { - tx, err := api.db.BeginRo(ctx) - if err != nil { - stream.WriteNil() - return err - } - defer tx.Rollback() - // Retrieve the transaction and assemble its EVM context - blockNum, ok, err := api.txnLookup(ctx, tx, hash) - if err != nil { - stream.WriteNil() - return err - } - if !ok { - stream.WriteNil() - return nil - } - block, err := api.blockByNumberWithSenders(tx, blockNum) - if err != nil { - stream.WriteNil() - return err - } - if block == nil { - stream.WriteNil() - return nil - } - blockHash := block.Hash() - var txnIndex uint64 - var txn types.Transaction - for i, transaction := range block.Transactions() { - if transaction.Hash() == hash { - txnIndex = uint64(i) - txn = transaction - break - } - } - if txn == nil { - var borTx types.Transaction - borTx, _, _, _, err = rawdb.ReadBorTransaction(tx, hash) - - if err != nil { - return err - } - - if borTx != nil { - return nil - } - stream.WriteNil() - return fmt.Errorf("transaction %#x not found", hash) - } - chainConfig, err := api.chainConfig(tx) - if err != nil { - stream.WriteNil() - return err - } - - getHeader := func(hash common.Hash, number uint64) *types.Header { - return rawdb.ReadHeader(tx, hash, number) - } - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - msg, blockCtx, txCtx, ibs, _, err := 
transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, blockHash, txnIndex) - if err != nil { - stream.WriteNil() - return err - } - // Trace the transaction and return - return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream) -} - -func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { - dbtx, err := api.db.BeginRo(ctx) - if err != nil { - stream.WriteNil() - return err - } - defer dbtx.Rollback() - - chainConfig, err := api.chainConfig(dbtx) - if err != nil { - stream.WriteNil() - return err - } - - blockNumber, hash, latest, err := rpchelper.GetBlockNumber(blockNrOrHash, dbtx, api.filters) - if err != nil { - stream.WriteNil() - return err - } - var stateReader state.StateReader - if latest { - cacheView, err := api.stateCache.View(ctx, dbtx) - if err != nil { - return err - } - stateReader = state.NewCachedReader2(cacheView, dbtx) - } else { - stateReader = state.NewPlainState(dbtx, blockNumber) - } - header := rawdb.ReadHeader(dbtx, hash, blockNumber) - if header == nil { - stream.WriteNil() - return fmt.Errorf("block %d(%x) not found", blockNumber, hash) - } - ibs := state.New(stateReader) - - if config != nil && config.StateOverrides != nil { - if err := config.StateOverrides.Override(ibs); err != nil { - return err - } - } - - var baseFee *uint256.Int - if header != nil && header.BaseFee != nil { - var overflow bool - baseFee, overflow = uint256.FromBig(header.BaseFee) - if overflow { - return fmt.Errorf("header.BaseFee uint256 overflow") - } - } - msg, err := args.ToMessage(api.GasCap, baseFee) - if err != nil { - return err - } - - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - if api.TevmEnabled { - contractHasTEVM = ethdb.GetHasTEVM(dbtx) - } - blockCtx, txCtx := transactions.GetEvmContext(msg, header, 
blockNrOrHash.RequireCanonical, dbtx, contractHasTEVM, api._blockReader) - // Trace the transaction and return - return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream) -} diff --git a/cmd/rpcdaemon22/commands/txpool_api.go b/cmd/rpcdaemon22/commands/txpool_api.go deleted file mode 100644 index eccb66bc4f5..00000000000 --- a/cmd/rpcdaemon22/commands/txpool_api.go +++ /dev/null @@ -1,170 +0,0 @@ -package commands - -import ( - "bytes" - "context" - "fmt" - - "github.com/ledgerwatch/erigon-lib/gointerfaces" - proto_txpool "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/rlp" -) - -// NetAPI the interface for the net_ RPC commands -type TxPoolAPI interface { - Content(ctx context.Context) (map[string]map[string]map[string]*RPCTransaction, error) -} - -// TxPoolAPIImpl data structure to store things needed for net_ commands -type TxPoolAPIImpl struct { - *BaseAPI - pool proto_txpool.TxpoolClient - db kv.RoDB -} - -// NewTxPoolAPI returns NetAPIImplImpl instance -func NewTxPoolAPI(base *BaseAPI, db kv.RoDB, pool proto_txpool.TxpoolClient) *TxPoolAPIImpl { - return &TxPoolAPIImpl{ - BaseAPI: base, - pool: pool, - db: db, - } -} - -func (api *TxPoolAPIImpl) Content(ctx context.Context) (map[string]map[string]map[string]*RPCTransaction, error) { - reply, err := api.pool.All(ctx, &proto_txpool.AllRequest{}) - if err != nil { - return nil, err - } - - content := map[string]map[string]map[string]*RPCTransaction{ - "pending": make(map[string]map[string]*RPCTransaction), - "baseFee": make(map[string]map[string]*RPCTransaction), - "queued": make(map[string]map[string]*RPCTransaction), - } - - pending := make(map[common.Address][]types.Transaction, 8) - baseFee := 
make(map[common.Address][]types.Transaction, 8) - queued := make(map[common.Address][]types.Transaction, 8) - for i := range reply.Txs { - stream := rlp.NewStream(bytes.NewReader(reply.Txs[i].RlpTx), 0) - txn, err := types.DecodeTransaction(stream) - if err != nil { - return nil, err - } - addr := gointerfaces.ConvertH160toAddress(reply.Txs[i].Sender) - switch reply.Txs[i].TxnType { - case proto_txpool.AllReply_PENDING: - if _, ok := pending[addr]; !ok { - pending[addr] = make([]types.Transaction, 0, 4) - } - pending[addr] = append(pending[addr], txn) - case proto_txpool.AllReply_BASE_FEE: - if _, ok := baseFee[addr]; !ok { - baseFee[addr] = make([]types.Transaction, 0, 4) - } - baseFee[addr] = append(baseFee[addr], txn) - case proto_txpool.AllReply_QUEUED: - if _, ok := queued[addr]; !ok { - queued[addr] = make([]types.Transaction, 0, 4) - } - queued[addr] = append(queued[addr], txn) - } - } - - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - cc, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - - curHeader := rawdb.ReadCurrentHeader(tx) - if curHeader == nil { - return nil, nil - } - // Flatten the pending transactions - for account, txs := range pending { - dump := make(map[string]*RPCTransaction) - for _, txn := range txs { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) - } - content["pending"][account.Hex()] = dump - } - // Flatten the baseFee transactions - for account, txs := range baseFee { - dump := make(map[string]*RPCTransaction) - for _, txn := range txs { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) - } - content["baseFee"][account.Hex()] = dump - } - // Flatten the queued transactions - for account, txs := range queued { - dump := make(map[string]*RPCTransaction) - for _, txn := range txs { - dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) - } - 
content["queued"][account.Hex()] = dump - } - return content, nil -} - -// Status returns the number of pending and queued transaction in the pool. -func (api *TxPoolAPIImpl) Status(ctx context.Context) (map[string]hexutil.Uint, error) { - reply, err := api.pool.Status(ctx, &proto_txpool.StatusRequest{}) - if err != nil { - return nil, err - } - return map[string]hexutil.Uint{ - "pending": hexutil.Uint(reply.PendingCount), - "baseFee": hexutil.Uint(reply.BaseFeeCount), - "queued": hexutil.Uint(reply.QueuedCount), - }, nil -} - -/* - -// Inspect retrieves the content of the transaction pool and flattens it into an -// easily inspectable list. -func (s *PublicTxPoolAPI) Inspect() map[string]map[string]map[string]string { - content := map[string]map[string]map[string]string{ - "pending": make(map[string]map[string]string), - "queued": make(map[string]map[string]string), - } - pending, queue := s.b.TxPoolContent() - - // Define a formatter to flatten a transaction into a string - var format = func(tx *types.Transaction) string { - if to := tx.To(); to != nil { - return fmt.Sprintf("%s: %v wei + %v gas × %v wei", tx.To().Hex(), tx.Value(), tx.Gas(), tx.GasPrice()) - } - return fmt.Sprintf("contract creation: %v wei + %v gas × %v wei", tx.Value(), tx.Gas(), tx.GasPrice()) - } - // Flatten the pending transactions - for account, txs := range pending { - dump := make(map[string]string) - for _, tx := range txs { - dump[fmt.Sprintf("%d", tx.Nonce())] = format(tx) - } - content["pending"][account.Hex()] = dump - } - // Flatten the queued transactions - for account, txs := range queue { - dump := make(map[string]string) - for _, tx := range txs { - dump[fmt.Sprintf("%d", tx.Nonce())] = format(tx) - } - content["queued"][account.Hex()] = dump - } - return content -} -*/ diff --git a/cmd/rpcdaemon22/commands/txpool_api_test.go b/cmd/rpcdaemon22/commands/txpool_api_test.go deleted file mode 100644 index 5cd85335692..00000000000 --- a/cmd/rpcdaemon22/commands/txpool_api_test.go 
+++ /dev/null @@ -1,64 +0,0 @@ -package commands - -import ( - "bytes" - "fmt" - "testing" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - txPoolProto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/turbo/rpchelper" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/stretchr/testify/require" -) - -func TestTxPoolContent(t *testing.T) { - m, require := stages.MockWithTxPool(t), require.New(t) - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - }, false /* intermediateHashes */) - require.NoError(err) - err = m.InsertChain(chain) - require.NoError(err) - - ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) - txPool := txpool.NewTxpoolClient(conn) - ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) - api := NewTxPoolAPI(NewBaseApi(ff, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), nil, nil, false), m.DB, txPool) - - expectValue := uint64(1234) - txn, err := types.SignTx(types.NewTransaction(0, common.Address{1}, uint256.NewInt(expectValue), params.TxGas, uint256.NewInt(10*params.GWei), nil), *types.LatestSignerForChainID(m.ChainConfig.ChainID), m.Key) - require.NoError(err) - - buf := bytes.NewBuffer(nil) - err = txn.MarshalBinary(buf) - require.NoError(err) - - reply, err := txPool.Add(ctx, &txpool.AddRequest{RlpTxs: [][]byte{buf.Bytes()}}) - require.NoError(err) - for _, res := range reply.Imported { - require.Equal(res, 
txPoolProto.ImportResult_SUCCESS, fmt.Sprintf("%s", reply.Errors)) - } - - content, err := api.Content(ctx) - require.NoError(err) - - sender := m.Address.String() - require.Equal(1, len(content["pending"][sender])) - require.Equal(expectValue, content["pending"][sender]["0"].Value.ToInt().Uint64()) - - status, err := api.Status(ctx) - require.NoError(err) - require.Len(status, 3) - require.Equal(status["pending"], hexutil.Uint(1)) - require.Equal(status["queued"], hexutil.Uint(0)) -} diff --git a/cmd/rpcdaemon22/commands/validator_set.go b/cmd/rpcdaemon22/commands/validator_set.go deleted file mode 100644 index 5ebbe35217a..00000000000 --- a/cmd/rpcdaemon22/commands/validator_set.go +++ /dev/null @@ -1,702 +0,0 @@ -package commands - -// Tendermint leader selection algorithm - -import ( - "bytes" - "fmt" - "math" - "math/big" - "sort" - "strings" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/log/v3" -) - -// MaxTotalVotingPower - the maximum allowed total voting power. -// It needs to be sufficiently small to, in all cases: -// 1. prevent clipping in incrementProposerPriority() -// 2. let (diff+diffMax-1) not overflow in IncrementProposerPriority() -// (Proof of 1 is tricky, left to the reader). -// It could be higher, but this is sufficiently large for our purposes, -// and leaves room for defensive purposes. -// PriorityWindowSizeFactor - is a constant that when multiplied with the total voting power gives -// the maximum allowed distance between validator priorities. - -const ( - MaxTotalVotingPower = int64(math.MaxInt64) / 8 - PriorityWindowSizeFactor = 2 -) - -// ValidatorSet represent a set of *Validator at a given height. -// The validators can be fetched by address or index. -// The index is in order of .Address, so the indices are fixed -// for all rounds of a given blockchain height - ie. the validators -// are sorted by their address. 
-// On the other hand, the .ProposerPriority of each validator and -// the designated .GetProposer() of a set changes every round, -// upon calling .IncrementProposerPriority(). -// NOTE: Not goroutine-safe. -// NOTE: All get/set to validators should copy the value for safety. -type ValidatorSet struct { - // NOTE: persisted via reflect, must be exported. - Validators []*bor.Validator `json:"validators"` - Proposer *bor.Validator `json:"proposer"` - - // cached (unexported) - totalVotingPower int64 -} - -// NewValidatorSet initializes a ValidatorSet by copying over the -// values from `valz`, a list of Validators. If valz is nil or empty, -// the new ValidatorSet will have an empty list of Validators. -// The addresses of validators in `valz` must be unique otherwise the -// function panics. -func NewValidatorSet(valz []*bor.Validator) *ValidatorSet { - vals := &ValidatorSet{} - err := vals.updateWithChangeSet(valz, false) - if err != nil { - panic(fmt.Sprintf("cannot create validator set: %s", err)) - } - if len(valz) > 0 { - vals.IncrementProposerPriority(1) - } - return vals -} - -// Nil or empty validator sets are invalid. -func (vals *ValidatorSet) IsNilOrEmpty() bool { - return vals == nil || len(vals.Validators) == 0 -} - -// Increment ProposerPriority and update the proposer on a copy, and return it. -func (vals *ValidatorSet) CopyIncrementProposerPriority(times int) *ValidatorSet { - copy := vals.Copy() - copy.IncrementProposerPriority(times) - return copy -} - -// IncrementProposerPriority increments ProposerPriority of each validator and updates the -// proposer. Panics if validator set is empty. -// `times` must be positive. 
-func (vals *ValidatorSet) IncrementProposerPriority(times int) { - if vals.IsNilOrEmpty() { - panic("empty validator set") - } - if times <= 0 { - panic("Cannot call IncrementProposerPriority with non-positive times") - } - - // Cap the difference between priorities to be proportional to 2*totalPower by - // re-normalizing priorities, i.e., rescale all priorities by multiplying with: - // 2*totalVotingPower/(maxPriority - minPriority) - diffMax := PriorityWindowSizeFactor * vals.TotalVotingPower() - vals.RescalePriorities(diffMax) - vals.shiftByAvgProposerPriority() - - var proposer *bor.Validator - // Call IncrementProposerPriority(1) times times. - for i := 0; i < times; i++ { - proposer = vals.incrementProposerPriority() - } - - vals.Proposer = proposer -} - -func (vals *ValidatorSet) RescalePriorities(diffMax int64) { - if vals.IsNilOrEmpty() { - panic("empty validator set") - } - // NOTE: This check is merely a sanity check which could be - // removed if all tests would init. voting power appropriately; - // i.e. diffMax should always be > 0 - if diffMax <= 0 { - return - } - - // Calculating ceil(diff/diffMax): - // Re-normalization is performed by dividing by an integer for simplicity. - // NOTE: This may make debugging priority issues easier as well. - diff := computeMaxMinPriorityDiff(vals) - ratio := (diff + diffMax - 1) / diffMax - if diff > diffMax { - for _, val := range vals.Validators { - val.ProposerPriority = val.ProposerPriority / ratio - } - } -} - -func (vals *ValidatorSet) incrementProposerPriority() *bor.Validator { - for _, val := range vals.Validators { - // Check for overflow for sum. - newPrio := safeAddClip(val.ProposerPriority, val.VotingPower) - val.ProposerPriority = newPrio - } - // Decrement the validator with most ProposerPriority. - mostest := vals.getValWithMostPriority() - // Mind the underflow. 
- mostest.ProposerPriority = safeSubClip(mostest.ProposerPriority, vals.TotalVotingPower()) - - return mostest -} - -// Should not be called on an empty validator set. -func (vals *ValidatorSet) computeAvgProposerPriority() int64 { - n := int64(len(vals.Validators)) - sum := big.NewInt(0) - for _, val := range vals.Validators { - sum.Add(sum, big.NewInt(val.ProposerPriority)) - } - avg := sum.Div(sum, big.NewInt(n)) - if avg.IsInt64() { - return avg.Int64() - } - - // This should never happen: each val.ProposerPriority is in bounds of int64. - panic(fmt.Sprintf("Cannot represent avg ProposerPriority as an int64 %v", avg)) -} - -// Compute the difference between the max and min ProposerPriority of that set. -func computeMaxMinPriorityDiff(vals *ValidatorSet) int64 { - if vals.IsNilOrEmpty() { - panic("empty validator set") - } - max := int64(math.MinInt64) - min := int64(math.MaxInt64) - for _, v := range vals.Validators { - if v.ProposerPriority < min { - min = v.ProposerPriority - } - if v.ProposerPriority > max { - max = v.ProposerPriority - } - } - diff := max - min - if diff < 0 { - return -1 * diff - } else { - return diff - } -} - -func (vals *ValidatorSet) getValWithMostPriority() *bor.Validator { - var res *bor.Validator - for _, val := range vals.Validators { - res = res.Cmp(val) - } - return res -} - -func (vals *ValidatorSet) shiftByAvgProposerPriority() { - if vals.IsNilOrEmpty() { - panic("empty validator set") - } - avgProposerPriority := vals.computeAvgProposerPriority() - for _, val := range vals.Validators { - val.ProposerPriority = safeSubClip(val.ProposerPriority, avgProposerPriority) - } -} - -// Makes a copy of the validator list. -func validatorListCopy(valsList []*bor.Validator) []*bor.Validator { - if valsList == nil { - return nil - } - valsCopy := make([]*bor.Validator, len(valsList)) - for i, val := range valsList { - valsCopy[i] = val.Copy() - } - return valsCopy -} - -// Copy each validator into a new ValidatorSet. 
-func (vals *ValidatorSet) Copy() *ValidatorSet { - return &ValidatorSet{ - Validators: validatorListCopy(vals.Validators), - Proposer: vals.Proposer, - totalVotingPower: vals.totalVotingPower, - } -} - -// HasAddress returns true if address given is in the validator set, false - -// otherwise. -func (vals *ValidatorSet) HasAddress(address []byte) bool { - idx := sort.Search(len(vals.Validators), func(i int) bool { - return bytes.Compare(address, vals.Validators[i].Address.Bytes()) <= 0 - }) - return idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address.Bytes(), address) -} - -// GetByAddress returns an index of the validator with address and validator -// itself if found. Otherwise, -1 and nil are returned. -func (vals *ValidatorSet) GetByAddress(address common.Address) (index int, val *bor.Validator) { - idx := sort.Search(len(vals.Validators), func(i int) bool { - return bytes.Compare(address.Bytes(), vals.Validators[i].Address.Bytes()) <= 0 - }) - if idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address.Bytes(), address.Bytes()) { - return idx, vals.Validators[idx].Copy() - } - return -1, nil -} - -// GetByIndex returns the validator's address and validator itself by index. -// It returns nil values if index is less than 0 or greater or equal to -// len(ValidatorSet.Validators). -func (vals *ValidatorSet) GetByIndex(index int) (address []byte, val *bor.Validator) { - if index < 0 || index >= len(vals.Validators) { - return nil, nil - } - val = vals.Validators[index] - return val.Address.Bytes(), val.Copy() -} - -// Size returns the length of the validator set. -func (vals *ValidatorSet) Size() int { - return len(vals.Validators) -} - -// Force recalculation of the set's total voting power. 
-func (vals *ValidatorSet) updateTotalVotingPower() error { - - sum := int64(0) - for _, val := range vals.Validators { - // mind overflow - sum = safeAddClip(sum, val.VotingPower) - if sum > MaxTotalVotingPower { - return &bor.TotalVotingPowerExceededError{Sum: sum, Validators: vals.Validators} - } - } - vals.totalVotingPower = sum - return nil -} - -// TotalVotingPower returns the sum of the voting powers of all validators. -// It recomputes the total voting power if required. -func (vals *ValidatorSet) TotalVotingPower() int64 { - if vals.totalVotingPower == 0 { - log.Info("invoking updateTotalVotingPower before returning it") - if err := vals.updateTotalVotingPower(); err != nil { - // Can/should we do better? - panic(err) - } - } - return vals.totalVotingPower -} - -// GetProposer returns the current proposer. If the validator set is empty, nil -// is returned. -func (vals *ValidatorSet) GetProposer() (proposer *bor.Validator) { - if len(vals.Validators) == 0 { - return nil - } - if vals.Proposer == nil { - vals.Proposer = vals.findProposer() - } - return vals.Proposer.Copy() -} - -func (vals *ValidatorSet) findProposer() *bor.Validator { - var proposer *bor.Validator - for _, val := range vals.Validators { - if proposer == nil || !bytes.Equal(val.Address.Bytes(), proposer.Address.Bytes()) { - proposer = proposer.Cmp(val) - } - } - return proposer -} - -// Hash returns the Merkle root hash build using validators (as leaves) in the -// set. -// func (vals *ValidatorSet) Hash() []byte { -// if len(vals.Validators) == 0 { -// return nil -// } -// bzs := make([][]byte, len(vals.Validators)) -// for i, val := range vals.Validators { -// bzs[i] = val.Bytes() -// } -// return merkle.SimpleHashFromByteSlices(bzs) -// } - -// Iterate will run the given function over the set. 
-func (vals *ValidatorSet) Iterate(fn func(index int, val *bor.Validator) bool) { - for i, val := range vals.Validators { - stop := fn(i, val.Copy()) - if stop { - break - } - } -} - -// Checks changes against duplicates, splits the changes in updates and removals, sorts them by address. -// -// Returns: -// updates, removals - the sorted lists of updates and removals -// err - non-nil if duplicate entries or entries with negative voting power are seen -// -// No changes are made to 'origChanges'. -func processChanges(origChanges []*bor.Validator) (updates, removals []*bor.Validator, err error) { - // Make a deep copy of the changes and sort by address. - changes := validatorListCopy(origChanges) - sort.Sort(ValidatorsByAddress(changes)) - - removals = make([]*bor.Validator, 0, len(changes)) - updates = make([]*bor.Validator, 0, len(changes)) - var prevAddr common.Address - - // Scan changes by address and append valid validators to updates or removals lists. - for _, valUpdate := range changes { - if bytes.Equal(valUpdate.Address.Bytes(), prevAddr.Bytes()) { - err = fmt.Errorf("duplicate entry %v in %v", valUpdate, changes) - return nil, nil, err - } - if valUpdate.VotingPower < 0 { - err = fmt.Errorf("voting power can't be negative: %v", valUpdate) - return nil, nil, err - } - if valUpdate.VotingPower > MaxTotalVotingPower { - err = fmt.Errorf("to prevent clipping/ overflow, voting power can't be higher than %v: %v ", - MaxTotalVotingPower, valUpdate) - return nil, nil, err - } - if valUpdate.VotingPower == 0 { - removals = append(removals, valUpdate) - } else { - updates = append(updates, valUpdate) - } - prevAddr = valUpdate.Address - } - return updates, removals, err -} - -// Verifies a list of updates against a validator set, making sure the allowed -// total voting power would not be exceeded if these updates would be applied to the set. 
-// -// Returns: -// updatedTotalVotingPower - the new total voting power if these updates would be applied -// numNewValidators - number of new validators -// err - non-nil if the maximum allowed total voting power would be exceeded -// -// 'updates' should be a list of proper validator changes, i.e. they have been verified -// by processChanges for duplicates and invalid values. -// No changes are made to the validator set 'vals'. -func verifyUpdates(updates []*bor.Validator, vals *ValidatorSet) (updatedTotalVotingPower int64, numNewValidators int, err error) { - - updatedTotalVotingPower = vals.TotalVotingPower() - - for _, valUpdate := range updates { - address := valUpdate.Address - _, val := vals.GetByAddress(address) - if val == nil { - // New validator, add its voting power the the total. - updatedTotalVotingPower += valUpdate.VotingPower - numNewValidators++ - } else { - // Updated validator, add the difference in power to the total. - updatedTotalVotingPower += valUpdate.VotingPower - val.VotingPower - } - overflow := updatedTotalVotingPower > MaxTotalVotingPower - if overflow { - err = fmt.Errorf( - "failed to add/update validator %v, total voting power would exceed the max allowed %v", - valUpdate, MaxTotalVotingPower) - return 0, 0, err - } - } - - return updatedTotalVotingPower, numNewValidators, nil -} - -// Computes the proposer priority for the validators not present in the set based on 'updatedTotalVotingPower'. -// Leaves unchanged the priorities of validators that are changed. -// -// 'updates' parameter must be a list of unique validators to be added or updated. -// No changes are made to the validator set 'vals'. 
-func computeNewPriorities(updates []*bor.Validator, vals *ValidatorSet, updatedTotalVotingPower int64) { - - for _, valUpdate := range updates { - address := valUpdate.Address - _, val := vals.GetByAddress(address) - if val == nil { - // add val - // Set ProposerPriority to -C*totalVotingPower (with C ~= 1.125) to make sure validators can't - // un-bond and then re-bond to reset their (potentially previously negative) ProposerPriority to zero. - // - // Contract: updatedVotingPower < MaxTotalVotingPower to ensure ProposerPriority does - // not exceed the bounds of int64. - // - // Compute ProposerPriority = -1.125*totalVotingPower == -(updatedVotingPower + (updatedVotingPower >> 3)). - valUpdate.ProposerPriority = -(updatedTotalVotingPower + (updatedTotalVotingPower >> 3)) - } else { - valUpdate.ProposerPriority = val.ProposerPriority - } - } - -} - -// Merges the vals' validator list with the updates list. -// When two elements with same address are seen, the one from updates is selected. -// Expects updates to be a list of updates sorted by address with no duplicates or errors, -// must have been validated with verifyUpdates() and priorities computed with computeNewPriorities(). -func (vals *ValidatorSet) applyUpdates(updates []*bor.Validator) { - - existing := vals.Validators - merged := make([]*bor.Validator, len(existing)+len(updates)) - i := 0 - - for len(existing) > 0 && len(updates) > 0 { - if bytes.Compare(existing[0].Address.Bytes(), updates[0].Address.Bytes()) < 0 { // unchanged validator - merged[i] = existing[0] - existing = existing[1:] - } else { - // Apply add or update. - merged[i] = updates[0] - if bytes.Equal(existing[0].Address.Bytes(), updates[0].Address.Bytes()) { - // bor.Validator is present in both, advance existing. - existing = existing[1:] - } - updates = updates[1:] - } - i++ - } - - // Add the elements which are left. - for j := 0; j < len(existing); j++ { - merged[i] = existing[j] - i++ - } - // OR add updates which are left. 
- for j := 0; j < len(updates); j++ { - merged[i] = updates[j] - i++ - } - - vals.Validators = merged[:i] -} - -// Checks that the validators to be removed are part of the validator set. -// No changes are made to the validator set 'vals'. -func verifyRemovals(deletes []*bor.Validator, vals *ValidatorSet) error { - - for _, valUpdate := range deletes { - address := valUpdate.Address - _, val := vals.GetByAddress(address) - if val == nil { - return fmt.Errorf("failed to find validator %X to remove", address) - } - } - if len(deletes) > len(vals.Validators) { - panic("more deletes than validators") - } - return nil -} - -// Removes the validators specified in 'deletes' from validator set 'vals'. -// Should not fail as verification has been done before. -func (vals *ValidatorSet) applyRemovals(deletes []*bor.Validator) { - - existing := vals.Validators - - merged := make([]*bor.Validator, len(existing)-len(deletes)) - i := 0 - - // Loop over deletes until we removed all of them. - for len(deletes) > 0 { - if bytes.Equal(existing[0].Address.Bytes(), deletes[0].Address.Bytes()) { - deletes = deletes[1:] - } else { // Leave it in the resulting slice. - merged[i] = existing[0] - i++ - } - existing = existing[1:] - } - - // Add the elements which are left. - for j := 0; j < len(existing); j++ { - merged[i] = existing[j] - i++ - } - - vals.Validators = merged[:i] -} - -// Main function used by UpdateWithChangeSet() and NewValidatorSet(). -// If 'allowDeletes' is false then delete operations (identified by validators with voting power 0) -// are not allowed and will trigger an error if present in 'changes'. -// The 'allowDeletes' flag is set to false by NewValidatorSet() and to true by UpdateWithChangeSet(). -func (vals *ValidatorSet) updateWithChangeSet(changes []*bor.Validator, allowDeletes bool) error { - - if len(changes) < 1 { - return nil - } - - // Check for duplicates within changes, split in 'updates' and 'deletes' lists (sorted). 
- updates, deletes, err := processChanges(changes) - if err != nil { - return err - } - - if !allowDeletes && len(deletes) != 0 { - return fmt.Errorf("cannot process validators with voting power 0: %v", deletes) - } - - // Verify that applying the 'deletes' against 'vals' will not result in error. - if err := verifyRemovals(deletes, vals); err != nil { - return err - } - - // Verify that applying the 'updates' against 'vals' will not result in error. - updatedTotalVotingPower, numNewValidators, err := verifyUpdates(updates, vals) - if err != nil { - return err - } - - // Check that the resulting set will not be empty. - if numNewValidators == 0 && len(vals.Validators) == len(deletes) { - return fmt.Errorf("applying the validator changes would result in empty set") - } - - // Compute the priorities for updates. - computeNewPriorities(updates, vals, updatedTotalVotingPower) - - // Apply updates and removals. - vals.applyUpdates(updates) - vals.applyRemovals(deletes) - - if err := vals.updateTotalVotingPower(); err != nil { - return err - } - - // Scale and center. - vals.RescalePriorities(PriorityWindowSizeFactor * vals.TotalVotingPower()) - vals.shiftByAvgProposerPriority() - - return nil -} - -// UpdateWithChangeSet attempts to update the validator set with 'changes'. -// It performs the following steps: -// - validates the changes making sure there are no duplicates and splits them in updates and deletes -// - verifies that applying the changes will not result in errors -// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities -// across old and newly added validators are fair -// - computes the priorities of new validators against the final set -// - applies the updates against the validator set -// - applies the removals against the validator set -// - performs scaling and centering of priority values -// If an error is detected during verification steps, it is returned and the validator set -// is not changed. 
-func (vals *ValidatorSet) UpdateWithChangeSet(changes []*bor.Validator) error { - return vals.updateWithChangeSet(changes, true) -} - -//----------------- -// ErrTooMuchChange - -func IsErrTooMuchChange(err error) bool { - switch err.(type) { - case errTooMuchChange: - return true - default: - return false - } -} - -type errTooMuchChange struct { - got int64 - needed int64 -} - -func (e errTooMuchChange) Error() string { - return fmt.Sprintf("Invalid commit -- insufficient old voting power: got %v, needed %v", e.got, e.needed) -} - -//---------------- - -func (vals *ValidatorSet) String() string { - return vals.StringIndented("") -} - -func (vals *ValidatorSet) StringIndented(indent string) string { - if vals == nil { - return "nil-ValidatorSet" - } - var valStrings []string - vals.Iterate(func(index int, val *bor.Validator) bool { - valStrings = append(valStrings, val.String()) - return false - }) - return fmt.Sprintf(`ValidatorSet{ -%s Proposer: %v -%s Validators: -%s %v -%s}`, - indent, vals.GetProposer().String(), - indent, - indent, strings.Join(valStrings, "\n"+indent+" "), - indent) - -} - -//------------------------------------- -// Implements sort for sorting validators by address. - -// Sort validators by address. 
-type ValidatorsByAddress []*bor.Validator - -func (valz ValidatorsByAddress) Len() int { - return len(valz) -} - -func (valz ValidatorsByAddress) Less(i, j int) bool { - return bytes.Compare(valz[i].Address.Bytes(), valz[j].Address.Bytes()) == -1 -} - -func (valz ValidatorsByAddress) Swap(i, j int) { - valz[i], valz[j] = valz[j], valz[i] -} - -/////////////////////////////////////////////////////////////////////////////// -// safe addition/subtraction - -func safeAdd(a, b int64) (int64, bool) { - if b > 0 && a > math.MaxInt64-b { - return -1, true - } else if b < 0 && a < math.MinInt64-b { - return -1, true - } - return a + b, false -} - -func safeSub(a, b int64) (int64, bool) { - if b > 0 && a < math.MinInt64+b { - return -1, true - } else if b < 0 && a > math.MaxInt64+b { - return -1, true - } - return a - b, false -} - -func safeAddClip(a, b int64) int64 { - c, overflow := safeAdd(a, b) - if overflow { - if b < 0 { - return math.MinInt64 - } - return math.MaxInt64 - } - return c -} - -func safeSubClip(a, b int64) int64 { - c, overflow := safeSub(a, b) - if overflow { - if b > 0 { - return math.MinInt64 - } - return math.MaxInt64 - } - return c -} diff --git a/cmd/rpcdaemon22/commands/web3_api.go b/cmd/rpcdaemon22/commands/web3_api.go deleted file mode 100644 index c35f62c632e..00000000000 --- a/cmd/rpcdaemon22/commands/web3_api.go +++ /dev/null @@ -1,38 +0,0 @@ -package commands - -import ( - "context" - - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/turbo/rpchelper" -) - -// Web3API provides interfaces for the web3_ RPC commands -type Web3API interface { - ClientVersion(_ context.Context) (string, error) - Sha3(_ context.Context, input hexutil.Bytes) hexutil.Bytes -} - -type Web3APIImpl struct { - *BaseAPI - ethBackend rpchelper.ApiBackend -} - -// NewWeb3APIImpl returns Web3APIImpl instance -func NewWeb3APIImpl(ethBackend rpchelper.ApiBackend) *Web3APIImpl { - return &Web3APIImpl{ - 
BaseAPI: &BaseAPI{}, - ethBackend: ethBackend, - } -} - -// ClientVersion implements web3_clientVersion. Returns the current client version. -func (api *Web3APIImpl) ClientVersion(ctx context.Context) (string, error) { - return api.ethBackend.ClientVersion(ctx) -} - -// Sha3 implements web3_sha3. Returns Keccak-256 (not the standardized SHA3-256) of the given data. -func (api *Web3APIImpl) Sha3(_ context.Context, input hexutil.Bytes) hexutil.Bytes { - return crypto.Keccak256(input) -} diff --git a/cmd/rpcdaemon22/health/check_block.go b/cmd/rpcdaemon22/health/check_block.go deleted file mode 100644 index 8978b6ffc4e..00000000000 --- a/cmd/rpcdaemon22/health/check_block.go +++ /dev/null @@ -1,23 +0,0 @@ -package health - -import ( - "context" - "fmt" - - "github.com/ledgerwatch/erigon/rpc" -) - -func checkBlockNumber(blockNumber rpc.BlockNumber, api EthAPI) error { - if api == nil { - return fmt.Errorf("no connection to the Erigon server or `eth` namespace isn't enabled") - } - data, err := api.GetBlockByNumber(context.TODO(), blockNumber, false) - if err != nil { - return err - } - if len(data) == 0 { // block not found - return fmt.Errorf("no known block with number %v (%x hex)", blockNumber, blockNumber) - } - - return nil -} diff --git a/cmd/rpcdaemon22/health/check_peers.go b/cmd/rpcdaemon22/health/check_peers.go deleted file mode 100644 index 818152b668b..00000000000 --- a/cmd/rpcdaemon22/health/check_peers.go +++ /dev/null @@ -1,23 +0,0 @@ -package health - -import ( - "context" - "fmt" -) - -func checkMinPeers(minPeerCount uint, api NetAPI) error { - if api == nil { - return fmt.Errorf("no connection to the Erigon server or `net` namespace isn't enabled") - } - - peerCount, err := api.PeerCount(context.TODO()) - if err != nil { - return err - } - - if uint64(peerCount) < uint64(minPeerCount) { - return fmt.Errorf("not enough peers: %d (minimum %d))", peerCount, minPeerCount) - } - - return nil -} diff --git a/cmd/rpcdaemon22/health/health.go 
b/cmd/rpcdaemon22/health/health.go deleted file mode 100644 index 311af85c5d9..00000000000 --- a/cmd/rpcdaemon22/health/health.go +++ /dev/null @@ -1,131 +0,0 @@ -package health - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strings" - - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/log/v3" -) - -type requestBody struct { - MinPeerCount *uint `json:"min_peer_count"` - BlockNumber *rpc.BlockNumber `json:"known_block"` -} - -const ( - urlPath = "/health" -) - -var ( - errCheckDisabled = errors.New("error check disabled") -) - -func ProcessHealthcheckIfNeeded( - w http.ResponseWriter, - r *http.Request, - rpcAPI []rpc.API, -) bool { - if !strings.EqualFold(r.URL.Path, urlPath) { - return false - } - - netAPI, ethAPI := parseAPI(rpcAPI) - - var errMinPeerCount = errCheckDisabled - var errCheckBlock = errCheckDisabled - - body, errParse := parseHealthCheckBody(r.Body) - defer r.Body.Close() - - if errParse != nil { - log.Root().Warn("unable to process healthcheck request", "err", errParse) - } else { - // 1. net_peerCount - if body.MinPeerCount != nil { - errMinPeerCount = checkMinPeers(*body.MinPeerCount, netAPI) - } - // 2. 
custom query (shouldn't fail) - if body.BlockNumber != nil { - errCheckBlock = checkBlockNumber(*body.BlockNumber, ethAPI) - } - // TODO add time from the last sync cycle - } - - err := reportHealth(errParse, errMinPeerCount, errCheckBlock, w) - if err != nil { - log.Root().Warn("unable to process healthcheck request", "err", err) - } - - return true -} - -func parseHealthCheckBody(reader io.Reader) (requestBody, error) { - var body requestBody - - bodyBytes, err := io.ReadAll(reader) - if err != nil { - return body, err - } - - err = json.Unmarshal(bodyBytes, &body) - if err != nil { - return body, err - } - - return body, nil -} - -func reportHealth(errParse, errMinPeerCount, errCheckBlock error, w http.ResponseWriter) error { - statusCode := http.StatusOK - errors := make(map[string]string) - - if shouldChangeStatusCode(errParse) { - statusCode = http.StatusInternalServerError - } - errors["healthcheck_query"] = errorStringOrOK(errParse) - - if shouldChangeStatusCode(errMinPeerCount) { - statusCode = http.StatusInternalServerError - } - errors["min_peer_count"] = errorStringOrOK(errMinPeerCount) - - if shouldChangeStatusCode(errCheckBlock) { - statusCode = http.StatusInternalServerError - } - errors["check_block"] = errorStringOrOK(errCheckBlock) - - w.WriteHeader(statusCode) - - bodyJson, err := json.Marshal(errors) - if err != nil { - return err - } - - _, err = w.Write(bodyJson) - if err != nil { - return err - } - - return nil -} - -func shouldChangeStatusCode(err error) bool { - return err != nil && !errors.Is(err, errCheckDisabled) -} - -func errorStringOrOK(err error) string { - if err == nil { - return "HEALTHY" - } - - if errors.Is(err, errCheckDisabled) { - return "DISABLED" - } - - return fmt.Sprintf("ERROR: %v", err) -} diff --git a/cmd/rpcdaemon22/health/interfaces.go b/cmd/rpcdaemon22/health/interfaces.go deleted file mode 100644 index 4cf0fc6892b..00000000000 --- a/cmd/rpcdaemon22/health/interfaces.go +++ /dev/null @@ -1,16 +0,0 @@ -package health 
- -import ( - "context" - - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/rpc" -) - -type NetAPI interface { - PeerCount(_ context.Context) (hexutil.Uint, error) -} - -type EthAPI interface { - GetBlockByNumber(_ context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) -} diff --git a/cmd/rpcdaemon22/health/parse_api.go b/cmd/rpcdaemon22/health/parse_api.go deleted file mode 100644 index 21e003e5a59..00000000000 --- a/cmd/rpcdaemon22/health/parse_api.go +++ /dev/null @@ -1,22 +0,0 @@ -package health - -import ( - "github.com/ledgerwatch/erigon/rpc" -) - -func parseAPI(api []rpc.API) (netAPI NetAPI, ethAPI EthAPI) { - for _, rpc := range api { - if rpc.Service == nil { - continue - } - - if netCandidate, ok := rpc.Service.(NetAPI); ok { - netAPI = netCandidate - } - - if ethCandidate, ok := rpc.Service.(EthAPI); ok { - ethAPI = ethCandidate - } - } - return netAPI, ethAPI -} diff --git a/cmd/rpcdaemon22/main.go b/cmd/rpcdaemon22/main.go deleted file mode 100644 index 9d8ad5463ce..00000000000 --- a/cmd/rpcdaemon22/main.go +++ /dev/null @@ -1,42 +0,0 @@ -package main - -import ( - "os" - - "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/commands" - "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" -) - -func main() { - cmd, cfg := cli.RootCommand() - rootCtx, rootCancel := common.RootContext() - cmd.RunE = func(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() - logger := log.New() - db, borDb, backend, txPool, mining, starknet, stateCache, blockReader, ff, agg, txNums, err := cli.RemoteServices(ctx, *cfg, logger, rootCancel) - if err != nil { - log.Error("Could not connect to DB", "err", err) - return nil - } - defer db.Close() - if borDb != nil { - defer borDb.Close() - } - - apiList := commands.APIList(db, borDb, backend, txPool, mining, starknet, ff, stateCache, blockReader, agg, 
txNums, *cfg) - if err := cli.StartRpcServer(ctx, *cfg, apiList); err != nil { - log.Error(err.Error()) - return nil - } - - return nil - } - - if err := cmd.ExecuteContext(rootCtx); err != nil { - log.Error(err.Error()) - os.Exit(1) - } -} diff --git a/cmd/rpcdaemon22/postman/README.md b/cmd/rpcdaemon22/postman/README.md deleted file mode 100644 index 0b9c2321838..00000000000 --- a/cmd/rpcdaemon22/postman/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# Postman testing - -There are two files here: - -- RPC_Testing.json -- Trace_Testing.json - -You can import them into Postman using these -instructions: https://github.com/ledgerwatch/erigon/wiki/Using-Postman-to-Test-TurboGeth-RPC - -The first one is used to generate help text and other documentation as well as running a sanity check against a new -release. There is basically one test for each of the 81 RPC endpoints. - -The second file contains 31 test cases specifically for the nine trace routines (five tests for five of the routines, -three for another, one each for the other three). 
- -Another collection of related tests can be found -here: https://github.com/Great-Hill-Corporation/trueblocks-core/tree/develop/src/other/trace_tests diff --git a/cmd/rpcdaemon22/postman/RPC_Testing.json b/cmd/rpcdaemon22/postman/RPC_Testing.json deleted file mode 100644 index 0dce3725afc..00000000000 --- a/cmd/rpcdaemon22/postman/RPC_Testing.json +++ /dev/null @@ -1,4235 +0,0 @@ -{ - "info": { - "_postman_id": "72c52f91-c09d-4af6-abb4-162b9c5532b2", - "name": "RPC_Testing", - "description": "A collection holding all the Ethereum JSON RPC API calls", - "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" - }, - "item": [ - { - "name": "web3", - "item": [ - { - "name": "clientVersion", - "event": [ - { - "listen": "test", - "script": { - "id": "6c4da7d1-aa83-40f8-bdad-b68cb42415a4", - "exec": [ - "pm.test('Has correct result', function() {", - " const jsonData = pm.response.json();", - " var messages = {", - " '{{NETHERMIND}}': 'Nethermind',", - " '{{ERIGON}}': 'Erigon',", - " '{{SILKRPC}}': 'Erigon',", - " '{{PARITY}}': 'Parity-Ethereum',", - " }", - " var parts = jsonData.result.split('/');", - " pm.expect(parts[0]).to.deep.equals(messages[pm.environment.get('HOST')]);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"jsonrpc\": \"2.0\",\n \"method\": \"web3_clientVersion\",\n \"params\": [],\n \"id\": \"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the current client version.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nSTRING - The current client version string including node name and version" - }, - "response": [] - }, - { - "name": "sha3", - "event": [ - { - "listen": "test", - "script": { - "id": "d8ebbf3d-8ae7-460a-9808-4b4b8a08d289", - "exec": [ - "var 
expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": \"0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - }, - { - "listen": "prerequest", - "script": { - "id": "78f0ca53-f4fe-4396-a87a-e1c81899822a", - "exec": [""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"web3_sha3\",\n\t\"params\":[\"0x68656c6c6f20776f726c64\"],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns Keccak-256 (not the standardized SHA3-256) of the given data.\r\n\r\n**Parameters**\r\n\r\nDATA - The data to convert into a SHA3 hash\r\n\r\n**Returns**\r\n\r\nDATA - The SHA3 result of the given input string" - }, - "response": [] - } - ], - "protocolProfileBehavior": {} - }, - { - "name": "net", - "item": [ - { - "name": "listening", - "event": [ - { - "listen": "test", - "script": { - "id": "322f2289-938f-4cfb-adde-3d4f0c54455e", - "exec": [ - "expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": true", - "}", - "", - "pm.test('Returns true (hardcoded)', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected)", - "});" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"net_listening\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns true if client is 
actively listening for network connections.\r\n\r\n**TODO**\r\n\r\nThe code currently returns a hard coded true value. Remove hard coded value.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nBoolean - true when listening, false otherwise" - }, - "response": [] - }, - { - "name": "version", - "event": [ - { - "listen": "test", - "script": { - "id": "a7d33b17-7d1d-49db-b30a-82d8c695c1d4", - "exec": [ - "expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": \"1\"", - "}", - "", - "pm.test('Returns true (hardcoded)', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected)", - "});" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"net_version\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the current network id.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nSTRING - The current network id. 
One of BR \"1\": Ethereum Mainnet BR \"2\": Morden Testnet (deprecated) BR \"3\": Ropsten Testnet BR \"4\": Rinkeby Testnet BR \"42\": Kovan Testnet BR" - }, - "response": [] - }, - { - "name": "peerCount", - "event": [ - { - "listen": "test", - "script": { - "id": "985b79fb-0c36-421d-8dcf-cc1d619a11e3", - "exec": [ - "expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": \"0x19\"", - "}", - "", - "pm.test('Returns true (hardcoded)', function() {", - " expected.result = pm.response.json().result;", - " pm.expect(pm.response.json()).to.be.deep.equal(expected)", - "});" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"net_peerCount\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns number of peers currently connected to the client.\r\n\r\n**TODO**\r\n\r\nThis routine currently returns a hard coded value of '25'\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the number of connected peers" - }, - "response": [] - } - ], - "protocolProfileBehavior": {} - }, - { - "name": "eth", - "item": [ - { - "name": "blocks", - "item": [ - { - "name": "getBlockByNumber", - "event": [ - { - "listen": "test", - "script": { - "id": "438e5e99-267a-4a47-92f3-d7a9e675f183", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"difficulty\": \"0xb5708d578a6\",", - " \"extraData\": \"0xd783010400844765746887676f312e352e31856c696e7578\",", - " \"gasLimit\": \"0x2fefd8\",", - " \"gasUsed\": \"0x14820\",", - " \"hash\": \"0x0b4c6fb75ded4b90218cf0346b0885e442878f104e1b60bf75d5b6860eeacd53\",", - " \"logsBloom\": 
\"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",", - " \"miner\": \"0x0c729be7c39543c3d549282a40395299d987cec2\",", - " \"mixHash\": \"0x1530cda332d86d5d7462e3a0eb585e22c88348dd796d29e6ef18196a78cdce07\",", - " \"nonce\": \"0x938e5630b060b7d3\",", - " \"number\": \"0xf4629\",", - " \"parentHash\": \"0x96810a6076e621e311a232468bfd3dcfac08f4803b255af0f00300f47981c10f\",", - " \"receiptsRoot\": \"0x075608bec75d988c52ea6750f4c2204fd60082eb1df32cf8f4732e8a591eef62\",", - " \"sha3Uncles\": \"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347\",", - " \"size\": \"0x3e1\",", - " \"stateRoot\": \"0xb3f9408d80048b6f206951c4e387f8da37fb8510eccc18527865fa746c47bbc5\",", - " \"timestamp\": \"0x56bff9bb\",", - " \"totalDifficulty\": \"0x6332227c16fd7c67\",", - " \"transactions\": [", - " \"0x730724cb08a6eb17bf6b3296359d261570d343ea7944a17a9d7287d77900db08\",", - " \"0xef2ea39c20ba09553b2f3cf02380406ac766039ca56612937eed5e7f3503fb3a\",", - " \"0x5352c80aa2073e21ce6c4aa5488c38455f3519955ece7dca5af3e326797bcc63\",", - " \"0x060e4cf9fa8d34a8b423b5b3691b2541255ff7974ff16699e104edcfb63bd521\"", - " ],", - " \"transactionsRoot\": \"0xb779480508401ddd57f1f1e83a54715dcafc6ccec4e4d842c1b68cb418e6560d\",", - " \"uncles\": []", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", - " var jsonData = pm.response.json();", - " if (!isErigon) {", - " delete jsonData.result.author;", - " delete jsonData.result.sealFields;", - " }", - " 
pm.expect(jsonData).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getBlockByNumber\",\n\t\"params\":[\n\t\t\"0xf4629\", \n\t\tfalse\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns information about a block given the block's number.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\nBoolean - If true it returns the full transaction objects, if false only the hashes of the transactions\r\n\r\n**Returns**\r\n\r\nObject - An object of type Block defined as:\r\n\r\nnumber: QUANTITY - The block number or null when pending\r\n\r\nhash: DATA, 32 Bytes - Hash of the block or null when pending\r\n\r\nparentHash: DATA, 32 Bytes - Hash of the parent block\r\n\r\nnonce: DATA, 8 bytes - Hash of the proof of work or null when pending\r\n\r\nsha3Uncles: DATA, 32 Bytes - SHA3 of the uncles data in the block\r\n\r\nlogsBloom: DATA, 256 Bytes - The bloom filter for the block's logs or null when pending\r\n\r\ntransactionsRoot: DATA, 32 Bytes - The root of the transaction trie of the block\r\n\r\nstateRoot: DATA, 32 Bytes - The root of the final state trie of the block\r\n\r\nreceiptsRoot: DATA, 32 Bytes - The root of the receipts trie of the block\r\n\r\nminer: DATA, 20 Bytes - The address of the beneficiary to whom the mining rewards were given\r\n\r\ndifficulty: QUANTITY - Integer of the difficulty for this block\r\n\r\ntotalDifficulty: QUANTITY - Integer of the total difficulty of the chain until this block\r\n\r\nextraData: DATA - The extra data field of this block\r\n\r\nsize: QUANTITY - Integer the size of this block in bytes\r\n\r\ngasLimit: QUANTITY - The maximum gas allowed 
in this block\r\n\r\ngasUsed: QUANTITY - The total used gas by all transactions in this block\r\n\r\ntimestamp: QUANTITY - The unix timestamp for when the block was collated\r\n\r\ntransactions: ARRAY - Array of transaction objects, or 32 Bytes transaction hashes depending on the last given parameter\r\n\r\nuncles: ARRAY - Array of uncle hashes\r\n\r\n" - }, - "response": [] - }, - { - "name": "getBlockByHash", - "event": [ - { - "listen": "test", - "script": { - "id": "7c760190-bb77-4b63-bba7-93c01a72bd2a", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"difficulty\": \"0xb5708d578a6\",", - " \"extraData\": \"0xd783010400844765746887676f312e352e31856c696e7578\",", - " \"gasLimit\": \"0x2fefd8\",", - " \"gasUsed\": \"0x14820\",", - " \"hash\": \"0x0b4c6fb75ded4b90218cf0346b0885e442878f104e1b60bf75d5b6860eeacd53\",", - " \"logsBloom\": \"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",", - " \"miner\": \"0x0c729be7c39543c3d549282a40395299d987cec2\",", - " \"mixHash\": \"0x1530cda332d86d5d7462e3a0eb585e22c88348dd796d29e6ef18196a78cdce07\",", - " \"nonce\": \"0x938e5630b060b7d3\",", - " \"number\": \"0xf4629\",", - " \"parentHash\": \"0x96810a6076e621e311a232468bfd3dcfac08f4803b255af0f00300f47981c10f\",", - " \"receiptsRoot\": \"0x075608bec75d988c52ea6750f4c2204fd60082eb1df32cf8f4732e8a591eef62\",", - " \"sha3Uncles\": \"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347\",", - " \"size\": \"0x3e1\",", - " \"stateRoot\": 
\"0xb3f9408d80048b6f206951c4e387f8da37fb8510eccc18527865fa746c47bbc5\",", - " \"timestamp\": \"0x56bff9bb\",", - " \"totalDifficulty\": \"0x6332227c16fd7c67\",", - " \"transactions\": [", - " \"0x730724cb08a6eb17bf6b3296359d261570d343ea7944a17a9d7287d77900db08\",", - " \"0xef2ea39c20ba09553b2f3cf02380406ac766039ca56612937eed5e7f3503fb3a\",", - " \"0x5352c80aa2073e21ce6c4aa5488c38455f3519955ece7dca5af3e326797bcc63\",", - " \"0x060e4cf9fa8d34a8b423b5b3691b2541255ff7974ff16699e104edcfb63bd521\"", - " ],", - " \"transactionsRoot\": \"0xb779480508401ddd57f1f1e83a54715dcafc6ccec4e4d842c1b68cb418e6560d\",", - " \"uncles\": []", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", - " var jsonData = pm.response.json();", - " if (!isErigon) {", - " delete jsonData.result.author;", - " delete jsonData.result.sealFields;", - " }", - " pm.expect(jsonData).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getBlockByHash\",\n\t\"params\":[\n\t\t\"0x0b4c6fb75ded4b90218cf0346b0885e442878f104e1b60bf75d5b6860eeacd53\", \n\t\tfalse\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns information about a block given the block's hash.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - Hash of a block\r\n\r\nBoolean - If true it returns the full transaction objects, if false only the hashes of the transactions\r\n\r\n**Returns**\r\n\r\nObject - An object of type Block as described at eth_getBlockByNumber, or null when no block was found" - }, - "response": [] - }, - { - "name": "getBlockTransactionCountByNumber", - "event": [ - { - "listen": "test", - "script": { - "id": 
"341676cb-5915-48b7-a2b2-146feb7b80a6", - "exec": [ - "pm.test('Has correct result', function() {", - " const jsonData = pm.response.json();", - " pm.expect(jsonData.result).to.be.equals(\"0x4\");", - "});", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getBlockTransactionCountByNumber\",\n\t\"params\":[\n\t\t\"0xf4629\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the number of transactions in a block given the block's block number.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the number of transactions in this block" - }, - "response": [] - }, - { - "name": "getBlockTransactionCountByHash", - "event": [ - { - "listen": "test", - "script": { - "id": "439ec2db-b271-4b15-8fc5-09e86aeed870", - "exec": [ - "pm.test('Has correct result', function() {", - " const jsonData = pm.response.json();", - " pm.expect(jsonData.result).to.be.equals('0x4');", - "});", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getBlockTransactionCountByHash\",\n\t\"params\":[\n\t\t\"0x0b4c6fb75ded4b90218cf0346b0885e442878f104e1b60bf75d5b6860eeacd53\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the number of transactions in a block given the block's block hash.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - hash of a block\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the 
number of transactions in this block" - }, - "response": [] - } - ], - "protocolProfileBehavior": {}, - "_postman_isSubFolder": true - }, - { - "name": "txs", - "item": [ - { - "name": "getTransactionByHash", - "event": [ - { - "listen": "test", - "script": { - "id": "68b084bd-9b84-4018-bc45-e947bcf07f95", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"blockHash\": \"0x785b221ec95c66579d5ae14eebe16284a769e948359615d580f02e646e93f1d5\",", - " \"blockNumber\": \"0x52a90b\",", - " \"from\": \"0x11b6a5fe2906f3354145613db0d99ceb51f604c9\",", - " \"gas\": \"0x6b6c\",", - " \"gasPrice\": \"0x11e1a300\",", - " \"hash\": \"0xb2fea9c4b24775af6990237aa90228e5e092c56bdaee74496992a53c208da1ee\",", - " \"input\": \"0x80dfa34a0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002e516d556558334448416654747442464a42315454384a617a67765744776a727a7342686973693473547532613551000000000000000000000000000000000000\",", - " \"nonce\": \"0x10\",", - " \"r\": \"0xacdf839bdcb6653da60900f739076a00ecbe0059fa046933348e9b68a62a222\",", - " \"s\": \"0x132a0517a4c52916e0c6b0e74b0479326891df2a9afd711482c7f3919b335ff6\",", - " \"to\": \"0xfa28ec7198028438514b49a3cf353bca5541ce1d\",", - " \"transactionIndex\": \"0x25\",", - " \"v\": \"0x26\",", - " \"value\": \"0x0\"", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " const jsonData = pm.response.json();", - " var keys = Object.keys(jsonData.result);", - " keys.map(function (k) {", - " var value = jsonData.result[k] ? jsonData.result[k] : null;", - " var expect = expected.result[k] ? 
expected.result[k] : null;", - " if (expect && typeof expect === 'object') {", - " jsonData.result[k].map(function (value, index) {", - " var expect = expected.result[k][index];", - " pm.expect(value).to.be.equal(expect)", - " })", - " } else {", - " pm.expect(value).to.be.equal(expect)", - " }", - " });", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getTransactionByHash\",\n\t\"params\":[\n\t\t\"0xb2fea9c4b24775af6990237aa90228e5e092c56bdaee74496992a53c208da1ee\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns information about a transaction given the transaction's hash.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - hash of a transaction\r\n\r\n**Returns**\r\n\r\nObject - An object of type Transaction or null when no transaction was found\r\n\r\nhash: DATA, 32 Bytes - hash of the transaction\r\n\r\nnonce: QUANTITY - The number of transactions made by the sender prior to this one\r\n\r\nblockHash: DATA, 32 Bytes - hash of the block where this transaction was in. null when its pending\r\n\r\nblockNumber: QUANTITY - block number where this transaction was in. null when its pending\r\n\r\ntransactionIndex: QUANTITY - Integer of the transactions index position in the block. null when its pending\r\n\r\nfrom: DATA, 20 Bytes - address of the sender\r\n\r\nto: DATA, 20 Bytes - address of the receiver. 
null when its a contract creation transaction\r\n\r\nvalue: QUANTITY - value transferred in Wei\r\n\r\ngasPrice: QUANTITY - gas price provided by the sender in Wei\r\n\r\ngas: QUANTITY - gas provided by the sender\r\n\r\ninput: DATA - The data send along with the transaction" - }, - "response": [] - }, - { - "name": "getTransactionByBlockHashAndIndex", - "event": [ - { - "listen": "test", - "script": { - "id": "593d73f7-fea6-4fd0-bd02-ece07971cd58", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"blockHash\": \"0x785b221ec95c66579d5ae14eebe16284a769e948359615d580f02e646e93f1d5\",", - " \"blockNumber\": \"0x52a90b\",", - " \"from\": \"0x11b6a5fe2906f3354145613db0d99ceb51f604c9\",", - " \"gas\": \"0x6b6c\",", - " \"gasPrice\": \"0x11e1a300\",", - " \"hash\": \"0xb2fea9c4b24775af6990237aa90228e5e092c56bdaee74496992a53c208da1ee\",", - " \"input\": \"0x80dfa34a0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002e516d556558334448416654747442464a42315454384a617a67765744776a727a7342686973693473547532613551000000000000000000000000000000000000\",", - " \"nonce\": \"0x10\",", - " \"r\": \"0xacdf839bdcb6653da60900f739076a00ecbe0059fa046933348e9b68a62a222\",", - " \"s\": \"0x132a0517a4c52916e0c6b0e74b0479326891df2a9afd711482c7f3919b335ff6\",", - " \"to\": \"0xfa28ec7198028438514b49a3cf353bca5541ce1d\",", - " \"transactionIndex\": \"0x25\",", - " \"v\": \"0x26\",", - " \"value\": \"0x0\"", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " const jsonData = pm.response.json();", - " var keys = Object.keys(jsonData.result);", - " keys.map(function (k) {", - " var value = jsonData.result[k] ? jsonData.result[k] : null;", - " var expect = expected.result[k] ? 
expected.result[k] : null;", - " if (expect && typeof expect === 'object') {", - " jsonData.result[k].map(function (value, index) {", - " var expect = expected.result[k][index];", - " pm.expect(value).to.be.equal(expect)", - " })", - " } else {", - " pm.expect(value).to.be.equal(expect)", - " }", - " });", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getTransactionByBlockHashAndIndex\",\n\t\"params\":[\n\t\t\"0x785b221ec95c66579d5ae14eebe16284a769e948359615d580f02e646e93f1d5\", \n\t\t\"0x25\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns information about a transaction given the block's hash and a transaction index.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - hash of a block\r\n\r\nQUANTITY - Integer of the transaction index position\r\n\r\n**Returns**\r\n\r\nObject - An object of type Transaction or null when no transaction was found. 
See eth_getTransactionByHash" - }, - "response": [] - }, - { - "name": "getTransactionByBlockNumberAndIndex", - "event": [ - { - "listen": "test", - "script": { - "id": "530d1490-3007-499c-ae23-f9fd26f1787b", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"blockHash\": \"0x785b221ec95c66579d5ae14eebe16284a769e948359615d580f02e646e93f1d5\",", - " \"blockNumber\": \"0x52a90b\",", - " \"from\": \"0x11b6a5fe2906f3354145613db0d99ceb51f604c9\",", - " \"gas\": \"0x6b6c\",", - " \"gasPrice\": \"0x11e1a300\",", - " \"hash\": \"0xb2fea9c4b24775af6990237aa90228e5e092c56bdaee74496992a53c208da1ee\",", - " \"input\": \"0x80dfa34a0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002e516d556558334448416654747442464a42315454384a617a67765744776a727a7342686973693473547532613551000000000000000000000000000000000000\",", - " \"nonce\": \"0x10\",", - " \"r\": \"0xacdf839bdcb6653da60900f739076a00ecbe0059fa046933348e9b68a62a222\",", - " \"s\": \"0x132a0517a4c52916e0c6b0e74b0479326891df2a9afd711482c7f3919b335ff6\",", - " \"to\": \"0xfa28ec7198028438514b49a3cf353bca5541ce1d\",", - " \"transactionIndex\": \"0x25\",", - " \"v\": \"0x26\",", - " \"value\": \"0x0\"", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " const jsonData = pm.response.json();", - " var keys = Object.keys(jsonData.result);", - " keys.map(function (k) {", - " var value = jsonData.result[k] ? jsonData.result[k] : null;", - " var expect = expected.result[k] ? 
expected.result[k] : null;", - " if (expect && typeof expect === 'object') {", - " jsonData.result[k].map(function (value, index) {", - " var expect = expected.result[k][index];", - " pm.expect(value).to.be.equal(expect)", - " })", - " } else {", - " pm.expect(value).to.be.equal(expect)", - " }", - " });", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getTransactionByBlockNumberAndIndex\",\n\t\"params\":[\n\t\t\"0x52a90b\", \n\t\t\"0x25\"\n\t],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns information about a transaction given a block number and transaction index.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\nQUANTITY - The transaction index position\r\n\r\n**Returns**\r\n\r\nObject - An object of type Transaction or null when no transaction was found. 
See eth_getTransactionByHash" - }, - "response": [] - }, - { - "name": "getTransactionReceipt", - "event": [ - { - "listen": "test", - "script": { - "id": "d49e47cb-cbdf-4cc1-83e2-e0ab6b860fd3", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"blockHash\": \"0xf6084155ff2022773b22df3217d16e9df53cbc42689b27ca4789e06b6339beb2\",", - " \"blockNumber\": \"0x52a975\",", - " \"contractAddress\": null,", - " \"cumulativeGasUsed\": \"0x797db0\",", - " \"from\": \"0xd907941c8b3b966546fc408b8c942eb10a4f98df\",", - " \"gasUsed\": \"0x1308c\",", - " \"logs\": [", - " {", - " \"address\": \"0xd6df5935cd03a768b7b9e92637a01b25e24cb709\",", - " \"topics\": [", - " \"0x8940c4b8e215f8822c5c8f0056c12652c746cbc57eedbd2a440b175971d47a77\",", - " \"0x000000000000000000000000d907941c8b3b966546fc408b8c942eb10a4f98df\"", - " ],", - " \"data\": \"0x0000000000000000000000000000000000000000000000000000008bb2c97000\",", - " \"blockNumber\": \"0x52a975\",", - " \"transactionHash\": \"0xa3ece39ae137617669c6933b7578b94e705e765683f260fcfe30eaa41932610f\",", - " \"transactionIndex\": \"0x29\",", - " \"blockHash\": \"0xf6084155ff2022773b22df3217d16e9df53cbc42689b27ca4789e06b6339beb2\",", - " \"logIndex\": \"0x119\",", - " \"removed\": false", - " },", - " {", - " \"address\": \"0xd6df5935cd03a768b7b9e92637a01b25e24cb709\",", - " \"topics\": [", - " \"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef\",", - " \"0x0000000000000000000000000000000000000000000000000000000000000000\",", - " \"0x000000000000000000000000d907941c8b3b966546fc408b8c942eb10a4f98df\"", - " ],", - " \"data\": \"0x0000000000000000000000000000000000000000000000000000008bb2c97000\",", - " \"blockNumber\": \"0x52a975\",", - " \"transactionHash\": \"0xa3ece39ae137617669c6933b7578b94e705e765683f260fcfe30eaa41932610f\",", - " \"transactionIndex\": \"0x29\",", - " \"blockHash\": \"0xf6084155ff2022773b22df3217d16e9df53cbc42689b27ca4789e06b6339beb2\",", - " 
\"logIndex\": \"0x11a\",", - " \"removed\": false", - " }", - " ],", - " \"logsBloom\": \"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000020000000000000000000800000000000000004010000010100000000000000000000000000000000000000000000000000040000080000000000000080000000000000000000000000000000000000000000020000000000000000000000002000000000000000000000000000000000000000000000000000020000000010000000000000000000000000000000000000000000000000000000000\",", - " \"status\": \"0x1\",", - " \"to\": \"0xd6df5935cd03a768b7b9e92637a01b25e24cb709\",", - " \"transactionHash\": \"0xa3ece39ae137617669c6933b7578b94e705e765683f260fcfe30eaa41932610f\",", - " \"transactionIndex\": \"0x29\"", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " const jsonData = pm.response.json();", - " var keys = Object.keys(jsonData.result);", - " keys.map(function (k) {", - " var value = jsonData.result[k] ? jsonData.result[k] : null;", - " var expect = expected.result[k] ? 
expected.result[k] : null;", - " if (expect && typeof expect === 'object') {", - " if (k !== 'logs') {", - " jsonData.result[k].map(function (value, index) {", - " var expect = expected.result[k][index];", - " pm.expect(value).to.be.equal(expect)", - " })", - " }", - " } else {", - " pm.expect(value).to.be.equal(expect)", - " }", - " });", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getTransactionReceipt\",\n\t\"params\":[\n\t\t\"0xa3ece39ae137617669c6933b7578b94e705e765683f260fcfe30eaa41932610f\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the receipt of a transaction given the transaction's hash.\r\n\r\n**Note**\r\n\r\nReceipts are not available for pending transactions.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - hash of a transaction\r\n\r\n**Returns**\r\n\r\nObject - An object of type TransactionReceipt or null when no receipt was found\r\n\r\ntransactionHash: DATA, 32 Bytes - hash of the transaction\r\n\r\ntransactionIndex: QUANTITY - Integer of the transactions index position in the block\r\n\r\nblockHash: DATA, 32 Bytes - hash of the block where this transaction was in\r\n\r\nblockNumber: QUANTITY - block number where this transaction was in\r\n\r\ncumulativeGasUsed: QUANTITY - The total amount of gas used when this transaction was executed in the block\r\n\r\ngasUsed: QUANTITY - The amount of gas used by this specific transaction alone\r\n\r\ncontractAddress: DATA, 20 Bytes - The contract address created, if the transaction was a contract creation, null otherwise\r\n\r\nlogs: Array - Array of log objects, which this transaction generated\r\n\r\nlogsBloom: DATA, 256 Bytes - Bloom filter for light clients to quickly retrieve related 
logs.\r\n\r\nroot: DATA 32 bytes - post-transaction stateroot (if the block is pre-Byzantium)\r\n\r\nstatus: QUANTITY - either 1 = success or 0 = failure (if block is Byzatnium or later)" - }, - "response": [] - } - ], - "protocolProfileBehavior": {}, - "_postman_isSubFolder": true - }, - { - "name": "uncles", - "item": [ - { - "name": "getUncleByBlockNumberAndIndex", - "event": [ - { - "listen": "test", - "script": { - "id": "bb80848b-3b1d-4d5a-8317-fe623c0be114", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"difficulty\": \"0x3ff800000\",", - " \"extraData\": \"0x59617465732052616e64616c6c202d2045746865724e696e6a61\",", - " \"gasLimit\": \"0x1388\",", - " \"gasUsed\": \"0x0\",", - " \"hash\": \"0x5cd50096dbb856a6d1befa6de8f9c20decb299f375154427d90761dc0b101109\",", - " \"logsBloom\": \"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",", - " \"miner\": \"0xc8ebccc5f5689fa8659d83713341e5ad19349448\",", - " \"mixHash\": \"0xf8c94dfe61cf26dcdf8cffeda337cf6a903d65c449d7691a022837f6e2d99459\",", - " \"nonce\": \"0x68b769c5451a7aea\",", - " \"number\": \"0x1\",", - " \"parentHash\": \"0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3\",", - " \"receiptsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", - " \"sha3Uncles\": \"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347\",", - " \"size\": \"0x21a\",", - " \"stateRoot\": 
\"0x1e6e030581fd1873b4784280859cd3b3c04aa85520f08c304cf5ee63d3935add\",", - " \"timestamp\": \"0x55ba4242\",", - " \"totalDifficulty\": \"0xffd003ffe\",", - " \"transactionsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", - " \"uncles\": []", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " const jsonData = pm.response.json();", - " var keys = Object.keys(jsonData.result);", - " keys.map(function (k) {", - " var value = jsonData.result[k] ? jsonData.result[k] : null;", - " var expect = expected.result[k] ? expected.result[k] : null;", - " if (expect && typeof expect === 'object') {", - " jsonData.result[k].map(function (value, index) {", - " var expect = expected.result[k][index];", - " pm.expect(value).to.be.equal(expect)", - " })", - " } else {", - " pm.expect(value).to.be.equal(expect)", - " }", - " });", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getUncleByBlockNumberAndIndex\",\n\t\"params\":[\n\t\t\"0x3\",\n\t\t\"0x0\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns information about an uncle given a block's number and the index of the uncle.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\nQUANTITY - The uncle's index position\r\n\r\n**Returns**\r\n\r\nObject - An object of type Block (with zero transactions), or null when no uncle was found. 
See eth_getBlockByHash" - }, - "response": [] - }, - { - "name": "getUncleByBlockHashAndIndex", - "event": [ - { - "listen": "test", - "script": { - "id": "3ba8cc46-cd5d-4b26-a618-a54ddc3d86c4", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"difficulty\": \"0x3ff800000\",", - " \"extraData\": \"0x59617465732052616e64616c6c202d2045746865724e696e6a61\",", - " \"gasLimit\": \"0x1388\",", - " \"gasUsed\": \"0x0\",", - " \"hash\": \"0x5cd50096dbb856a6d1befa6de8f9c20decb299f375154427d90761dc0b101109\",", - " \"logsBloom\": \"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",", - " \"miner\": \"0xc8ebccc5f5689fa8659d83713341e5ad19349448\",", - " \"mixHash\": \"0xf8c94dfe61cf26dcdf8cffeda337cf6a903d65c449d7691a022837f6e2d99459\",", - " \"nonce\": \"0x68b769c5451a7aea\",", - " \"number\": \"0x1\",", - " \"parentHash\": \"0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3\",", - " \"receiptsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", - " \"sha3Uncles\": \"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347\",", - " \"size\": \"0x21a\",", - " \"stateRoot\": \"0x1e6e030581fd1873b4784280859cd3b3c04aa85520f08c304cf5ee63d3935add\",", - " \"timestamp\": \"0x55ba4242\",", - " \"totalDifficulty\": \"0xffd003ffe\",", - " \"transactionsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", - " \"uncles\": []", - " }", - "}", - "", - "pm.test('Has correct result', 
function() {", - " const jsonData = pm.response.json();", - " var keys = Object.keys(jsonData.result);", - " keys.map(function (k) {", - " var value = jsonData.result[k] ? jsonData.result[k] : null;", - " var expect = expected.result[k] ? expected.result[k] : null;", - " if (expect && typeof expect === 'object') {", - " jsonData.result[k].map(function (value, index) {", - " var expect = expected.result[k][index];", - " pm.expect(value).to.be.equal(expect)", - " })", - " } else {", - " pm.expect(value).to.be.equal(expect)", - " }", - " });", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getUncleByBlockHashAndIndex\",\n\t\"params\":[\n\t\t\"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\", \n\t\t\"0x0\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns information about an uncle given a block's hash and the index of the uncle.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - Hash of the block holding the uncle\r\n\r\nQUANTITY - The uncle's index position\r\n\r\n**Returns**\r\n\r\nObject - An object of type Block (with zero transactions), or null when no uncle was found. 
See eth_getBlockByHash" - }, - "response": [] - }, - { - "name": "getUncleCountByBlockNumber", - "event": [ - { - "listen": "test", - "script": { - "id": "790ef142-b864-4ad6-a90c-7bece105c3f8", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": \"0x1\",", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", - " var jsonData = pm.response.json();", - " if (!isErigon) {", - " delete jsonData.result.author;", - " delete jsonData.result.sealFields;", - " }", - " pm.expect(jsonData).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getUncleCountByBlockNumber\",\n\t\"params\":[\n\t\t\"0x3\"\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the number of uncles in the block, if any.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nQUANTITY - The number of uncles in the block, if any" - }, - "response": [] - }, - { - "name": "getUncleCountByBlockHash", - "event": [ - { - "listen": "test", - "script": { - "id": "d3fba91c-ae8f-4ced-b563-51f9b7e36144", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": \"0x1\",", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", - " var jsonData = pm.response.json();", - " if (!isErigon) {", - " delete jsonData.result.author;", - " delete jsonData.result.sealFields;", - " }", - " pm.expect(jsonData).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], 
- "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getUncleCountByBlockHash\",\n\t\"params\":[\n\t\t\"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the number of uncles in the block, if any.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - Hash of the block containing the uncle\r\n\r\n**Returns**\r\n\r\nQUANTITY - The number of uncles in the block, if any" - }, - "response": [] - } - ], - "protocolProfileBehavior": {}, - "_postman_isSubFolder": true - }, - { - "name": "filters", - "item": [ - { - "name": "newPendingTransactionFilter", - "event": [ - { - "listen": "test", - "script": { - "id": "2bdda0a7-7cf2-4e02-ae19-7a575f2588a2", - "exec": ["utils.notImplemented(\"eth_newPendingTransactionFilter\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_newPendingTransactionFilter\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Creates a pending transaction filter in the node. 
To check if the state has changed, call eth_getFilterChanges.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - A filter id" - }, - "response": [] - }, - { - "name": "newBlockFilter", - "event": [ - { - "listen": "test", - "script": { - "id": "a627cf51-a966-4f25-9447-fb3da185a3e0", - "exec": ["utils.notImplemented(\"eth_newBlockFilter\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_newBlockFilter\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Creates a block filter in the node, to notify when a new block arrives. To check if the state has changed, call eth_getFilterChanges.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - A filter id" - }, - "response": [] - }, - { - "name": "newFilter", - "event": [ - { - "listen": "test", - "script": { - "id": "44d72ef7-022a-4ebf-94b6-9778bd6925d1", - "exec": ["utils.notImplemented(\"eth_newFilter\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_newFilter\",\n\t\"params\": [\n {\n \"fromBlock\": \"0x1\",\n \"toBlock\": \"0x2\",\n \"address\": \" 0x8888f1f195afa192cfee860698584c030f4c9db1\",\n \"topics\": [\n \"0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n null,\n [\"0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b\", \"0x0000000000000000000000000aff3454fce5edbc8cca8697c15331677e6ebccc\"]\n ]\n }\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": 
["{{HOST}}"] - }, - "description": "Creates an arbitrary filter object, based on filter options, to notify when the state changes (logs). To check if the state has changed, call eth_getFilterChanges.\r\n\r\n**Example**\r\n\r\nA note on specifying topic filters\r\n\r\nTopics are order-dependent. A transaction with a log with topics [A, B] will be matched by the following topic filters\r\n\r\n[] \"anything\"\r\n\r\n[A] \"A in first position (and anything after)\"\r\n\r\n[null, B] \"anything in first position AND B in second position (and anything after)\"\r\n\r\n[A, B] \"A in first position AND B in second position (and anything after)\"\r\n\r\n[[A, B], [A, B]] \"(A OR B) in first position AND (A OR B) in second position (and anything after)\"\r\n\r\n**Parameters**\r\n\r\nObject - An object of type Filter\r\n\r\nQUANTITY|TAG - (optional, default \"latest\") Integer block number, or \"earliest\", \"latest\" or \"pending\" for not yet mined transactions\r\n\r\nQUANTITY|TAG - (optional, default \"latest\") Integer block number, or \"earliest\", \"latest\" or \"pending\" for not yet mined transactions\r\n\r\nDATA|Array of DATA, 20 Bytes - (optional) Contract address or a list of addresses from which logs should originate\r\n\r\nArray of DATA, - (optional) Array of 32 Bytes DATA topics. Topics are order-dependent. 
Each topic can also be an array of DATA with \"or\" options\r\n\r\n**Returns**\r\n\r\nQUANTITY - A filter id" - }, - "response": [] - }, - { - "name": "uninstallFilter", - "event": [ - { - "listen": "test", - "script": { - "id": "11a48bf8-6320-45ae-989c-ad4b889b5f0d", - "exec": ["utils.notImplemented(\"eth_uninstallFilter\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_uninstallFilter\",\n\t\"params\":[\n\t\t\"0xdeadbeef\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Uninstalls a previously-created filter given the filter's id. Always uninstall filters when no longer needed.\r\n\r\n**Note**\r\n\r\nFilters timeout when they are not requested with eth_getFilterChanges for a period of time.\r\n\r\n**Parameters**\r\n\r\nQUANTITY - The filter id\r\n\r\n**Returns**\r\n\r\nBoolean - true if the filter was successfully uninstalled, false otherwise" - }, - "response": [] - }, - { - "name": "getFilterChanges", - "event": [ - { - "listen": "test", - "script": { - "id": "6e68517c-5d19-4843-b1bb-39c7a594d4a5", - "exec": ["utils.notImplemented(\"eth_getFilterChanges\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getFilterChanges\",\n\t\"params\":[\n\t\t\"0xdeadbeef\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns an array of objects of type Log, an array of block hashes (for eth_newBlockFilter) or an array of transaction hashes (for 
eth_newPendingTransactionFilter) or an empty array if nothing has changed since the last poll.\r\n\r\n**Note**\r\n\r\nIn solidity: The first topic is the hash of the signature of the event (if you have not declared the event anonymous.\r\n\r\n**Parameters**\r\n\r\nQUANTITY - The filter id\r\n\r\n**Returns**\r\n\r\nObject - An object of type FilterLog is defined as\r\n\r\nremoved: BOOLEAN - true when the log was removed, due to a chain reorganization. false if its a valid log\r\n\r\nlogIndex: QUANTITY - Integer of the log index position in the block. null when its pending log\r\n\r\ntransactionIndex: QUANTITY - Integer of the transactions index position log was created from. null when its pending log\r\n\r\ntransactionHash: DATA, 32 Bytes - hash of the transactions this log was created from. null when its pending log\r\n\r\nblockHash: DATA, 32 Bytes - hash of the block where this log was in. null when its pending. null when its pending log\r\n\r\nblockNumber: QUANTITY - The block number where this log was in. null when its pending. null when its pending log\r\n\r\naddress: DATA, 20 Bytes - address from which this log originated\r\n\r\ndata: DATA - contains one or more 32 Bytes non-indexed arguments of the log\r\n\r\ntopics: Array of DATA - Array of 0 to 4 32 Bytes DATA of indexed log arguments." 
- }, - "response": [] - }, - { - "name": "getLogs", - "event": [ - { - "listen": "test", - "script": { - "id": "3b0fee2d-9ef2-48d4-901b-7cab11dfbba2", - "exec": [ - "pm.test('Not tested', function() {", - " var tested = false;", - " pm.expect(tested).to.be.true", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getLogs\",\n\t\"params\":[{\n\t\t\"topics\":[\n\t\t\t\"0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b\"\n\t\t]\n\t}],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns an array of logs matching a given filter object.\r\n\r\n**Parameters**\r\n\r\nObject - An object of type Filter, see eth_newFilter parameters\r\n\r\n**Returns**\r\n\r\nObject - An object of type LogArray or an empty array if nothing has changed since last poll. 
See eth_getFilterChanges" - }, - "response": [] - } - ], - "protocolProfileBehavior": {}, - "_postman_isSubFolder": true - }, - { - "name": "accounts", - "item": [ - { - "name": "getBalance", - "event": [ - { - "listen": "test", - "script": { - "id": "2527ac10-fa47-47c1-a422-ac54f2067e83", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": \"0x7a69\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getBalance\",\n\t\"params\":[\n\t\t\"0x5df9b87991262f6ba471f09758cde1c0fc1de734\", \n\t\t\"0xb443\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the balance of an account for a given address.\r\n\r\n**Parameters**\r\n\r\nDATA, 20 Bytes - Address to check for balance\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the current balance in wei" - }, - "response": [] - }, - { - "name": "getTransactionCount", - "event": [ - { - "listen": "test", - "script": { - "id": "bcfa7ced-fa30-4936-ad0d-28c99c7a39c5", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": \"0xa\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": 
"{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getTransactionCount\",\n\t\"params\":[\n\t\t\"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\", \n\t\t\"0xc443\"\n\t],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the number of transactions sent from an address (the nonce).\r\n\r\n**Parameters**\r\n\r\nDATA, 20 Bytes - Address from which to retrieve nonce\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the number of transactions sent from this address" - }, - "response": [] - }, - { - "name": "getCode", - "event": [ - { - "listen": "test", - "script": { - "id": "b1435da2-cfbc-48fd-97ac-24612fb6ee6d", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": \"0x6060604052361561001f5760e060020a600035046372ea4b8c811461010c575b61011b3460008080670de0b6b3a764000084106101d557600180548101908190556003805433929081101561000257906000526020600020900160006101000a815481600160a060020a0302191690830217905550670de0b6b3a7640000840393508350670de0b6b3a76400006000600082828250540192505081905550600260016000505411151561011d5760038054829081101561000257906000526020600020900160009054906101000a9004600160a060020a0316600160a060020a03166000600060005054604051809050600060405180830381858888f150505080555060016002556101d5565b60018054016060908152602090f35b005b60018054600354910114156101d55760038054600254600101909102900392505b6003546002549003600119018310156101e357600380548490811015610002579082526040517fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b9190910154600160a060020a03169082906706f05b59d3b200009082818181858883f1505090546706f05b59d3b1ffff1901835550506001929092019161013e565b505060028054600101905550505b600080548501905550505050565b506002548154919250600190810190910460001901905b60035460025490036001190183101561029a576003805484908110156100025760009182526040517fc2575a0e9e593c00
f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b9190910154600160a060020a03169190838504600019019082818181858883f1505081548486049003600190810190925550600290830183020460001901841415905061028e576001015b600192909201916101fa565b60038054600254810182018083559190829080158290116101c75760008390526101c7907fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b9081019083015b808211156102fa57600081556001016102e6565b509056\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getCode\",\n\t\"params\":[\n\t\t\"0x109c4f2ccc82c4d77bde15f306707320294aea3f\", \n\t\t\"0xc443\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the byte code at a given address (if it's a smart contract).\r\n\r\n**Parameters**\r\n\r\nDATA, 20 Bytes - Address from which to retreive byte code\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nDATA - The byte code (if any) found at the given address" - }, - "response": [] - }, - { - "name": "getStorageAt", - "event": [ - { - "listen": "test", - "script": { - "id": "270e7931-1ec1-440a-a8e1-ba54f4f4e9a3", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": \"0x0000000000000000000000000000000000000000000000001bc16d674ec80000\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - 
], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\", \n\t\"method\": \"eth_getStorageAt\", \n\t\"params\": [\n\t\t\"0x109c4f2ccc82c4d77bde15f306707320294aea3f\", \n\t\t\"0x0\",\n\t\t\"0xc443\"\n\t], \n\t\"id\": \"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the value from a storage position at a given address.\r\n\r\n**Parameters**\r\n\r\nDATA, 20 Bytes - Address of the contract whose storage to retreive\r\n\r\nQUANTITY - Integer of the position in the storage\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nDATA - The value at this storage position" - }, - "response": [] - } - ], - "protocolProfileBehavior": {}, - "_postman_isSubFolder": true - }, - { - "name": "system", - "item": [ - { - "name": "blockNumber", - "event": [ - { - "listen": "test", - "script": { - "id": "5e569618-0584-4849-9571-689ef1a79248", - "exec": ["utils.cannotTest(\"eth_blockNumber\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_blockNumber\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the block number of most recent block.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the current highest block number the client is on" - }, - "response": [] - }, - { - "name": "syncing", - "event": [ - { - "listen": "test", - "script": { - "id": "8b16926e-2282-492c-9d84-48dd950ac85b", - "exec": [ - "// There's nothing really to test here. 
The node is always syncing", - "pm.test('Endpoint not tested', function() {", - " pm.expect(true).to.be.true;", - "});" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_syncing\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns a data object detailing the status of the sync process or false if not syncing.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nObject - An object of type Syncing or false if not syncing.\r\n\r\nstartingBlock: QUANTITY - The block at which the import started (will only be reset, after the sync reached his head)\r\n\r\ncurrentBlock: QUANTITY - The current block, same as eth_blockNumber\r\n\r\nhighestBlock: QUANTITY - The estimated highest block" - }, - "response": [] - }, - { - "name": "chainId", - "event": [ - { - "listen": "test", - "script": { - "id": "82448e71-a47e-4fee-9ba7-b6c0d211c075", - "exec": [ - "pm.test('Has correct result', function() {", - " const jsonData = pm.response.json();", - " pm.expect(jsonData.result).to.be.equals(\"0x1\")", - "});", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_chainId\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the current ethereum chainId.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - The current chainId" - }, - "response": [] - }, - { - "name": "protocolVersion", - "event": [ - { - "listen": "test", - "script": { - "id": 
"42dfa289-098b-43b0-9395-9ed18209fa20", - "exec": [ - "pm.test('Has correct result', function() {", - " var isParity = pm.environment.get('HOST') == \"{{PARITY}}\";", - " const jsonData = pm.response.json();", - " ", - " if (isParity) {", - " pm.expect(jsonData.result).to.be.equals(\"63\")", - " } else {", - " pm.expect(jsonData.result).to.be.equals(\"0x41\")", - " }", - "});", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_protocolVersion\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the current ethereum protocol version.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - The current ethereum protocol version" - }, - "response": [] - }, - { - "name": "gasPrice", - "event": [ - { - "listen": "test", - "script": { - "id": "50b5578b-4008-406c-a8f6-0459f258538d", - "exec": ["utils.cannotTest(\"eth_gasPrice\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_gasPrice\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the current price per gas in wei.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the current gas price in wei" - }, - "response": [] - } - ], - "protocolProfileBehavior": {}, - "_postman_isSubFolder": true - }, - { - "name": "call", - "item": [ - { - "name": "call", - "event": [ - { - "listen": "test", - "script": { - "id": 
"3dde2a48-3bad-43c2-97a6-f4339f368992", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": \"0x0000000000000000000000000000000000000000000c685fa11e01ec6f000000\",", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " var jsonData = pm.response.json();", - " pm.expect(jsonData).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_call\",\n\t\"params\":[\n {\n \"to\": \"0x08a2e41fb99a7599725190b9c970ad3893fa33cf\",\n \"data\": \"0x18160ddd\"\n },\n \"0xa2f2e0\"\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Executes a new message call immediately without creating a transaction on the block chain.\r\n\r\n**Parameters**\r\n\r\nObject - An object of type Call\r\n\r\nDATA, 20 Bytes - (optional) The address the transaction is sent from\r\n\r\nDATA, 20 Bytes - The address the transaction is directed to\r\n\r\nQUANTITY - (optional) Integer of the gas provided for the transaction execution. eth_call consumes zero gas, but this parameter may be needed by some executions\r\n\r\nQUANTITY - (optional) Integer of the gasPrice used for each paid gas\r\n\r\nQUANTITY - (optional) Integer of the value sent with this transaction\r\n\r\nDATA - (optional) Hash of the method signature and encoded parameters. 
For details see Ethereum Contract ABI\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nDATA - The return value of executed contract" - }, - "response": [] - }, - { - "name": "estimateGas", - "event": [ - { - "listen": "test", - "script": { - "id": "61b5e2c2-b0c3-438c-a8cc-85bd6f058f75", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": \"0x5208\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_estimateGas\",\n\t\"params\":[\n {\n \"to\": \"0x3d597789ea16054a084ac84ce87f50df9198f415\",\n \"from\": \"0x3d597789ea16054a084ac84ce87f50df9198f415\",\n \"value\": \"0x1\"\n }\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns an estimate of how much gas is necessary to allow the transaction to complete. The transaction will not be added to the blockchain.\r\n\r\n**Note**\r\n\r\nThe estimate may be significantly more than the amount of gas actually used by the transaction for a variety of reasons including EVM mechanics and node performance.\r\n\r\n**Note**\r\n\r\nIf no gas limit is specified geth uses the block gas limit from the pending block as an upper bound. 
As a result the returned estimate might not be enough to executed the call/transaction when the amount of gas is higher than the pending block gas limit.\r\n\r\n**Parameters**\r\n\r\nObject - An object of type Call, see eth_call parameters, expect that all properties are optional\r\n\r\n**Returns**\r\n\r\nQUANTITY - The estimated amount of gas needed for the call" - }, - "response": [] - }, - { - "name": "sendTransaction", - "event": [ - { - "listen": "test", - "script": { - "id": "6099e6b6-bb38-45ed-8178-a2c148e4d2c5", - "exec": ["utils.notImplemented(\"eth_sendTransaction\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_sendTransaction\",\n \"params\": [\n {\n \"from\": \" 0xb60e8dd61c5d32be8058bb8eb970870f07233155\",\n \"to\": \" 0xd46e8dd67c5d32be8058bb8eb970870f07244567\",\n \"gas\": \"0x76c0\",\n \"gasPrice\": \"0x9184e72a000\",\n \"value\": \"0x9184e72a\",\n \"data\": \"0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675\"\n }\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Creates new message call transaction or a contract creation if the data field contains code.\r\n\r\n**Note**\r\n\r\nUse eth_getTransactionReceipt to get the contract address, after the transaction was mined, when you created a contract\r\n\r\n**Parameters**\r\n\r\nObject - An object of type SendTransaction\r\n\r\nDATA, 20 Bytes - The address the transaction is send from\r\n\r\nDATA, 20 Bytes - (optional when creating new contract) The address the transaction is directed to\r\n\r\nQUANTITY - (optional, default 90000) Integer of the gas provided for the transaction execution. 
It will return unused gas\r\n\r\nQUANTITY - (optional, default To-Be-Determined) Integer of the gasPrice used for each paid gas\r\n\r\nQUANTITY - (optional) Integer of the value sent with this transaction\r\n\r\nDATA - The compiled code of a contract OR the hash of the invoked method signature and encoded parameters. For details see Ethereum Contract ABI\r\n\r\nQUANTITY - (optional) Integer of a nonce. This allows to overwrite your own pending transactions that use the same nonce\r\n\r\n**Returns**\r\n\r\nDATA, 32 Bytes - The transaction hash, or the zero hash if the transaction is not yet available" - }, - "response": [] - }, - { - "name": "sendRawTransaction", - "event": [ - { - "listen": "test", - "script": { - "id": "3293bee1-893c-4d4c-bc5b-458235d2158b", - "exec": [ - "pm.test('Not tested', function() {", - " var tested = false;", - " pm.expect(tested).to.be.true", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_sendRawTransaction\",\n\t\"params\":[\"0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675\"],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Creates new message call transaction or a contract creation for previously-signed transactions.\r\n\r\n**Note**\r\n\r\nUse eth_getTransactionReceipt to get the contract address, after the transaction was mined, when you created a contract.\r\n\r\n**Parameters**\r\n\r\nDATA - The signed transaction data\r\n\r\n**Returns**\r\n\r\nDATA, 32 Bytes - The transaction hash, or the zero hash if the transaction is not yet available" - }, - "response": [] - }, - { - "name": "getProof", - "event": [ - { - "listen": "test", - "script": { - "id": "3d8697ee-e17d-419f-b66a-1017f8f7ad22", - "exec": 
["utils.notImplemented(\"eth_getProof\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"id\": \"1\",\n \"jsonrpc\": \"2.0\",\n \"method\": \"eth_getProof\",\n \"params\": [\n \"0x7F0d15C7FAae65896648C8273B6d7E43f58Fa842\",\n [ \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\" ],\n \"latest\"\n ]\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "See this EIP of more information: https://github.com/ethereum/EIPs/issues/1186\r\n\r\nPossible implementation: https://github.com/vocdoni/eth-storage-proof\r\n\r\n**Parameters**\r\n\r\nDATA, 20 Bytes - The address of the storage locations being proved\r\n\r\nDATAARRAY - one or more storage locations to prove\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nDATA - The Merkel proof of the storage locations" - }, - "response": [] - } - ], - "event": [ - { - "listen": "prerequest", - "script": { - "id": "952dad06-2f84-4226-98c8-696d0fc84db6", - "type": "text/javascript", - "exec": [""] - } - }, - { - "listen": "test", - "script": { - "id": "e4208e7b-1dbd-4b84-9735-94dbf509f2e4", - "type": "text/javascript", - "exec": [""] - } - } - ], - "protocolProfileBehavior": {}, - "_postman_isSubFolder": true - }, - { - "name": "mining", - "item": [ - { - "name": "coinbase", - "event": [ - { - "listen": "test", - "script": { - "id": "6136a206-96bb-43f2-94bd-08f93303cf9a", - "exec": ["utils.notImplemented(\"eth_coinbase\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": 
"{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_coinbase\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the current client coinbase address.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nDATA, 20 Bytes - The current coinbase address" - }, - "response": [] - }, - { - "name": "hashrate", - "event": [ - { - "listen": "test", - "script": { - "id": "9ac59f4f-7de3-4276-8e65-91cd0ad9c040", - "exec": ["utils.notImplemented(\"eth_hashrate\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_hashrate\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the number of hashes per second that the node is mining with.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - Number of hashes per second" - }, - "response": [] - }, - { - "name": "mining", - "event": [ - { - "listen": "test", - "script": { - "id": "8bdc9381-dbde-4419-b736-96e7914901e0", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": false", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_mining\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns true if 
client is actively mining new blocks.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nBoolean - true if the client is mining, false otherwise" - }, - "response": [] - }, - { - "name": "getWork", - "event": [ - { - "listen": "test", - "script": { - "id": "99953248-ef11-4c01-92dc-26ce5ef38d9d", - "exec": ["utils.notImplemented(\"eth_getWork\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getWork\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the hash of the current block, the seedHash, and the boundary condition to be met ('target').\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nObject - An object of type Work (an array of three hashes representing block header pow-hash, seed hash and boundary condition\r\n\r\ncurrent: DATA, 32 Bytes - current block header pow-hash\r\n\r\nseed: DATA, 32 Bytes - The seed hash used for the DAG\r\n\r\nboundary: DATA, 32 Bytes - The boundary condition ('target'), 2^256 / difficulty" - }, - "response": [] - }, - { - "name": "submitWork", - "event": [ - { - "listen": "test", - "script": { - "id": "db6d4657-a901-4ed5-9995-37f11ec9da6e", - "exec": ["utils.notImplemented(\"eth_submitWork\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\", \n\t\"method\":\"eth_submitWork\", \n\t\"params\":[\n\t\t\"0x1\", \n\t\t\"0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef\", \n\t\t\"0xD16E5700000000000000000000000000D16E5700000000000000000000000000\"\n\t],\n\t\"id\":\"1\"\n}", - 
"options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Submits a proof-of-work solution to the blockchain.\r\n\r\n**Parameters**\r\n\r\nDATA, 8 Bytes - The nonce found (64 bits)\r\n\r\nDATA, 32 Bytes - The header's pow-hash (256 bits)\r\n\r\nDATA, 32 Bytes - The mix digest (256 bits)\r\n\r\n**Returns**\r\n\r\nBoolean - true if the provided solution is valid, false otherwise" - }, - "response": [] - }, - { - "name": "submitHashrate", - "event": [ - { - "listen": "test", - "script": { - "id": "394114d6-fffc-4cb8-a897-89e0bb6b0aa2", - "exec": ["utils.notImplemented(\"eth_submitHashrate\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\", \n\t\"method\":\"eth_submitHashrate\", \n\t\"params\":[\n\t\t\"0x0000000000000000000000000000000000000000000000000000000000500000\", \n\t\t\"0x59daa26581d0acd1fce254fb7e85952f4c09d0915afd33d3886cd914bc7d283c\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Submit the mining hashrate to the blockchain.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - a hexadecimal string representation of the hash rate\r\n\r\nString - A random hexadecimal ID identifying the client\r\n\r\n**Returns**\r\n\r\nBoolean - true if submitting went through succesfully, false otherwise" - }, - "response": [] - } - ], - "protocolProfileBehavior": {}, - "_postman_isSubFolder": true - } - ], - "protocolProfileBehavior": {} - }, - { - "name": "trace", - "item": [ - { - "name": "call", - "event": [ - { - "listen": "test", - "script": { - "id": "2e6a9c1c-38f4-4061-ae83-8fcc8a7511be", - "exec": ["utils.notImplemented(\"trace_call\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": 
"POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"jsonrpc\":\"2.0\",\n \"method\":\"trace_call\",\n \"params\":[\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\",\"vmTrace\"],\n \"latest\"\n ],\n \"id\": \"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Executes the given call and returns a number of possible traces for it.\r\n\r\n**Parameters**\r\n\r\nObject - An object of type TraceCall\r\n\r\nfrom: DATA, 20 Bytes - (optional) 20 Bytes - The address the transaction is send from.\r\n\r\nto: DATA, 20 Bytes - (optional when creating new contract) 20 Bytes - The address the transaction is directed to.\r\n\r\ngas: QUANTITY - (optional) Integer formatted as a hex string of the gas provided for the transaction execution. eth_call consumes zero gas, but this parameter may be needed by some executions.\r\n\r\ngasPrice: QUANTITY - (optional) Integer formatted as a hex string of the gas price used for each paid gas.\r\n\r\nvalue: QUANTITY - (optional) Integer formatted as a hex string of the value sent with this transaction.\r\n\r\ndata: DATA - (optional) 4 byte hash of the method signature followed by encoded parameters. 
For details see Ethereum Contract ABI.\r\n\r\nSTRINGARRAY - An array of strings, one or more of: \"vmTrace\", \"trace\", \"stateDiff\".\r\n\r\nTAG - (optional) Integer of a block number, or the string 'earliest', 'latest' or 'pending'.\r\n\r\n**Returns**\r\n\r\nObject - An object of type BlockTraceArray" - }, - "response": [] - }, - { - "name": "callMany", - "event": [ - { - "listen": "test", - "script": { - "id": "fbec6f83-1a35-43dd-839b-8dea5ea39cfb", - "exec": ["utils.notImplemented(\"trace_callMany\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_callMany\",\n \"params\": [\n [\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\"]\n ],\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\"]\n ]\n ],\n \"latest\"\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Performs multiple call traces on top of the same block. i.e. transaction n will be executed on top of a pending block with all n-1 transactions applied (traced) first. 
Allows to trace dependent transactions.\r\n\r\n**Parameters**\r\n\r\nCALLARRAY - An array of Call objects plus strings, one or more of: \"vmTrace\", \"trace\", \"stateDiff\".\r\n\r\nTAG - (optional) integer block number, or the string 'latest', 'earliest' or 'pending', see the default block parameter.\r\n\r\n**Returns**\r\n\r\nObject - An object of type BlockTraceArray" - }, - "response": [] - }, - { - "name": "rawTransaction", - "event": [ - { - "listen": "test", - "script": { - "id": "a2465974-9dba-4410-a7bd-67b493703d29", - "exec": ["utils.notImplemented(\"trace_rawTransaction\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_rawTransaction\",\n\t\"params\":[\"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\", [\"vmTrace\"]],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Traces a call to eth_sendRawTransaction without making the call, returning the traces\r\n\r\n**Parameters**\r\nDATA - Raw transaction data.\r\n\r\nSTRINGARRAY - Type of trace, one or more of: \"vmTrace\", \"trace\", \"stateDiff\".\r\n\r\n**Returns**\r\nObject - An object of type BlockTrace." 
- }, - "response": [] - }, - { - "name": "replayBlockTransactions", - "event": [ - { - "listen": "test", - "script": { - "id": "7ae64e81-7268-4743-ae25-98a2d53386c0", - "exec": ["utils.notImplemented(\"trace_replayBlockTransactions\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayBlockTransactions\",\n\t\"params\":[\"0x2\",[\"trace\"]],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Replays all transactions in a block returning the requested traces for each transaction.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer of a block number, or the string 'earliest', 'latest' or 'pending'.\r\n\r\nSTRINGARRAY - Type of trace, one or more of: \"vmTrace\", \"trace\", \"stateDiff\".\r\n\r\n**Returns**\r\n\r\nObject - An object of type BlockTraceArray." 
- }, - "response": [] - }, - { - "name": "replayTransaction", - "event": [ - { - "listen": "test", - "script": { - "id": "b60375bb-313f-47cc-9a7d-ff4abffebe99", - "exec": ["utils.notImplemented(\"trace_replayTransaction\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayTransaction\",\n \"params\": [\n \"0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f\",\n [\"trace\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Replays a transaction, returning the traces.\r\n\r\n**Parameters**\r\nDATA, 32 Bytes - The transaction's hash.\r\n\r\nSTRINGARRAY - Type of trace, one or more of: \"vmTrace\", \"trace\", \"stateDiff\".\r\n\r\n**Returns**\r\nObject - An object of type BlockTrace." 
- }, - "response": [] - }, - { - "name": "transaction", - "event": [ - { - "listen": "test", - "script": { - "id": "de0d1c16-7bd3-4d6c-ae80-1001f994f1ed", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x83806d539d4ea1c140489a06660319c9a303f874\",", - " \"gas\": \"0x1a1f8\",", - " \"input\": \"0x\",", - " \"to\": \"0x1c39ba39e4735cb65978d4db400ddd70a72dc750\",", - " \"value\": \"0x7a16c911b4d00000\"", - " },", - " \"blockHash\": \"0x7eb25504e4c202cf3d62fd585d3e238f592c780cca82dacb2ed3cb5b38883add\",", - " \"blockNumber\": 3068185,", - " \"result\": {", - " \"gasUsed\": \"0x2982\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 2,", - " \"traceAddress\": [],", - " \"transactionHash\": \"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\",", - " \"transactionPosition\": 2,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x1c39ba39e4735cb65978d4db400ddd70a72dc750\",", - " \"gas\": \"0x13e99\",", - " \"input\": \"0x16c72721\",", - " \"to\": \"0x2bd2326c993dfaef84f696526064ff22eba5b362\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x7eb25504e4c202cf3d62fd585d3e238f592c780cca82dacb2ed3cb5b38883add\",", - " \"blockNumber\": 3068185,", - " \"result\": {", - " \"gasUsed\": \"0x183\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 0", - " ],", - " \"transactionHash\": \"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\",", - " \"transactionPosition\": 2,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x1c39ba39e4735cb65978d4db400ddd70a72dc750\",", - " \"gas\": \"0x8fc\",", - " \"input\": \"0x\",", - " \"to\": \"0x70faa28a6b8d6829a4b1e649d26ec9a2a39ba413\",", - " 
\"value\": \"0x7a16c911b4d00000\"", - " },", - " \"blockHash\": \"0x7eb25504e4c202cf3d62fd585d3e238f592c780cca82dacb2ed3cb5b38883add\",", - " \"blockNumber\": 3068185,", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1", - " ],", - " \"transactionHash\": \"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\",", - " \"transactionPosition\": 2,", - " \"type\": \"call\"", - " }", - " ]", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_transaction\",\n \"params\":[\"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\"],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns traces for the given transaction\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - The transaction's hash\r\n\r\n**Returns**\r\n\r\nObject - An object of type AdhocTraceArray, see trace_filter." 
- }, - "response": [] - }, - { - "name": "get", - "event": [ - { - "listen": "test", - "script": { - "id": "c1d276a3-867a-43ba-8d82-629650317491", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x1c39ba39e4735cb65978d4db400ddd70a72dc750\",", - " \"gas\": \"0x13e99\",", - " \"input\": \"0x16c72721\",", - " \"to\": \"0x2bd2326c993dfaef84f696526064ff22eba5b362\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x7eb25504e4c202cf3d62fd585d3e238f592c780cca82dacb2ed3cb5b38883add\",", - " \"blockNumber\": 3068185,", - " \"result\": {", - " \"gasUsed\": \"0x183\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 0", - " ],", - " \"transactionHash\": \"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\",", - " \"transactionPosition\": 2,", - " \"type\": \"call\"", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_get\",\n \"params\":[\n \"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\",\n [\"0x0\"]\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns trace at given position.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - The transaction's hash.\r\n\r\nQUANTITYARRAY - The index position of the trace.\r\n\r\n**Returns**\r\n\r\nObject - An object of type AdhocTraceArray, see trace_filter." 
- }, - "response": [] - }, - { - "name": "block", - "event": [ - { - "listen": "test", - "script": { - "id": "0ab5009a-3398-4d25-a894-862f86e10785", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": [", - " {", - " \"action\": {", - " \"author\": \"0x5088d623ba0fcf0131e0897a91734a4d83596aa0\",", - " \"rewardType\": \"block\",", - " \"value\": \"0x478eae0e571ba000\"", - " },", - " \"blockHash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\",", - " \"blockNumber\": 3,", - " \"result\": {},", - " \"subtraces\": 0,", - " \"traceAddress\": null,", - " \"transactionHash\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",", - " \"transactionPosition\": 0,", - " \"type\": \"reward\"", - " },", - " {", - " \"action\": {", - " \"author\": \"0xc8ebccc5f5689fa8659d83713341e5ad19349448\",", - " \"rewardType\": \"uncle\",", - " \"value\": \"0x340aad21b3b70000\"", - " },", - " \"blockHash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\",", - " \"blockNumber\": 3,", - " \"result\": {},", - " \"subtraces\": 0,", - " \"traceAddress\": null,", - " \"transactionHash\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",", - " \"transactionPosition\": 0,", - " \"type\": \"reward\"", - " }", - " ]", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_block\",\n\t\"params\":[\"0x3\"],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns traces created at given block.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer of a block number, or the 
string 'earliest', 'latest' or 'pending'.\r\n\r\n**Returns**\r\n\r\nObject - An object of type AdhocTraceArray." - }, - "response": [] - }, - { - "name": "filter", - "event": [ - { - "listen": "test", - "script": { - "id": "9b701d79-77b1-48fb-b8a7-4b38e6e63c5d", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": [", - " {", - " \"action\": {", - " \"author\": \"0x5088d623ba0fcf0131e0897a91734a4d83596aa0\",", - " \"rewardType\": \"block\",", - " \"value\": \"0x478eae0e571ba000\"", - " },", - " \"blockHash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\",", - " \"blockNumber\": 3,", - " \"result\": {},", - " \"subtraces\": 0,", - " \"traceAddress\": null,", - " \"transactionHash\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",", - " \"transactionPosition\": 0,", - " \"type\": \"reward\"", - " },", - " {", - " \"action\": {", - " \"author\": \"0xc8ebccc5f5689fa8659d83713341e5ad19349448\",", - " \"rewardType\": \"uncle\",", - " \"value\": \"0x340aad21b3b70000\"", - " },", - " \"blockHash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\",", - " \"blockNumber\": 3,", - " \"result\": {},", - " \"subtraces\": 0,", - " \"traceAddress\": null,", - " \"transactionHash\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",", - " \"transactionPosition\": 0,", - " \"type\": \"reward\"", - " }", - " ]", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_filter\",\n \"params\":[\n {\n \"fromBlock\":\"0x3\",\n \"toBlock\":\"0x3\"\n }\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": 
"{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns traces matching given filter\r\n\r\n**Parameters**\r\n\r\nObject - An object of type TraceFilter\r\n\r\nfromBlock: TAG - (optional) From this block.\r\n\r\ntoBlock: TAG - (optional) To this block.\r\n\r\nfromAddress: DATA, 20 Bytes - (optional) Sent from these addresses.\r\n\r\ntoAddress: DATA, 20 Bytes - (optional) Sent to these addresses.\r\n\r\nafter: QUANTITY - (optional) The offset trace number\r\n\r\ncount: QUANTITY - (optional) Integer number of traces to display in a batch.\r\n\r\n**Returns**\r\nObject - An object of type AdHocTraceArray matching the given filter." - }, - "response": [] - } - ], - "protocolProfileBehavior": {} - }, - { - "name": "erigon", - "item": [ - { - "name": "forks", - "event": [ - { - "listen": "test", - "script": { - "id": "331402b4-0302-4516-b601-b160484292b3", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"genesis\": \"0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3\",", - " \"forks\": [", - " 1150000,", - " 1920000,", - " 2463000,", - " 2675000,", - " 4370000,", - " 7280000,", - " 9069000,", - " 9200000", - " ]", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"erigon_forks\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the genesis block hash and a sorted list of already passed fork block numbers as well as the next fork block (if applicable)\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nObject - An object of type 
Fork\r\n\r\ngenesis: DATA, 32 Bytes - The hash of the genesis block\r\n\r\npassed: ARRAY of QUANTITY - Array of block numbers passed by this client\r\n\r\nnext: QUANTITY - (optional) the next fork block" - }, - "response": [] - }, - { - "name": "getHeaderByNumber", - "event": [ - { - "listen": "test", - "script": { - "id": "8f7e9f2d-1508-4ce6-bb7e-ab697a69ce66", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"parentHash\": \"0xb495a1d7e6663152ae92708da4843337b958146015a2802f4193a410044698c9\",", - " \"sha3Uncles\": \"0x6b17b938c6e4ef18b26ad81b9ca3515f27fd9c4e82aac56a1fd8eab288785e41\",", - " \"miner\": \"0x5088d623ba0fcf0131e0897a91734a4d83596aa0\",", - " \"stateRoot\": \"0x76ab0b899e8387436ff2658e2988f83cbf1af1590b9fe9feca3714f8d1824940\",", - " \"transactionsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", - " \"receiptsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", - " \"logsBloom\": \"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",", - " \"difficulty\": \"0x3fe802ffe\",", - " \"number\": \"0x3\",", - " \"gasLimit\": \"0x1388\",", - " \"gasUsed\": \"0x0\",", - " \"timestamp\": \"0x55ba4260\",", - " \"extraData\": \"0x476574682f76312e302e302d66633739643332642f6c696e75782f676f312e34\",", - " \"mixHash\": \"0x65e12eec23fe6555e6bcdb47aa25269ae106e5f16b54e1e92dcee25e1c8ad037\",", - " \"nonce\": \"0x2e9344e0cbde83ce\",", - " \"hash\": 
\"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\"", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"erigon_getHeaderByNumber\",\n\t\"params\":[\n\t\t\"0x3\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns a block's header given a block number ignoring the block's transaction and uncle list (may be faster).\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nObject - An object of type BlockHeader or null when no block was found. See eth_getBlockByHash" - }, - "response": [] - }, - { - "name": "getHeaderByHash", - "event": [ - { - "listen": "test", - "script": { - "id": "2ca80cf3-6a70-44ae-8741-3d8851096b65", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"parentHash\": \"0xb495a1d7e6663152ae92708da4843337b958146015a2802f4193a410044698c9\",", - " \"sha3Uncles\": \"0x6b17b938c6e4ef18b26ad81b9ca3515f27fd9c4e82aac56a1fd8eab288785e41\",", - " \"miner\": \"0x5088d623ba0fcf0131e0897a91734a4d83596aa0\",", - " \"stateRoot\": \"0x76ab0b899e8387436ff2658e2988f83cbf1af1590b9fe9feca3714f8d1824940\",", - " \"transactionsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", - " \"receiptsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", - " \"logsBloom\": 
\"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",", - " \"difficulty\": \"0x3fe802ffe\",", - " \"number\": \"0x3\",", - " \"gasLimit\": \"0x1388\",", - " \"gasUsed\": \"0x0\",", - " \"timestamp\": \"0x55ba4260\",", - " \"extraData\": \"0x476574682f76312e302e302d66633739643332642f6c696e75782f676f312e34\",", - " \"mixHash\": \"0x65e12eec23fe6555e6bcdb47aa25269ae106e5f16b54e1e92dcee25e1c8ad037\",", - " \"nonce\": \"0x2e9344e0cbde83ce\",", - " \"hash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\"", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"erigon_getHeaderByHash\",\n\t\"params\":[\n\t\t\"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns a block's header given a block's hash.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - Hash of a block\r\n\r\n**Returns**\r\n\r\nObject - An object of type BlockHeader or null when no block was found. 
See eth_getBlockByHash" - }, - "response": [] - }, - { - "name": "getLogsByHash", - "event": [ - { - "listen": "test", - "script": { - "id": "6a55ab5e-fa04-4e14-b7f9-1b387ee51188", - "exec": [ - "var expected = [", - " null,", - " [", - " {", - " \"address\": \"0xb8c77482e45f1f44de1745f52c74426c631bdd52\",", - " \"topics\": [", - " \"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef\",", - " \"0x000000000000000000000000001866ae5b3de6caa5a51543fd9fb64f524f5478\",", - " \"0x00000000000000000000000016a9c11e229ce221578a9adb3e7c0a48482e8063\"", - " ],", - " \"data\": \"0x00000000000000000000000000000000000000000000021ea4a7ecbf3c280000\",", - " \"blockNumber\": \"0x3d0cec\",", - " \"transactionHash\": \"0x99f91752d50d0c2c92e681fda082843747e8284d846f8b623e4cd280fbd7bb65\",", - " \"transactionIndex\": \"0x2\",", - " \"blockHash\": \"0x2f244c154cbacb0305581295b80efa6dffb0224b60386a5fc6ae9585e2a140c4\",", - " \"logIndex\": \"0x0\",", - " \"removed\": false", - " }", - " ]", - "]", - "", - "pm.test('Has correct result', function() {", - " // We test just two log entries to keep the test case small", - " var jsonData = pm.response.json();", - " pm.expect(jsonData.result[0]).to.be.deep.equal(expected[0]);", - " pm.expect(jsonData.result[2]).to.be.deep.equal(expected[1]);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"erigon_getLogsByHash\",\n\t\"params\":[\n\t\t\"0x2f244c154cbacb0305581295b80efa6dffb0224b60386a5fc6ae9585e2a140c4\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns an array of arrays of logs generated by the transactions in the block given by the block's hash.\r\n\r\n**Note**\r\n\r\nThe returned value is an array of arrays of log entries. 
There is an entry for each transaction in the block. BR BR If transaction X did not create any logs, the entry at result[X] will be null BR BR If transaction X generated N logs, the entry at position result[X] will be an array of N log objects\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - Hash of block at which to retreive data\r\n\r\n**Returns**\r\n\r\nObject - An object of type LogArray some of which may be null found in the block. See eth_getFilterChanges" - }, - "response": [] - }, - { - "name": "issuance", - "event": [ - { - "listen": "test", - "script": { - "id": "b5a34317-4baa-4fb9-95a8-83f4f757c842", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"blockReward\": \"0x478eae0e571ba000\",", - " \"uncleReward\": \"0x340aad21b3b70000\",", - " \"issuance\": \"0x7b995b300ad2a000\"", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"erigon_issuance\",\n\t\"params\":[\n\t\t\"0x3\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the total issuance (block reward plus uncle reward) for the given block.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nObject - An object of type Issuance\r\n\r\nblockReward: QUANTITY - The issuance to the miner of the block (includes nephew reward but not transaction fees)\r\n\r\nuncleReward: QUANTITY - The issuance to miners of included uncle (if any)\r\n\r\nissuance: QUANTITY - The sum of blockReward and uncleReward" - }, - "response": [] - } - ], - 
"protocolProfileBehavior": {} - }, - { - "name": "debug", - "item": [ - { - "name": "storageRangeAt", - "event": [ - { - "listen": "test", - "script": { - "id": "c4bcaf47-dd81-42af-9bbd-9256ba908426", - "exec": [ - "var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", - "if (isSilk) {", - " utils.notImplemented(\"debug_storageRangeAt\", pm.response.json())", - " return;", - "}", - "var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", - "if (isErigon) {", - " utils.cannotTest(\"debug_accountRange\", pm.response.json())", - " return;", - "}", - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"storage\": {", - " \"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563\": {", - " \"key\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",", - " \"value\": \"0x000000000000000000000000ed2f1401f8994d3ff2b2a923e743c24c2914ab4f\"", - " },", - " \"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6\": {", - " \"key\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",", - " \"value\": \"0x000000000000000000000000739c71235a9669f6b900490ab1c95310c19abc71\"", - " }", - " },", - " \"nextKey\": \"0x0000000000000000000000000000000000000000000000000000000000000002\"", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"debug_storageRangeAt\",\n\t\"params\":[\n\t\t\"0xd3f1853788b02e31067f2c6e65cb0ae56729e23e3c92e2393af9396fa182701d\", \n 1,\n \"0xb734c74ff4087493373a27834074f80acbd32827\",\n\t\t\"0x00\",\n 2\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": 
["{{HOST}}"] - }, - "description": "Returns information about a range of storage locations (if any) for the given address.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - Hash of block at which to retreive data\r\n\r\nQUANTITY, 8 bytes - Transaction index in the give block\r\n\r\nDATA, 20 Bytes - Contract address from which to retreive storage data\r\n\r\nDATA, 32 Bytes - Storage key to retreive\r\n\r\nQUANTITY, 8 bytes - The number of values to retreive\r\n\r\n**Returns**\r\n\r\nObject - An object of type StorageRangeResult which is defined as\r\n\r\npair: KEY/VALUE - A key value pair of the storage location\r\n\r\nnextKey: DATA, 32 Bytes - (optional) Hash pointing to next storage pair or empty" - }, - "response": [] - }, - { - "name": "accountRange", - "event": [ - { - "listen": "test", - "script": { - "id": "8fa3bd0b-1c56-4fd5-b46b-66d52a22d7fc", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": {", - " \"root\": \"0x8d8f6ffa5f2e55c0f8f0b88c3421d647e497f3ee0d66825f3f7433d7e244dde8\",", - " \"accounts\": {", - " \"0x0000000000000000000000000000000000000001\": {", - " \"balance\": \"0\",", - " \"nonce\": 0,", - " \"root\": \"0000000000000000000000000000000000000000000000000000000000000000\",", - " \"codeHash\": \"c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470\"", - " }", - " },", - " \"next\": \"AAAAAAAAAAAAAAAAAAAAAAAAAAI=\"", - " }", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"debug_accountRange\",\n\t\"params\":[\n\t\t\"0xaaaaa\", \n\t\t[1],\n 1,\n true,\n true,\n true\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - 
"host": ["{{HOST}}"] - }, - "description": "Returns a range of accounts involved in the given block range\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\nDATAARRAY - an array of prefixs against which to match account addresses (report only on accounts addresses that begin with this prefix, default matches all accounts)\r\n\r\nQUANTITY, 8 bytes - the maximum number of accounts to retreive\r\n\r\nBoolean - if true, do not return byte code from the address, if false return the byte code (if any)\r\n\r\nBoolean - if true, do not return storage from the address, if false return storage (if any)\r\n\r\nBoolean - if true, do not return missing preimages, if false do return them\r\n\r\n**Returns**\r\n\r\nObject - An object of type IteratorDump which is defined as\r\n\r\nroot: string - IteratorDump\r\n\r\naccounts: map[common.Address]DumpAccount - IteratorDump\r\n\r\nnext: []byte - IteratorDump\r\n\r\nbalance: string - DumpAccount\r\n\r\nnonce: uint64 - DumpAccount\r\n\r\nroot: string - DumpAccount\r\n\r\ncodeHash: string - DumpAccount\r\n\r\ncode: string - DumpAccount\r\n\r\nstorage: map[string]string - DumpAccount\r\n\r\naddress: common.Address - (optional) DumpAccount\r\n\r\nsecureKey: hexutil.Bytes - DumpAccount\r\n\r\n" - }, - "response": [] - }, - { - "name": "getModifiedAccountsByNumber", - "event": [ - { - "listen": "test", - "script": { - "id": "019465f3-a3d5-457c-bd86-4f50b02e518c", - "exec": [ - "var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", - "if (isSilk) {", - " utils.notImplemented(\"debug_getModifiedAccountsByNumber\", pm.response.json())", - " return;", - "}", - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": [", - " \"0x8764b360076809bba4635c4281c3f44c1677d013\",", - " \"0x1194e966965418c7d73a42cceeb254d875860356\",", - " \"0x42e6723a0c884e922240e56d7b618bec96f35800\",", - " \"0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5\",", - " 
\"0xdf88d2cf450e1134e0cd794c3b89d648c3269ffc\",", - " \"0x2a65aca4d5fc5b5c859090a6c34d164135398226\",", - " \"0x68795c4aa09d6f4ed3e5deddf8c2ad3049a601da\",", - " \"0x8751355da8bb4854620e247904fc64c2dbff0484\"", - " ]", - "}", - "", - "pm.test('Has correct result', function() {", - " const jsonData = pm.response.json();", - " jsonData.result = jsonData.result.sort();", - " expected.result = expected.result.sort();", - " pm.expect(jsonData).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"debug_getModifiedAccountsByNumber\",\n\t\"params\":[\n\t\t\"0xccccd\",\n\t\t\"0xcccce\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns a list of accounts modified in the given block.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\". 
Optional, defaults to startNum\r\n\r\n**Returns**\r\n\r\nArray of DATA, 20 Bytes - Array of addresses modifed in the given block range" - }, - "response": [] - }, - { - "name": "getModifiedAccountsByHash", - "event": [ - { - "listen": "test", - "script": { - "id": "bbbf909f-9ce2-4558-8e29-abc5ac1f5899", - "exec": [ - "var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", - "if (isSilk) {", - " utils.notImplemented(\"debug_getModifiedAccountsByHash\", pm.response.json())", - " return;", - "}", - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": [", - " \"0x8764b360076809bba4635c4281c3f44c1677d013\",", - " \"0x1194e966965418c7d73a42cceeb254d875860356\",", - " \"0x42e6723a0c884e922240e56d7b618bec96f35800\",", - " \"0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5\",", - " \"0xdf88d2cf450e1134e0cd794c3b89d648c3269ffc\",", - " \"0x2a65aca4d5fc5b5c859090a6c34d164135398226\",", - " \"0x68795c4aa09d6f4ed3e5deddf8c2ad3049a601da\",", - " \"0x8751355da8bb4854620e247904fc64c2dbff0484\"", - " ]", - "}", - "", - "pm.test('Has correct result', function() {", - " const jsonData = pm.response.json();", - " jsonData.result = jsonData.result.sort();", - " expected.result = expected.result.sort();", - " pm.expect(jsonData).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"debug_getModifiedAccountsByHash\",\n\t\"params\":[\n\t\t\"0x2a1af018e33bcbd5015c96a356117a5251fcccf94a9c7c8f0148e25fdee37aec\",\n\t\t\"0x4e3d3e7eee350df0ee6e94a44471ee2d22cfb174db89bbf8e6c5f6aef7b360c5\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns a list of accounts modified in the given block.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - the 
first hash of block at which to retreive data\r\n\r\nDATA, 32 Bytes - the last hash of block at which to retreive data. Optional, defaults to startHash\r\n\r\n**Returns**\r\n\r\nArray of DATA, 20 Bytes - Array of addresses modifed in the given block range" - }, - "response": [] - }, - { - "name": "traceTransaction", - "event": [ - { - "listen": "test", - "script": { - "id": "a2e80bc5-85c6-4415-8e06-22ebe0d310cd", - "exec": [ - "var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", - "if (isSilk) {", - " utils.notImplemented(\"debug_traceTransaction\", pm.response.json())", - " return;", - "}", - "var expected = [", - " {", - " \"pc\": 0,", - " \"op\": \"PUSH1\",", - " \"gas\": 179000,", - " \"gasCost\": 3,", - " \"depth\": 1,", - " \"stack\": [],", - " \"memory\": [],", - " \"storage\": {}", - " },", - " {", - " \"pc\": 2,", - " \"op\": \"PUSH1\",", - " \"gas\": 178997,", - " \"gasCost\": 3,", - " \"depth\": 1,", - " \"stack\": [", - " \"0000000000000000000000000000000000000000000000000000000000000060\"", - " ],", - " \"memory\": [],", - " \"storage\": {}", - " },", - " {", - " \"pc\": 284,", - " \"op\": \"STOP\",", - " \"gas\": 81142,", - " \"gasCost\": 0,", - " \"depth\": 1,", - " \"stack\": [],", - " \"memory\": [", - " \"0000000000000000000000000000000000000000000000000000000000000003\",", - " \"0000000000000000000000000000000000000000000000000000000000000000\",", - " \"0000000000000000000000000000000000000000000000000000000000000060\"", - " ],", - " \"storage\": {", - " \"0000000000000000000000000000000000000000000000000000000000000000\": \"0000000000000000000000000000000000000000000000000000000000000000\",", - " \"0000000000000000000000000000000000000000000000000000000000000001\": \"0000000000000000000000000000000000000000000000000000000000000001\",", - " \"0000000000000000000000000000000000000000000000000000000000000002\": \"0000000000000000000000000000000000000000000000000000000000000001\",", - " 
\"0000000000000000000000000000000000000000000000000000000000000003\": \"0000000000000000000000000000000000000000000000000000000000000006\",", - " \"c2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b\": \"000000000000000000000000881b0a4e9c55d08e31d8d3c022144d75a454211c\",", - " \"c2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85c\": \"000000000000000000000000fd2605a2bf58fdbb90db1da55df61628b47f9e8c\"", - " }", - " }", - "]", - "", - "pm.test('Has correct result', function() {", - " // because the returned data is pretty large, we only test the first two value and the last", - " var jsonData = pm.response.json()", - " pm.expect(jsonData.result.structLogs[0]).to.be.deep.equal(expected[0]);", - " pm.expect(jsonData.result.structLogs[1]).to.be.deep.equal(expected[1]);", - " pm.expect(jsonData.result.structLogs[jsonData.result.structLogs.length-1]).to.be.deep.equal(expected[2]);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"debug_traceTransaction\",\n\t\"params\":[\n\t\t\"0x893c428fed019404f704cf4d9be977ed9ca01050ed93dccdd6c169422155586f\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns Geth style transaction traces.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - hash of transaction to trace.\r\n\r\n**Returns**\r\n\r\nSTACK_TRACE - An array of stack traces as per Geth" - }, - "response": [] - } - ], - "protocolProfileBehavior": {} - }, - { - "name": "deprecated", - "item": [ - { - "name": "eth", - "item": [ - { - "name": "accounts (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "49fab8c4-6858-4475-89f9-2c06a0acaaa0", - "exec": ["utils.isDeprecated(\"eth_accounts\", pm.response.json())", ""], - "type": 
"text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_accounts\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns a list of addresses owned by the client.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nArray of DATA, 20 Bytes - addresses owned by the client" - }, - "response": [] - }, - { - "name": "getCompilers (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "71e1fac7-5027-4ec7-8a6f-b7ebba79ebc7", - "exec": ["utils.isDeprecated(\"eth_getCompilers\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getCompilers\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns a list of available compilers in the client.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nObject - An object of type StringArray of available compilers" - }, - "response": [] - }, - { - "name": "compileLLL (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "c143eb2a-869c-4d61-b77a-f1d96e35867d", - "exec": ["utils.isDeprecated(\"eth_compileLLL\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": 
"{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_compileLLL\",\n\t\"params\":[\n\t\t\"(returnlll (suicide (caller)))\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns compiled LLL code.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - The source code\r\n\r\n**Returns**\r\n\r\nDATA - The compiled source code" - }, - "response": [] - }, - { - "name": "compileSolidity (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "a225f789-727a-45b7-8233-b83fa9710f0b", - "exec": ["utils.isDeprecated(\"eth_compileSolidity\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_compileSolidity\",\n\t\"params\":[\n\t\t\"contract test { function multiply(uint a) returns(uint d) { return a * 7; } }\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns compiled solidity code.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - The source code\r\n\r\n**Returns**\r\n\r\nDATA - The compiled source code" - }, - "response": [] - }, - { - "name": "compileSerpent (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "281f3795-1854-47a9-b256-2e14f32ebff6", - "exec": ["utils.isDeprecated(\"eth_compileSerpent\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_compileSerpent\",\n\t\"params\":[\"/* some serpent 
*/\"],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns compiled serpent code.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - The source code\r\n\r\n**Returns**\r\n\r\nDATA - The compiled source code" - }, - "response": [] - }, - { - "name": "sign (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "f3a959a5-3f2a-417b-ab6f-101ca25235ab", - "exec": ["utils.isDeprecated(\"eth_sign\", pm.response.json())", ""], - "type": "text/javascript" - } - }, - { - "listen": "prerequest", - "script": { - "id": "ccbe7bce-2ee4-4872-884d-884de423d002", - "exec": [ - "var isParity = pm.environment.get('HOST') == \"{{PARITY}}\";", - "if (isParity) {", - " pm.test.skip('Skipping for parity')", - "}" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_sign\",\n\t\"params\":[\n\t\t\"0x9b2055d370f73ec7d8a03e965129118dc8f5bf83\", \n\t\t\"0xdeadbeef\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Calculates an Ethereum specific signature with: sign(keccak256(\"\\x19Ethereum Signed Message:\\n\" + len(message) + message))).\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nDATA, 20 Bytes - address\r\n\r\nDATA - message to sign\r\n\r\n**Returns**\r\n\r\nDATA - The signature" - }, - "response": [] - } - ], - "protocolProfileBehavior": {}, - "_postman_isSubFolder": true - }, - { - "name": "db", - "item": [ - { - "name": "getString (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "adc610c7-58da-4b14-86eb-5ad2b7e1bb42", - "exec": ["utils.isDeprecated(\"db_getString\", 
pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"db_getString\",\n\t\"params\":[\n\t\t\"testDB\",\n\t\t\"myKey\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns string from the local database.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - Database name\r\n\r\nString - Key name\r\n\r\n**Returns**\r\n\r\nSTRING - The previously stored string" - }, - "response": [] - }, - { - "name": "putString (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "42952899-d220-432e-9c2f-5fd8b7f63a10", - "exec": ["utils.isDeprecated(\"db_putString\", pm.response.json())", ""], - "type": "text/javascript" - } - }, - { - "listen": "prerequest", - "script": { - "id": "765518a5-fcb0-4c40-bfd9-91a7dabaa24c", - "exec": [""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"db_putString\",\n\t\"params\":[\n\t\t\"testDB\",\n\t\t\"myKey\",\n\t\t\"myString\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Stores a string in the local database.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - Database name\r\n\r\nString - Key name\r\n\r\nString - String to store\r\n\r\n**Returns**\r\n\r\nBoolean - true if the value was stored, false otherwise" - }, - "response": [] - }, - { - "name": "getHex (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": 
"1877532c-58ef-49e6-9adc-298e68e8e519", - "exec": ["utils.isDeprecated(\"db_getHex\", pm.response.json());"], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"db_getHex\"\n\t,\"params\":[\n\t\t\"testDB\",\n\t\t\"myKey\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns binary data from the local database.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - Database name\r\n\r\nString - Key name\r\n\r\n**Returns**\r\n\r\nDATA - The previously stored data" - }, - "response": [] - }, - { - "name": "putHex (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "eb8f901f-11f6-40f1-96ba-db322d1bc017", - "exec": ["utils.isDeprecated(\"db_putHex\", pm.response.json())", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"db_putHex\",\n\t\"params\":[\n\t\t\"testDB\",\n\t\t\"myKey\",\n\t\t\"0x68656c6c6f20776f726c64\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Stores binary data in the local database.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - Database name\r\n\r\nString - Key name\r\n\r\nDATA - The data to store\r\n\r\n**Returns**\r\n\r\nBoolean - true if the value was stored, false otherwise" - }, - "response": [] - } - ], - "protocolProfileBehavior": {}, - "_postman_isSubFolder": true - }, - { - "name": "shh", - "item": [ - { - "name": "post (deprecated)", - "event": [ - { 
- "listen": "test", - "script": { - "id": "6f40e9ca-755e-42e3-9532-c629c98d7038", - "exec": ["utils.isDeprecated(\"shh_post\", pm.response.json());", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_post\",\n\t\"params\":[{\n\t\t\"from\":\"0xc931d93e97ab07fe42d923478ba2465f2..\",\n\t\t\"topics\": [\n\t\t\t\"0x68656c6c6f20776f726c64\"\n\t\t],\n\t\t\"payload\":\"0x68656c6c6f20776f726c64\",\n\t\t\"ttl\":\"0x64\",\n\t\t\"priority\":\"0x64\"\n\t}],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Sends a whisper message.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nObject - An object of type Post\r\n\r\n**Returns**\r\n\r\nBoolean - true if the message was send, false otherwise" - }, - "response": [] - }, - { - "name": "version (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "4d2835ac-ef75-4a3e-ac48-8e6afa2508cb", - "exec": ["utils.isDeprecated(\"shh_version\", pm.response.json());", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_version\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Returns the current whisper protocol version.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nSTRING - The current whisper protocol version" - }, - "response": [] - }, - { - "name": "newIdentity (deprecated)", - "event": [ - { - "listen": "test", - 
"script": { - "id": "d5ca5bc1-1972-4479-a5cb-ea621c40c1f2", - "exec": ["utils.isDeprecated(\"shh_newIdentity\", pm.response.json());", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_newIdentity\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Creates new whisper identity in the client.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nDATA, 60 Bytes - The address of the new identiy" - }, - "response": [] - }, - { - "name": "hasIdentity (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "237a0212-f467-4bc7-825d-ce8eb97d02e7", - "exec": ["utils.isDeprecated(\"shh_hasIdentity\", pm.response.json());", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_hasIdentity\",\n\t\"params\":[\n\t\t\"0x04f96a5e25610293e42a73908e93ccc8c4d4dc0edcfa9fa872f50cb214e08ebf61a03e245533f97284d442460f2998cd41858798ddfd4d661997d3940272b717b1\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Checks if the client hold the private keys for a given identity.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nDATA, 60 Bytes - The identity address to check\r\n\r\n**Returns**\r\n\r\nBoolean - true if the client holds the privatekey for that identity, false otherwise" - }, - "response": [] - }, - { - "name": "newGroup (deprecated)", - "event": [ - { - "listen": "test", 
- "script": { - "id": "498a4713-d7bf-4849-a794-bcb4ae1b13f6", - "exec": ["utils.isDeprecated(\"shh_newGroup\", pm.response.json());", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_newGroup\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Create a new group.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nDATA, 60 Bytes - The address of the new group" - }, - "response": [] - }, - { - "name": "addToGroup (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "59a5f8d0-6cb0-4948-9a94-a67494d56deb", - "exec": ["utils.isDeprecated(\"shh_addToGroup\", pm.response.json());", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_addToGroup\",\n\t\"params\":[\n\t\t\"0x04f96a5e25610293e42a73908e93ccc8c4d4dc0edcfa9fa872f50cb214e08ebf61a03e245533f97284d442460f2998cd41858798ddfd4d661997d3940272b717b1\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Add to a group.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nDATA, 60 Bytes - The identity address to add to a group\r\n\r\n**Returns**\r\n\r\nBoolean - true if the identity was successfully added to the group, false otherwise" - }, - "response": [] - }, - { - "name": "newFilter (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "a3c1325c-f738-473f-b981-7a8f271377bd", - "exec": 
["utils.isDeprecated(\"shh_newFilter\", pm.response.json());", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_newFilter\",\n\t\"params\":[{\n\t\t\"topics\": [\n\t\t\t\"0x12341234bf4b564f\"\n\t\t],\n\t\t\"to\": \"0x2341234bf4b2341234bf4b564f...\"\n\t}],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Creates filter to notify, when client receives whisper message matching the filter options.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nObject - An object of type Filter\r\n\r\n**Returns**\r\n\r\nQUANTITY - The newly created filter id" - }, - "response": [] - }, - { - "name": "uninstallFilter (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "1f635382-7c93-456e-a4e0-6c9a31c3ff3e", - "exec": ["utils.isDeprecated(\"shh_uninstallFilter\", pm.response.json());", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_uninstallFilter\",\n\t\"params\":[\n\t\t\"0x7\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Uninstalls a filter with given id.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nQUANTITY - The filter id\r\n\r\n**Returns**\r\n\r\nBoolean - true if the filter was successfully uninstalled, false otherwise" - }, - "response": [] - }, - { - "name": "getFilterChanges (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "1b86fec4-c310-4ad4-b87d-d6bcaa3e707c", - 
"exec": ["utils.isDeprecated(\"shh_getFilterChanges\", pm.response.json());", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_getFilterChanges\",\n\t\"params\":[\n\t\t\"0x7\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Polling method for whisper filters. Returns new messages since the last call of this method.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nQUANTITY - The filter id\r\n\r\n**Returns**\r\n\r\nObject - An object of type MessageArray received since last poll" - }, - "response": [] - }, - { - "name": "getMessages (deprecated)", - "event": [ - { - "listen": "test", - "script": { - "id": "8cdf20b9-4b07-43ad-a96e-66d49cacb651", - "exec": ["utils.isDeprecated(\"shh_getMessages\", pm.response.json());", ""], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_getMessages\",\n\t\"params\":[\n\t\t\"0x7\"\n\t],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": ["{{HOST}}"] - }, - "description": "Get all messages matching a filter. 
Unlike shh_getFilterChanges this returns all messages.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nQUANTITY - The filter id\r\n\r\n**Returns**\r\n\r\nObject - An object of type MessageArray received since last poll" - }, - "response": [] - } - ], - "protocolProfileBehavior": {}, - "_postman_isSubFolder": true - } - ], - "description": "RPC commands in this group have been deprecated.", - "event": [ - { - "listen": "prerequest", - "script": { - "id": "f3715e8c-8219-4b4c-a797-283787c030da", - "type": "text/javascript", - "exec": [""] - } - }, - { - "listen": "test", - "script": { - "id": "b1b0fe57-01a2-480a-a5bf-fd11942fd43c", - "type": "text/javascript", - "exec": [""] - } - } - ], - "protocolProfileBehavior": {} - } - ], - "event": [ - { - "listen": "prerequest", - "script": { - "id": "28916081-d267-4803-b88f-38f0cfac83f3", - "type": "text/javascript", - "exec": [ - "utils = {", - " notImplemented: function(methodName, jsonData) {", - " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", - " var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", - " if (!isErigon && !isSilk) // only test erigon", - " return;", - "", - " var testNotImplemented = pm.globals.get('TEST_NOT_IMPLEMENTED') === 'true';", - " if (testNotImplemented) { // defaults to false, therefore don't test", - " pm.test('NOT IMPLEMENTED', function() {", - " pm.expect(false).to.be(true);", - " })", - " } else {", - " // pass unless user has explicitly told us to test not implemented", - " var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"error\": {", - " \"code\": -32000,", - " \"message\": \"the method is currently not implemented: \" + methodName", - " }", - " }", - " if (jsonData.error)", - " delete jsonData.error.data;", - " pm.test('NOT IMPLEMENTED', function() {", - " pm.expect(jsonData).to.deep.equals(expected);", - " })", - " }", - " },", - "", - " isDeprecated: function(methodName, jsonData) {", - " var 
isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", - " var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", - " if (!isErigon && !isSilk) // only test erigon", - " return;", - "", - " var testDeprecated = pm.globals.get('TEST_DEPRECATED') === 'true';", - " if (testDeprecated) { // defaults to false, therefore don't test", - " pm.test('DEPRECATED', function() {", - " console.log(\"testDeprecated2: \", testDeprecated)", - " pm.expect(false).to.be(true);", - " })", - " } else {", - " // pass unless user has explicitly told us to fail deprecated", - " var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"error\": {", - " \"code\": -32000,", - " \"message\": \"the method has been deprecated: \" + methodName", - " }", - " }", - " if (jsonData.error)", - " delete jsonData.error.data;", - " pm.test('DEPRECATED', function() {", - " pm.expect(jsonData).to.deep.equals(expected);", - " })", - " }", - " },", - "", - " cannotTest: function(methodName, jsonData) {", - " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", - " var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", - " if (!isErigon && !isSilk) // only test erigon", - " return;", - "", - " var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"result\": \"Cannot test - value changes\"", - " }", - " pm.test('VALUE CHANGES, CANNOT TEST: ' + methodName, function() {", - " jsonData.result = \"Cannot test - value changes\";", - " pm.expect(jsonData).to.deep.equals(expected);", - " })", - " },", - "};" - ] - } - }, - { - "listen": "test", - "script": { - "id": "be6e47aa-dcea-4eaf-941f-889669172f43", - "type": "text/javascript", - "exec": [ - "pm.test('Base tests', function() {", - " const jsonData = pm.response.json();", - " pm.response.to.have.status(200);", - " pm.expect(jsonData !== null)", - " jsonData.errors == null || pm.expect(jsonData.errors).to.be.empty;", - "})", - "" - ] - } - } - ], - "protocolProfileBehavior": {} -} diff --git 
a/cmd/rpcdaemon22/postman/Trace_Testing.json b/cmd/rpcdaemon22/postman/Trace_Testing.json deleted file mode 100644 index 74e7221beb9..00000000000 --- a/cmd/rpcdaemon22/postman/Trace_Testing.json +++ /dev/null @@ -1,7474 +0,0 @@ -{ - "info": { - "_postman_id": "7b2a3a4b-0c75-4b99-8e8b-4237bcbd2494", - "name": "Trace Testing", - "description": "Tests related to tracing", - "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" - }, - "item": [ - { - "name": "trace_call", - "item": [ - { - "name": "trace_call - all", - "event": [ - { - "listen": "test", - "script": { - "id": "cad5e0e8-19aa-4c85-b322-fe4e9e40f0f7", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": {", - " \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\": {", - " \"balance\": {", - " \"+\": \"0x0\"", - " },", - " \"code\": {", - " \"+\": \"0x\"", - " },", - " \"nonce\": {", - " \"+\": \"0x1\"", - " },", - " \"storage\": {}", - " },", - " \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\": {", - " \"balance\": {", - " \"+\": \"0x186a0\"", - " },", - " \"code\": {", - " \"+\": \"0x\"", - " },", - " \"nonce\": {", - " \"+\": \"0x0\"", - " },", - " \"storage\": {}", - " }", - " },", - " \"trace\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",", - " \"gas\": \"0x1dcd12f8\",", - " \"input\": \"0x\",", - " \"to\": \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",", - " \"value\": \"0x186a0\"", - " },", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"type\": \"call\"", - " }", - " ],", - " \"vmTrace\": {", - " \"code\": \"0x\",", - " \"ops\": []", - " }", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" 
- } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"jsonrpc\":\"2.0\",\n \"method\":\"trace_call\",\n \"params\":[\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\",\"vmTrace\",\"stateDiff\"],\n \"0x186a0\"\n ],\n \"id\": \"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_call - none", - "event": [ - { - "listen": "test", - "script": { - "id": "b5c127ba-f385-4ae5-a779-038281427a49", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [],", - " \"vmTrace\": null", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"jsonrpc\":\"2.0\",\n \"method\":\"trace_call\",\n \"params\":[\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [],\n \"0x186a0\"\n ],\n \"id\": \"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_call - trace only", - "event": [ - { - "listen": "test", - "script": { - "id": "719796d3-02f9-499d-b22d-c7f42f9fa80a", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [", - " {", - " \"action\": {", - 
" \"callType\": \"call\",", - " \"from\": \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",", - " \"gas\": \"0x1dcd12f8\",", - " \"input\": \"0x\",", - " \"to\": \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",", - " \"value\": \"0x186a0\"", - " },", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"type\": \"call\"", - " }", - " ],", - " \"vmTrace\": null", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"jsonrpc\":\"2.0\",\n \"method\":\"trace_call\",\n \"params\":[\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\"],\n \"0x186a0\"\n ],\n \"id\": \"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_call - vmTrace only", - "event": [ - { - "listen": "test", - "script": { - "id": "bf873e95-ad20-42ef-b5da-71ef503f314c", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [],", - " \"vmTrace\": {", - " \"code\": \"0x\",", - " \"ops\": []", - " }", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"jsonrpc\":\"2.0\",\n 
\"method\":\"trace_call\",\n \"params\":[\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"vmTrace\"],\n \"0x186a0\"\n ],\n \"id\": \"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_call - stateDiff only", - "event": [ - { - "listen": "test", - "script": { - "id": "eabf15c8-247b-4bfb-acfd-81c1851fa9d7", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": {", - " \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\": {", - " \"balance\": {", - " \"+\": \"0x0\"", - " },", - " \"code\": {", - " \"+\": \"0x\"", - " },", - " \"nonce\": {", - " \"+\": \"0x1\"", - " },", - " \"storage\": {}", - " },", - " \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\": {", - " \"balance\": {", - " \"+\": \"0x186a0\"", - " },", - " \"code\": {", - " \"+\": \"0x\"", - " },", - " \"nonce\": {", - " \"+\": \"0x0\"", - " },", - " \"storage\": {}", - " }", - " },", - " \"trace\": [],", - " \"vmTrace\": null", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n \"jsonrpc\":\"2.0\",\n \"method\":\"trace_call\",\n \"params\":[\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"stateDiff\"],\n \"0x186a0\"\n ],\n \"id\": \"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - } - ], - "protocolProfileBehavior": {} - }, - { 
- "name": "trace_callMany", - "item": [ - { - "name": "trace_callMany - all", - "event": [ - { - "listen": "test", - "script": { - "id": "7949387e-4c36-4942-a5a7-1759d7c43975", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": [", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": {", - " \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\": {", - " \"balance\": {", - " \"+\": \"0x0\"", - " },", - " \"code\": {", - " \"+\": \"0x\"", - " },", - " \"nonce\": {", - " \"+\": \"0x1\"", - " },", - " \"storage\": {}", - " },", - " \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\": {", - " \"balance\": {", - " \"+\": \"0x186a0\"", - " },", - " \"code\": {", - " \"+\": \"0x\"", - " },", - " \"nonce\": {", - " \"+\": \"0x0\"", - " },", - " \"storage\": {}", - " }", - " },", - " \"trace\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",", - " \"gas\": \"0x1dcd12f8\",", - " \"input\": \"0x\",", - " \"to\": \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",", - " \"value\": \"0x186a0\"", - " },", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"type\": \"call\"", - " }", - " ],", - " \"vmTrace\": {", - " \"code\": \"0x\",", - " \"ops\": []", - " }", - " },", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": {", - " \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\": {", - " \"balance\": \"=\",", - " \"code\": \"=\",", - " \"nonce\": {", - " \"*\": {", - " \"from\": \"0x1\",", - " \"to\": \"0x2\"", - " }", - " },", - " \"storage\": {}", - " },", - " \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\": {", - " \"balance\": {", - " \"*\": {", - " \"from\": \"0x186a0\",", - " \"to\": \"0x30d40\"", - " }", - " },", - " \"code\": \"=\",", - " \"nonce\": \"=\",", - " \"storage\": {}", - " }", - " },", - " \"trace\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": 
\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",", - " \"gas\": \"0x1dcd12f8\",", - " \"input\": \"0x\",", - " \"to\": \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",", - " \"value\": \"0x186a0\"", - " },", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"type\": \"call\"", - " }", - " ],", - " \"vmTrace\": {", - " \"code\": \"0x\",", - " \"ops\": []", - " }", - " }", - " ],", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_callMany\",\n \"params\": [\n [\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\",\"vmTrace\",\"stateDiff\"]\n ],\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\",\"vmTrace\",\"stateDiff\"]\n ]\n ],\n \"0x186a0\"\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_callMany - none", - "event": [ - { - "listen": "test", - "script": { - "id": "1670fbac-fbed-4c5d-9e4f-cacf151fab86", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": [", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [],", - " \"vmTrace\": null", - " },", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [],", - " \"vmTrace\": null", - " }", - " ],", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct 
result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_callMany\",\n \"params\": [\n [\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n []\n ],\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n []\n ]\n ],\n \"0x186a0\"\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_callMany - trace only", - "event": [ - { - "listen": "test", - "script": { - "id": "1e12f8e6-f089-458c-9b8f-9cc17d1f2828", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": [", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",", - " \"gas\": \"0x1dcd12f8\",", - " \"input\": \"0x\",", - " \"to\": \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",", - " \"value\": \"0x186a0\"", - " },", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"type\": \"call\"", - " }", - " ],", - " \"vmTrace\": null", - " },", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",", - " \"gas\": \"0x1dcd12f8\",", - " \"input\": \"0x\",", - " \"to\": \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",", - " 
\"value\": \"0x186a0\"", - " },", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"type\": \"call\"", - " }", - " ],", - " \"vmTrace\": null", - " }", - " ],", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_callMany\",\n \"params\": [\n [\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\"]\n ],\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\"]\n ]\n ],\n \"0x186a0\"\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_callMany - vmTrace only", - "event": [ - { - "listen": "test", - "script": { - "id": "27cc3046-2d7c-4b7a-ae7d-f1b11008fc4c", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": [", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [],", - " \"vmTrace\": {", - " \"code\": \"0x\",", - " \"ops\": []", - " }", - " },", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [],", - " \"vmTrace\": {", - " \"code\": \"0x\",", - " \"ops\": []", - " }", - " }", - " ],", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": 
"POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_callMany\",\n \"params\": [\n [\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"vmTrace\"]\n ],\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"vmTrace\"]\n ]\n ],\n \"0x186a0\"\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_callMany - stateDiff only", - "event": [ - { - "listen": "test", - "script": { - "id": "d944881a-3184-4b85-a047-a1ce1ec115cd", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": [", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": {", - " \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\": {", - " \"balance\": {", - " \"+\": \"0x0\"", - " },", - " \"code\": {", - " \"+\": \"0x\"", - " },", - " \"nonce\": {", - " \"+\": \"0x1\"", - " },", - " \"storage\": {}", - " },", - " \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\": {", - " \"balance\": {", - " \"+\": \"0x186a0\"", - " },", - " \"code\": {", - " \"+\": \"0x\"", - " },", - " \"nonce\": {", - " \"+\": \"0x0\"", - " },", - " \"storage\": {}", - " }", - " },", - " \"trace\": [],", - " \"vmTrace\": null", - " },", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": {", - " \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\": {", - " \"balance\": \"=\",", - " \"code\": \"=\",", - " \"nonce\": {", - " \"*\": {", - " \"from\": \"0x1\",", - " \"to\": \"0x2\"", - " }", - " },", - " \"storage\": {}", - " },", - " \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\": {", - " \"balance\": {", - " \"*\": {", - " \"from\": \"0x186a0\",", - " \"to\": \"0x30d40\"", - " }", - 
" },", - " \"code\": \"=\",", - " \"nonce\": \"=\",", - " \"storage\": {}", - " }", - " },", - " \"trace\": [],", - " \"vmTrace\": null", - " }", - " ],", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_callMany\",\n \"params\": [\n [\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"stateDiff\"]\n ],\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"stateDiff\"]\n ]\n ],\n \"0x1e8480\"\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - } - ], - "protocolProfileBehavior": {} - }, - { - "name": "trace_replayTransaction", - "item": [ - { - "name": "trace_replayTransaction - all", - "event": [ - { - "listen": "test", - "script": { - "id": "0df08365-de62-444f-a2a8-1585c6b2d9b1", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": {", - " \"0x00a63d34051602b2cb268ea344d4b8bc4767f2d4\": {", - " \"balance\": {", - " \"*\": {", - " \"from\": \"0x52b7cb1385ccf49b2b\",", - " \"to\": \"0x5236bafcfeb4e73b2b\"", - " }", - " },", - " \"code\": \"=\",", - " \"nonce\": {", - " \"*\": {", - " \"from\": \"0xc6f\",", - " \"to\": \"0xc70\"", - " }", - " },", - " \"storage\": {}", - " },", - " \"0x1a060b0604883a99809eb3f798df71bef6c358f1\": {", - " \"balance\": {", - " \"*\": {", - " \"from\": \"0x6f9b59db405cf2c70\",", - " \"to\": 
\"0x6f9b71bb0e49d6c70\"", - " }", - " },", - " \"code\": \"=\",", - " \"nonce\": \"=\",", - " \"storage\": {}", - " },", - " \"0x87cc0d78ee64a9f11b5affdd9ea523872eae14e4\": {", - " \"balance\": {", - " \"*\": {", - " \"from\": \"0x3afccb788fcd0e00\",", - " \"to\": \"0xbc0b6402c90c2e00\"", - " }", - " },", - " \"code\": \"=\",", - " \"nonce\": \"=\",", - " \"storage\": {}", - " }", - " },", - " \"trace\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x00a63d34051602b2cb268ea344d4b8bc4767f2d4\",", - " \"gas\": \"0x0\",", - " \"input\": \"0x\",", - " \"to\": \"0x87cc0d78ee64a9f11b5affdd9ea523872eae14e4\",", - " \"value\": \"0x810e988a393f2000\"", - " },", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"type\": \"call\"", - " }", - " ],", - " \"vmTrace\": {", - " \"code\": \"0x\",", - " \"ops\": []", - " }", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayTransaction\",\n\t\"params\":[\n \"0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f\",\n [\"trace\",\"vmTrace\",\"stateDiff\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_replayTransaction - none", - "event": [ - { - "listen": "test", - "script": { - "id": "5f84df2a-b6ab-45ca-9b18-f4f9534c1458", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [],", - " \"vmTrace\": null", - " },", 
- " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayTransaction\",\n\t\"params\":[\n \"0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f\",\n []\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_replayTransaction - trace only", - "event": [ - { - "listen": "test", - "script": { - "id": "33278201-153b-490c-acb2-07b233baac25", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x00a63d34051602b2cb268ea344d4b8bc4767f2d4\",", - " \"gas\": \"0x0\",", - " \"input\": \"0x\",", - " \"to\": \"0x87cc0d78ee64a9f11b5affdd9ea523872eae14e4\",", - " \"value\": \"0x810e988a393f2000\"", - " },", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"type\": \"call\"", - " }", - " ],", - " \"vmTrace\": null", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayTransaction\",\n\t\"params\":[\n \"0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f\",\n 
[\"trace\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_replayTransaction - vmTrace only", - "event": [ - { - "listen": "test", - "script": { - "id": "a84b1a91-ee56-46ee-b4fa-231f7aad455e", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [],", - " \"vmTrace\": {", - " \"code\": \"0x\",", - " \"ops\": []", - " }", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayTransaction\",\n\t\"params\":[\n \"0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f\",\n [\"vmTrace\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_replayTransaction - stateDiff only", - "event": [ - { - "listen": "test", - "script": { - "id": "915e86b3-5172-44b3-87d9-9025c074ea5b", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": {", - " \"0x00a63d34051602b2cb268ea344d4b8bc4767f2d4\": {", - " \"balance\": {", - " \"*\": {", - " \"from\": \"0x52b7cb1385ccf49b2b\",", - " \"to\": \"0x5236bafcfeb4e73b2b\"", - " }", - " },", - " \"code\": \"=\",", - " \"nonce\": {", - " \"*\": {", - " \"from\": \"0xc6f\",", - " \"to\": \"0xc70\"", - " }", - " },", - " \"storage\": {}", - " },", - " \"0x1a060b0604883a99809eb3f798df71bef6c358f1\": {", - " \"balance\": {", - " \"*\": {", - " \"from\": 
\"0x6f9b59db405cf2c70\",", - " \"to\": \"0x6f9b71bb0e49d6c70\"", - " }", - " },", - " \"code\": \"=\",", - " \"nonce\": \"=\",", - " \"storage\": {}", - " },", - " \"0x87cc0d78ee64a9f11b5affdd9ea523872eae14e4\": {", - " \"balance\": {", - " \"*\": {", - " \"from\": \"0x3afccb788fcd0e00\",", - " \"to\": \"0xbc0b6402c90c2e00\"", - " }", - " },", - " \"code\": \"=\",", - " \"nonce\": \"=\",", - " \"storage\": {}", - " }", - " },", - " \"trace\": [],", - " \"vmTrace\": null", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayTransaction\",\n\t\"params\":[\n \"0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f\",\n [\"stateDiff\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - } - ], - "protocolProfileBehavior": {} - }, - { - "name": "trace_replayBlockTransactions", - "item": [ - { - "name": "trace_replayBlockTransactions - all", - "event": [ - { - "listen": "test", - "script": { - "id": "38dda4a1-afc6-4d61-ae10-d59496d10eb3", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": [", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": {", - " \"0x104994f45d9d697ca104e5704a7b77d7fec3537c\": {", - " \"balance\": {", - " \"+\": \"0x821878651a4d70000\"", - " },", - " \"code\": {", - " \"+\": \"0x\"", - " },", - " \"nonce\": {", - " \"+\": \"0x0\"", - " },", - " \"storage\": {}", - " },", - " \"0x32be343b94f860124dc4fee278fdcbd38c102d88\": {", - " \"balance\": {", - " \"*\": {", - " \"from\": \"0x29dd8f1fcd55eef7fe5c\",", - " \"to\": \"0x29d56d960a08fbeb9e5c\"", - " 
}", - " },", - " \"code\": \"=\",", - " \"nonce\": {", - " \"*\": {", - " \"from\": \"0x1efc5\",", - " \"to\": \"0x1efc6\"", - " }", - " },", - " \"storage\": {}", - " },", - " \"0x61c808d82a3ac53231750dadc13c777b59310bd9\": {", - " \"balance\": {", - " \"*\": {", - " \"from\": \"0x16d21cbe94fc6c3ebf7\",", - " \"to\": \"0x16d21ce264b14f94bf7\"", - " }", - " },", - " \"code\": \"=\",", - " \"nonce\": \"=\",", - " \"storage\": {}", - " }", - " },", - " \"trace\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x32be343b94f860124dc4fee278fdcbd38c102d88\",", - " \"gas\": \"0x4c40d\",", - " \"input\": \"0x\",", - " \"to\": \"0x104994f45d9d697ca104e5704a7b77d7fec3537c\",", - " \"value\": \"0x821878651a4d70000\"", - " },", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"type\": \"call\"", - " }", - " ],", - " \"transactionHash\": \"0xc55e2b90168af6972193c1f86fa4d7d7b31a29c156665d15b9cd48618b5177ef\",", - " \"vmTrace\": {", - " \"code\": \"0x\",", - " \"ops\": []", - " }", - " }", - " ],", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayBlockTransactions\",\n\t\"params\":[\n \"0x1e8480\",\n [\"trace\",\"vmTrace\",\"stateDiff\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_replayBlockTransactions - none", - "event": [ - { - "listen": "test", - "script": { - "id": "7ae24d9b-a87c-4e22-a604-9c20e6641ee5", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " 
\"result\": [", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [],", - " \"transactionHash\": \"0xc55e2b90168af6972193c1f86fa4d7d7b31a29c156665d15b9cd48618b5177ef\",", - " \"vmTrace\": null", - " }", - " ],", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayBlockTransactions\",\n\t\"params\":[\n \"0x1e8480\",\n []\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_replayBlockTransactions - trace only", - "event": [ - { - "listen": "test", - "script": { - "id": "ed4c941b-54ee-487d-8aff-7d0ecb750523", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": [", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x32be343b94f860124dc4fee278fdcbd38c102d88\",", - " \"gas\": \"0x4c40d\",", - " \"input\": \"0x\",", - " \"to\": \"0x104994f45d9d697ca104e5704a7b77d7fec3537c\",", - " \"value\": \"0x821878651a4d70000\"", - " },", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"type\": \"call\"", - " }", - " ],", - " \"transactionHash\": \"0xc55e2b90168af6972193c1f86fa4d7d7b31a29c156665d15b9cd48618b5177ef\",", - " \"vmTrace\": null", - " }", - " ],", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - 
"request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayBlockTransactions\",\n\t\"params\":[\n \"0x1e8480\",\n [\"trace\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_replayBlockTransactions - vmTrace only", - "event": [ - { - "listen": "test", - "script": { - "id": "b3703c29-7bb8-4d08-b757-efbb4de8243d", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": [", - " {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [],", - " \"transactionHash\": \"0xc55e2b90168af6972193c1f86fa4d7d7b31a29c156665d15b9cd48618b5177ef\",", - " \"vmTrace\": {", - " \"code\": \"0x\",", - " \"ops\": []", - " }", - " }", - " ],", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayBlockTransactions\",\n\t\"params\":[\n \"0x1e8480\",\n [\"vmTrace\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_replayBlockTransactions - stateDiff only", - "event": [ - { - "listen": "test", - "script": { - "id": "ac907f46-3f6d-436e-b0fb-3ab294e6c33f", - "exec": [ - "utils.cannotTest(\"trace_rawTransaction - all\", pm.response.json())", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - 
"body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayBlockTransactions\",\n\t\"params\":[\n \"0x1e8480\",\n [\"stateDiff\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - } - ], - "protocolProfileBehavior": {} - }, - { - "name": "trace_rawTransaction", - "item": [ - { - "name": "trace_rawTransaction - all", - "event": [ - { - "listen": "test", - "script": { - "id": "daca0279-5627-47e0-abb3-b1d0e0e3e1ef", - "exec": [ - "utils.cannotTest(\"trace_rawTransaction - all\", pm.response.json())", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_rawTransaction\",\n\t\"params\":[\n \"0xf86d09850cf032900f83030d4094109c4f2ccc82c4d77bde15f306707320294aea3f880de0b6b3a7640000801ca02da49aa24d7fa6fa876af59d77acfd60537eba478654934430b1b32893b65c85a02cdc152d81b71f25fd23e3e271c8c0b15a3a91ce104b6af35bd476d1e6d26fdf\",\n [\"trace\",\"vmTrace\",\"stateDiff\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_rawTransaction - none", - "event": [ - { - "listen": "test", - "script": { - "id": "7a69f86d-f9db-4377-b650-e4fc0cf09253", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [],", - " \"vmTrace\": null", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } 
- ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_rawTransaction\",\n\t\"params\":[\n \"0xf86d09850cf032900f83030d4094109c4f2ccc82c4d77bde15f306707320294aea3f880de0b6b3a7640000801ca02da49aa24d7fa6fa876af59d77acfd60537eba478654934430b1b32893b65c85a02cdc152d81b71f25fd23e3e271c8c0b15a3a91ce104b6af35bd476d1e6d26fdf\",\n []\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_rawTransaction - trace only", - "event": [ - { - "listen": "test", - "script": { - "id": "7ddd07c3-edfc-4a21-ba38-308efc7fb782", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\",", - " \"gas\": \"0x2bb38\",", - " \"input\": \"0x\",", - " \"to\": \"0x109c4f2ccc82c4d77bde15f306707320294aea3f\",", - " \"value\": \"0xde0b6b3a7640000\"", - " },", - " \"result\": {", - " \"gasUsed\": \"0x9325\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"type\": \"call\"", - " }", - " ],", - " \"vmTrace\": null", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_rawTransaction\",\n\t\"params\":[\n \"0xf86d09850cf032900f83030d4094109c4f2ccc82c4d77bde15f306707320294aea3f880de0b6b3a7640000801ca02da49aa24d7fa6fa876af59d77acfd60537eba478654934430b1b32893b65c85a02cdc152d81b71f25fd23e3e271c8c0b15a3a91ce104b6af35bd476d1e6d26fdf\",\n 
[\"trace\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_rawTransaction - vmTrace only", - "event": [ - { - "listen": "test", - "script": { - "id": "10371f18-4769-4d59-a46c-03d13a79a3c9", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"output\": \"0x\",", - " \"stateDiff\": null,", - " \"trace\": [],", - " \"vmTrace\": {", - " \"code\": \"0x6060604052361561001f5760e060020a600035046372ea4b8c811461010c575b61011b3460008080670de0b6b3a764000084106101d557600180548101908190556003805433929081101561000257906000526020600020900160006101000a815481600160a060020a0302191690830217905550670de0b6b3a7640000840393508350670de0b6b3a76400006000600082828250540192505081905550600260016000505411151561011d5760038054829081101561000257906000526020600020900160009054906101000a9004600160a060020a0316600160a060020a03166000600060005054604051809050600060405180830381858888f150505080555060016002556101d5565b60018054016060908152602090f35b005b60018054600354910114156101d55760038054600254600101909102900392505b6003546002549003600119018310156101e357600380548490811015610002579082526040517fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b9190910154600160a060020a03169082906706f05b59d3b200009082818181858883f1505090546706f05b59d3b1ffff1901835550506001929092019161013e565b505060028054600101905550505b600080548501905550505050565b506002548154919250600190810190910460001901905b60035460025490036001190183101561029a576003805484908110156100025760009182526040517fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b9190910154600160a060020a03169190838504600019019082818181858883f1505081548486049003600190810190925550600290830183020460001901841415905061028e576001015b600192909201916101fa565b60038054600254810182018083559190829080158290116101c75760008390526101c7907fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b9081019083015b8
08211156102fa57600081556001016102e6565b509056\",", - " \"ops\": [", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x60\"", - " ],", - " \"store\": null,", - " \"used\": 178997", - " },", - " \"pc\": 0,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x40\"", - " ],", - " \"store\": null,", - " \"used\": 178994", - " },", - " \"pc\": 2,", - " \"sub\": null", - " },", - " {", - " \"cost\": 12,", - " \"ex\": {", - " \"mem\": {", - " \"data\": \"0x0000000000000000000000000000000000000000000000000000000000000060\",", - " \"off\": 64", - " },", - " \"push\": [],", - " \"store\": null,", - " \"used\": 178982", - " },", - " \"pc\": 4,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 178980", - " },", - " \"pc\": 5,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 178977", - " },", - " \"pc\": 6,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1f\"", - " ],", - " \"store\": null,", - " \"used\": 178974", - " },", - " \"pc\": 7,", - " \"sub\": null", - " },", - " {", - " \"cost\": 10,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 178964", - " },", - " \"pc\": 10,", - " \"sub\": null", - " },", - " {", - " \"cost\": 1,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 178963", - " },", - " \"pc\": 31,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x11b\"", - " ],", - " \"store\": null,", - " \"used\": 178960", - " },", - " \"pc\": 32,", - " \"sub\": null", - " },", - " {", - " 
\"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xde0b6b3a7640000\"", - " ],", - " \"store\": null,", - " \"used\": 178958", - " },", - " \"pc\": 35,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 178955", - " },", - " \"pc\": 36,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\",", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 178952", - " },", - " \"pc\": 38,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\",", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 178949", - " },", - " \"pc\": 39,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xde0b6b3a7640000\"", - " ],", - " \"store\": null,", - " \"used\": 178946", - " },", - " \"pc\": 40,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xde0b6b3a7640000\",", - " \"0x0\",", - " \"0x0\",", - " \"0x0\",", - " \"0xde0b6b3a7640000\",", - " \"0xde0b6b3a7640000\"", - " ],", - " \"store\": null,", - " \"used\": 178943", - " },", - " \"pc\": 49,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 178940", - " },", - " \"pc\": 50,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1d5\"", - " ],", - " \"store\": null,", - " \"used\": 178937", - " },", - " \"pc\": 51,", - " \"sub\": null", - " },", - " {", - " \"cost\": 10,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 178927", - " },", - " \"pc\": 54,", - " \"sub\": null", 
- " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 178924", - " },", - " \"pc\": 55,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\",", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 178921", - " },", - " \"pc\": 57,", - " \"sub\": null", - " },", - " {", - " \"cost\": 800,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x56\"", - " ],", - " \"store\": null,", - " \"used\": 178121", - " },", - " \"pc\": 58,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\",", - " \"0x56\",", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 178118", - " },", - " \"pc\": 59,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x57\"", - " ],", - " \"store\": null,", - " \"used\": 178115", - " },", - " \"pc\": 60,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x57\",", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 178112", - " },", - " \"pc\": 61,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x57\",", - " \"0x1\",", - " \"0x57\"", - " ],", - " \"store\": null,", - " \"used\": 178109", - " },", - " \"pc\": 62,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x57\",", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 178106", - " },", - " \"pc\": 63,", - " \"sub\": null", - " },", - " {", - " \"cost\": 5000,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": {", - " \"key\": \"0x1\",", - " \"val\": \"0x57\"", - " },", - " \"used\": 173106", - " },", - " \"pc\": 64,", - " 
\"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x3\"", - " ],", - " \"store\": null,", - " \"used\": 173103", - " },", - " \"pc\": 65,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x3\",", - " \"0x3\"", - " ],", - " \"store\": null,", - " \"used\": 173100", - " },", - " \"pc\": 67,", - " \"sub\": null", - " },", - " {", - " \"cost\": 800,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x5b\"", - " ],", - " \"store\": null,", - " \"used\": 172300", - " },", - " \"pc\": 68,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\"", - " ],", - " \"store\": null,", - " \"used\": 172298", - " },", - " \"pc\": 69,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\",", - " \"0x3\",", - " \"0x5b\",", - " \"0x57\"", - " ],", - " \"store\": null,", - " \"used\": 172295", - " },", - " \"pc\": 70,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x57\",", - " \"0x5b\"", - " ],", - " \"store\": null,", - " \"used\": 172292", - " },", - " \"pc\": 71,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x57\",", - " \"0x5b\",", - " \"0x57\"", - " ],", - " \"store\": null,", - " \"used\": 172289", - " },", - " \"pc\": 72,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 172286", - " },", - " \"pc\": 73,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": 
null,", - " \"used\": 172283", - " },", - " \"pc\": 74,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x2\"", - " ],", - " \"store\": null,", - " \"used\": 172280", - " },", - " \"pc\": 75,", - " \"sub\": null", - " },", - " {", - " \"cost\": 10,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 172270", - " },", - " \"pc\": 78,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x57\",", - " \"0x3\"", - " ],", - " \"store\": null,", - " \"used\": 172267", - " },", - " \"pc\": 79,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 172264", - " },", - " \"pc\": 80,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": {", - " \"data\": \"0x0000000000000000000000000000000000000000000000000000000000000003\",", - " \"off\": 0", - " },", - " \"push\": [],", - " \"store\": null,", - " \"used\": 172261", - " },", - " \"pc\": 82,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x20\"", - " ],", - " \"store\": null,", - " \"used\": 172258", - " },", - " \"pc\": 83,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 172255", - " },", - " \"pc\": 85,", - " \"sub\": null", - " },", - " {", - " \"cost\": 36,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b\"", - " ],", - " \"store\": null,", - " \"used\": 172219", - " },", - " \"pc\": 87,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " 
\"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b\",", - " \"0x57\"", - " ],", - " \"store\": null,", - " \"used\": 172216", - " },", - " \"pc\": 88,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f8b2\"", - " ],", - " \"store\": null,", - " \"used\": 172213", - " },", - " \"pc\": 89,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 172210", - " },", - " \"pc\": 90,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x100\"", - " ],", - " \"store\": null,", - " \"used\": 172207", - " },", - " \"pc\": 92,", - " \"sub\": null", - " },", - " {", - " \"cost\": 10,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 172197", - " },", - " \"pc\": 95,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f8b2\",", - " \"0x1\",", - " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f8b2\"", - " ],", - " \"store\": null,", - " \"used\": 172194", - " },", - " \"pc\": 96,", - " \"sub\": null", - " },", - " {", - " \"cost\": 800,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 171394", - " },", - " \"pc\": 97,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\",", - " \"0x0\",", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 171391", - " },", - " \"pc\": 98,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\"", 
- " ],", - " \"store\": null,", - " \"used\": 171388", - " },", - " \"pc\": 99,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xa0\"", - " ],", - " \"store\": null,", - " \"used\": 171385", - " },", - " \"pc\": 101,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x2\"", - " ],", - " \"store\": null,", - " \"used\": 171382", - " },", - " \"pc\": 103,", - " \"sub\": null", - " },", - " {", - " \"cost\": 60,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x10000000000000000000000000000000000000000\"", - " ],", - " \"store\": null,", - " \"used\": 171322", - " },", - " \"pc\": 105,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xffffffffffffffffffffffffffffffffffffffff\"", - " ],", - " \"store\": null,", - " \"used\": 171319", - " },", - " \"pc\": 106,", - " \"sub\": null", - " },", - " {", - " \"cost\": 5,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xffffffffffffffffffffffffffffffffffffffff\"", - " ],", - " \"store\": null,", - " \"used\": 171314", - " },", - " \"pc\": 107,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xffffffffffffffffffffffff0000000000000000000000000000000000000000\"", - " ],", - " \"store\": null,", - " \"used\": 171311", - " },", - " \"pc\": 108,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 171308", - " },", - " \"pc\": 109,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\",", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 171305", - " },", - " \"pc\": 110,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", 
- " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\",", - " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f8b2\",", - " \"0x0\",", - " \"0x1\",", - " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\"", - " ],", - " \"store\": null,", - " \"used\": 171302", - " },", - " \"pc\": 111,", - " \"sub\": null", - " },", - " {", - " \"cost\": 5,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\"", - " ],", - " \"store\": null,", - " \"used\": 171297", - " },", - " \"pc\": 112,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\"", - " ],", - " \"store\": null,", - " \"used\": 171294", - " },", - " \"pc\": 113,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\",", - " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f8b2\"", - " ],", - " \"store\": null,", - " \"used\": 171291", - " },", - " \"pc\": 114,", - " \"sub\": null", - " },", - " {", - " \"cost\": 20000,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": {", - " \"key\": \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f8b2\",", - " \"val\": \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\"", - " },", - " \"used\": 151291", - " },", - " \"pc\": 115,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 151289", - " },", - " \"pc\": 116,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xde0b6b3a7640000\"", - " ],", - " \"store\": null,", - " \"used\": 151286", - " },", - " \"pc\": 117,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - 
" \"mem\": null,", - " \"push\": [", - " \"0xde0b6b3a7640000\",", - " \"0x0\",", - " \"0x0\",", - " \"0x0\",", - " \"0xde0b6b3a7640000\",", - " \"0xde0b6b3a7640000\"", - " ],", - " \"store\": null,", - " \"used\": 151283", - " },", - " \"pc\": 126,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 151280", - " },", - " \"pc\": 127,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\",", - " \"0x0\",", - " \"0x0\",", - " \"0x0\",", - " \"0xde0b6b3a7640000\"", - " ],", - " \"store\": null,", - " \"used\": 151277", - " },", - " \"pc\": 128,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 151275", - " },", - " \"pc\": 129,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\",", - " \"0x0\",", - " \"0x0\",", - " \"0x0\",", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 151272", - " },", - " \"pc\": 130,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 151270", - " },", - " \"pc\": 131,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xde0b6b3a7640000\"", - " ],", - " \"store\": null,", - " \"used\": 151267", - " },", - " \"pc\": 132,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 151264", - " },", - " \"pc\": 141,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 151261", - " 
},", - " \"pc\": 143,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0xde0b6b3a7640000\",", - " \"0x0\",", - " \"0x0\",", - " \"0xde0b6b3a7640000\"", - " ],", - " \"store\": null,", - " \"used\": 151258", - " },", - " \"pc\": 145,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\",", - " \"0x0\",", - " \"0xde0b6b3a7640000\",", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 151255", - " },", - " \"pc\": 146,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\",", - " \"0xde0b6b3a7640000\",", - " \"0x0\",", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 151252", - " },", - " \"pc\": 147,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 151250", - " },", - " \"pc\": 148,", - " \"sub\": null", - " },", - " {", - " \"cost\": 800,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x7ce66c50e91277e9\"", - " ],", - " \"store\": null,", - " \"used\": 150450", - " },", - " \"pc\": 149,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x8ac72304907677e9\"", - " ],", - " \"store\": null,", - " \"used\": 150447", - " },", - " \"pc\": 150,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x8ac72304907677e9\",", - " \"0x0\",", - " \"0x0\",", - " \"0xde0b6b3a7640000\"", - " ],", - " \"store\": null,", - " \"used\": 150444", - " },", - " \"pc\": 151,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 150442", - " },", - " \"pc\": 152,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", 
- " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 150440", - " },", - " \"pc\": 153,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x8ac72304907677e9\",", - " \"0x0\",", - " \"0x8ac72304907677e9\"", - " ],", - " \"store\": null,", - " \"used\": 150437", - " },", - " \"pc\": 154,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x8ac72304907677e9\",", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 150434", - " },", - " \"pc\": 155,", - " \"sub\": null", - " },", - " {", - " \"cost\": 5000,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": {", - " \"key\": \"0x0\",", - " \"val\": \"0x8ac72304907677e9\"", - " },", - " \"used\": 145434", - " },", - " \"pc\": 156,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 145432", - " },", - " \"pc\": 157,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x2\"", - " ],", - " \"store\": null,", - " \"used\": 145429", - " },", - " \"pc\": 158,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 145426", - " },", - " \"pc\": 160,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 145423", - " },", - " \"pc\": 162,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 145421", - " },", - " \"pc\": 164,", - " \"sub\": null", - " },", - " {", - " \"cost\": 800,", - " \"ex\": {", - " \"mem\": null,", - " 
\"push\": [", - " \"0x57\"", - " ],", - " \"store\": null,", - " \"used\": 144621", - " },", - " \"pc\": 165,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 144618", - " },", - " \"pc\": 166,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 144615", - " },", - " \"pc\": 167,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 144612", - " },", - " \"pc\": 168,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x11d\"", - " ],", - " \"store\": null,", - " \"used\": 144609", - " },", - " \"pc\": 169,", - " \"sub\": null", - " },", - " {", - " \"cost\": 10,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 144599", - " },", - " \"pc\": 172,", - " \"sub\": null", - " },", - " {", - " \"cost\": 1,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 144598", - " },", - " \"pc\": 285,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 144595", - " },", - " \"pc\": 286,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\",", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 144592", - " },", - " \"pc\": 288,", - " \"sub\": null", - " },", - " {", - " \"cost\": 800,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x57\"", - " ],", - " \"store\": null,", - " \"used\": 143792", - " },", - " \"pc\": 289,", - " \"sub\": null", - " },", - " {", 
- " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x3\"", - " ],", - " \"store\": null,", - " \"used\": 143789", - " },", - " \"pc\": 290,", - " \"sub\": null", - " },", - " {", - " \"cost\": 800,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x5b\"", - " ],", - " \"store\": null,", - " \"used\": 142989", - " },", - " \"pc\": 292,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x5b\",", - " \"0x57\",", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 142986", - " },", - " \"pc\": 293,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x58\"", - " ],", - " \"store\": null,", - " \"used\": 142983", - " },", - " \"pc\": 294,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 142980", - " },", - " \"pc\": 295,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1\"", - " ],", - " \"store\": null,", - " \"used\": 142977", - " },", - " \"pc\": 296,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x1d5\"", - " ],", - " \"store\": null,", - " \"used\": 142974", - " },", - " \"pc\": 297,", - " \"sub\": null", - " },", - " {", - " \"cost\": 10,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 142964", - " },", - " \"pc\": 300,", - " \"sub\": null", - " },", - " {", - " \"cost\": 1,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 142963", - " },", - " \"pc\": 469,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\"", - " ],", - " \"store\": null,", - " 
\"used\": 142960", - " },", - " \"pc\": 470,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\",", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 142957", - " },", - " \"pc\": 472,", - " \"sub\": null", - " },", - " {", - " \"cost\": 800,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x8ac72304907677e9\"", - " ],", - " \"store\": null,", - " \"used\": 142157", - " },", - " \"pc\": 473,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x0\",", - " \"0x0\",", - " \"0x0\",", - " \"0x0\",", - " \"0x0\",", - " \"0x8ac72304907677e9\",", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 142154", - " },", - " \"pc\": 474,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x8ac72304907677e9\"", - " ],", - " \"store\": null,", - " \"used\": 142151", - " },", - " \"pc\": 475,", - " \"sub\": null", - " },", - " {", - " \"cost\": 3,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [", - " \"0x8ac72304907677e9\",", - " \"0x0\"", - " ],", - " \"store\": null,", - " \"used\": 142148", - " },", - " \"pc\": 476,", - " \"sub\": null", - " },", - " {", - " \"cost\": 800,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": {", - " \"key\": \"0x0\",", - " \"val\": \"0x8ac72304907677e9\"", - " },", - " \"used\": 141348", - " },", - " \"pc\": 477,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 141346", - " },", - " \"pc\": 478,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 141344", - " },", - " \"pc\": 479,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " 
\"push\": [],", - " \"store\": null,", - " \"used\": 141342", - " },", - " \"pc\": 480,", - " \"sub\": null", - " },", - " {", - " \"cost\": 2,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 141340", - " },", - " \"pc\": 481,", - " \"sub\": null", - " },", - " {", - " \"cost\": 8,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 141332", - " },", - " \"pc\": 482,", - " \"sub\": null", - " },", - " {", - " \"cost\": 1,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 141331", - " },", - " \"pc\": 283,", - " \"sub\": null", - " },", - " {", - " \"cost\": 0,", - " \"ex\": {", - " \"mem\": null,", - " \"push\": [],", - " \"store\": null,", - " \"used\": 141331", - " },", - " \"pc\": 284,", - " \"sub\": null", - " }", - " ]", - " }", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_rawTransaction\",\n\t\"params\":[\n \"0xf86d09850cf032900f83030d4094109c4f2ccc82c4d77bde15f306707320294aea3f880de0b6b3a7640000801ca02da49aa24d7fa6fa876af59d77acfd60537eba478654934430b1b32893b65c85a02cdc152d81b71f25fd23e3e271c8c0b15a3a91ce104b6af35bd476d1e6d26fdf\",\n [\"vmTrace\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_rawTransaction - stateDiff only", - "event": [ - { - "listen": "test", - "script": { - "id": "9fd4f1d1-1c82-4d39-b47f-feeb33836056", - "exec": [ - "utils.cannotTest(\"trace_rawTransaction - stateDiff only\", pm.response.json())", - "" - ], - 
"type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_rawTransaction\",\n\t\"params\":[\n \"0xf86d09850cf032900f83030d4094109c4f2ccc82c4d77bde15f306707320294aea3f880de0b6b3a7640000801ca02da49aa24d7fa6fa876af59d77acfd60537eba478654934430b1b32893b65c85a02cdc152d81b71f25fd23e3e271c8c0b15a3a91ce104b6af35bd476d1e6d26fdf\",\n [\"stateDiff\"]\n ],\n\t\"id\":\"1\"\n}", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - } - ], - "protocolProfileBehavior": {} - }, - { - "name": "trace_get", - "item": [ - { - "name": "trace_get - trace 0", - "event": [ - { - "listen": "test", - "script": { - "id": "b13645b7-48a0-480f-99e5-be52aacae86d", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"action\": {", - " \"callType\": \"staticcall\",", - " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", - " \"gas\": \"0x40e8b\",", - " \"input\": \"0x0902f1ac\",", - " \"to\": \"0x6ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x4b4\",", - " \"output\": \"0x00000000000000000000000000000000000000000000008f63f71a5f71f77323000000000000000000000000000000000000000000000008709d1f36bd0f2f83000000000000000000000000000000000000000000000000000000005f7befab\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 0", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " 
pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_get\",\n \"params\":[\n \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",\n [\"0x0\"]\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_get - trace 3", - "event": [ - { - "listen": "test", - "script": { - "id": "0c7a662c-06d5-45f2-85e8-6d0d30e07d64", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", - " \"gas\": \"0x3d7ce\",", - " \"input\": \"0x23b872dd0000000000000000000000006795e7f4219a48e083157db6b52cf70002eced5f0000000000000000000000004a4354ffddb257671ac00cdcedef87503ac6b35200000000000000000000000000000000000000000000000001d1e7fc878ab04b\",", - " \"to\": \"0x5befbb272290dd5b8521d4a938f6c4757742c430\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x5a30\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 3", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - 
"method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_get\",\n \"params\":[\n \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",\n [\"0x3\"]\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_get - trace 5", - "event": [ - { - "listen": "test", - "script": { - "id": "f5431b3d-4824-4fb7-9172-483588363adc", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", - " \"gas\": \"0x271a9\",", - " \"input\": \"0x022c0d9f00000000000000000000000000000000000000000000000003483b57f55165f500000000000000000000000000000000000000000000000000000000000000000000000000000000000000006ebb1c40cd3789e6fc02f003b2416383ea5c96f400000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000\",", - " \"to\": \"0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0xe1fa\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 3,", - " \"traceAddress\": [", - " 5", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": 
"application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_get\",\n \"params\":[\n \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",\n [\"0x5\"]\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_get - trace 10 (non-existant)", - "event": [ - { - "listen": "test", - "script": { - "id": "77a95092-2e76-4457-b024-78b2e57d7457", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": null,", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_get\",\n \"params\":[\n \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",\n [\"0xa\"]\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - } - ], - "protocolProfileBehavior": {} - }, - { - "name": "trace_transaction", - "event": [ - { - "listen": "test", - "script": { - "id": "d326776f-bac6-4630-82eb-d698fea4a4bf", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x6795e7f4219a48e083157db6b52cf70002eced5f\",", - " \"gas\": \"0x42c8c\",", - " \"input\": 
\"0x8803dbee0000000000000000000000000000000000000000000000003782dace9d90000000000000000000000000000000000000000000000000000001d690b82191f53800000000000000000000000000000000000000000000000000000000000000a00000000000000000000000006795e7f4219a48e083157db6b52cf70002eced5f000000000000000000000000000000000000000000000000000000005f7bf45c00000000000000000000000000000000000000000000000000000000000000040000000000000000000000005befbb272290dd5b8521d4a938f6c4757742c430000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec7000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000001712aad2c773ee04bdc9114b32163c058321cd85\",", - " \"to\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x3bafa\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000001d1e7fc878ab04b0000000000000000000000000000000000000000000000000000000004fe3dbb00000000000000000000000000000000000000000000000003483b57f55165f50000000000000000000000000000000000000000000000003782dace9d900000\"", - " },", - " \"subtraces\": 7,", - " \"traceAddress\": [],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"staticcall\",", - " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", - " \"gas\": \"0x40e8b\",", - " \"input\": \"0x0902f1ac\",", - " \"to\": \"0x6ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": 
{", - " \"gasUsed\": \"0x4b4\",", - " \"output\": \"0x00000000000000000000000000000000000000000000008f63f71a5f71f77323000000000000000000000000000000000000000000000008709d1f36bd0f2f83000000000000000000000000000000000000000000000000000000005f7befab\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 0", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"staticcall\",", - " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", - " \"gas\": \"0x3fc18\",", - " \"input\": \"0x0902f1ac\",", - " \"to\": \"0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x4b4\",", - " \"output\": \"0x000000000000000000000000000000000000000000008c337fdddb8e693225210000000000000000000000000000000000000000000000000000d4a5c378ac6a000000000000000000000000000000000000000000000000000000005f7befad\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"staticcall\",", - " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", - " \"gas\": \"0x3e9b8\",", - " \"input\": \"0x0902f1ac\",", - " \"to\": \"0x4a4354ffddb257671ac00cdcedef87503ac6b352\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x4b4\",", - " \"output\": 
\"0x0000000000000000000000000000000000000000000000076bfdff5e26f7d67500000000000000000000000000000000000000000000000000000014716b4531000000000000000000000000000000000000000000000000000000005f7beebe\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 2", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", - " \"gas\": \"0x3d7ce\",", - " \"input\": \"0x23b872dd0000000000000000000000006795e7f4219a48e083157db6b52cf70002eced5f0000000000000000000000004a4354ffddb257671ac00cdcedef87503ac6b35200000000000000000000000000000000000000000000000001d1e7fc878ab04b\",", - " \"to\": \"0x5befbb272290dd5b8521d4a938f6c4757742c430\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x5a30\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 3", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", - " \"gas\": \"0x36f24\",", - " \"input\": \"0x022c0d9f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004fe3dbb0000000000000000000000000d4a11d5eeaac28ec3f61d100daf4d40471f185200000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000\",", - " \"to\": \"0x4a4354ffddb257671ac00cdcedef87503ac6b352\",", - " 
\"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0xf2c2\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 3,", - " \"traceAddress\": [", - " 4", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x4a4354ffddb257671ac00cdcedef87503ac6b352\",", - " \"gas\": \"0x33965\",", - " \"input\": \"0xa9059cbb0000000000000000000000000d4a11d5eeaac28ec3f61d100daf4d40471f18520000000000000000000000000000000000000000000000000000000004fe3dbb\",", - " \"to\": \"0xdac17f958d2ee523a2206206994597c13d831ec7\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x4c91\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 4,", - " 0", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"staticcall\",", - " \"from\": \"0x4a4354ffddb257671ac00cdcedef87503ac6b352\",", - " \"gas\": \"0x2e786\",", - " \"input\": \"0x70a082310000000000000000000000004a4354ffddb257671ac00cdcedef87503ac6b352\",", - " \"to\": \"0x5befbb272290dd5b8521d4a938f6c4757742c430\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x4c2\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000076dcfe75aae8286c0\"", - " },", - " \"subtraces\": 0,", - " 
\"traceAddress\": [", - " 4,", - " 1", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"staticcall\",", - " \"from\": \"0x4a4354ffddb257671ac00cdcedef87503ac6b352\",", - " \"gas\": \"0x2dca4\",", - " \"input\": \"0x70a082310000000000000000000000004a4354ffddb257671ac00cdcedef87503ac6b352\",", - " \"to\": \"0xdac17f958d2ee523a2206206994597c13d831ec7\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x97f\",", - " \"output\": \"0x000000000000000000000000000000000000000000000000000000146c6d0776\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 4,", - " 2", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", - " \"gas\": \"0x271a9\",", - " \"input\": \"0x022c0d9f00000000000000000000000000000000000000000000000003483b57f55165f500000000000000000000000000000000000000000000000000000000000000000000000000000000000000006ebb1c40cd3789e6fc02f003b2416383ea5c96f400000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000\",", - " \"to\": \"0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0xe1fa\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 3,", - " \"traceAddress\": [", - " 5", - " ],", - " \"transactionHash\": 
\"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", - " \"gas\": \"0x23ffe\",", - " \"input\": \"0xa9059cbb0000000000000000000000006ebb1c40cd3789e6fc02f003b2416383ea5c96f400000000000000000000000000000000000000000000000003483b57f55165f5\",", - " \"to\": \"0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x3b3a\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 5,", - " 0", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"staticcall\",", - " \"from\": \"0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", - " \"gas\": \"0x1fea6\",", - " \"input\": \"0x70a082310000000000000000000000000d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", - " \"to\": \"0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x4d2\",", - " \"output\": \"0x000000000000000000000000000000000000000000008c337c95a03673e0bf2c\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 5,", - " 1", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"staticcall\",", 
- " \"from\": \"0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", - " \"gas\": \"0x1f3b5\",", - " \"input\": \"0x70a082310000000000000000000000000d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", - " \"to\": \"0xdac17f958d2ee523a2206206994597c13d831ec7\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x97f\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000d4a5c876ea25\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 5,", - " 2", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", - " \"gas\": \"0x1875a\",", - " \"input\": \"0x022c0d9f0000000000000000000000000000000000000000000000003782dace9d90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006795e7f4219a48e083157db6b52cf70002eced5f00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000\",", - " \"to\": \"0x6ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x119aa\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 3,", - " \"traceAddress\": [", - " 6", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x6ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", - " \"gas\": \"0x15958\",", 
- " \"input\": \"0xa9059cbb0000000000000000000000006795e7f4219a48e083157db6b52cf70002eced5f0000000000000000000000000000000000000000000000003782dace9d900000\",", - " \"to\": \"0x1712aad2c773ee04bdc9114b32163c058321cd85\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x776b\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 6,", - " 0", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"staticcall\",", - " \"from\": \"0x6ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", - " \"gas\": \"0xdcc0\",", - " \"input\": \"0x70a082310000000000000000000000006ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", - " \"to\": \"0x1712aad2c773ee04bdc9114b32163c058321cd85\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x4fe\",", - " \"output\": \"0x00000000000000000000000000000000000000000000008f2c743f90d4677323\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 6,", - " 1", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"staticcall\",", - " \"from\": \"0x6ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", - " \"gas\": \"0xd1a4\",", - " \"input\": \"0x70a082310000000000000000000000006ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", - " \"to\": \"0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2\",", - " \"value\": \"0x0\"", - " },", - " 
\"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", - " \"blockNumber\": 11000000,", - " \"result\": {", - " \"gasUsed\": \"0x4d2\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000873e55a8eb2609578\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 6,", - " 2", - " ],", - " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", - " \"transactionPosition\": 26,", - " \"type\": \"call\"", - " }", - " ],", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_transaction\",\n \"params\":[\"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\"],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_block", - "event": [ - { - "listen": "test", - "script": { - "id": "181dcd6d-e8cd-4ab4-aae3-db15ad220580", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": [", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x969837498944ae1dc0dcac2d0c65634c88729b2d\",", - " \"gas\": \"0x4782ec\",", - " \"input\": \"0xc4463c80000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a000000000000000000000000f835a0247b0063c04ef22006ebe57c5f11977cc40000000000000000000000000000000000000000000000000000000000000009000000000000000000000000f35e2cc8e6523d683ed44870f5b7cc785051a77d\",", - " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"value\": \"0x775ec7b96add6c8f0\"", 
- " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x3ee428\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 2,", - " \"traceAddress\": [],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x8fc\",", - " \"input\": \"0x\",", - " \"to\": \"0xf35e2cc8e6523d683ed44870f5b7cc785051a77d\",", - " \"value\": \"0x775ec7b96add6c8f0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x4567ba\",", - " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x3d248a\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 5,", - " \"traceAddress\": [", - " 1", - " ],", - " 
\"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x44f77f\",", - " \"input\": \"0xe2faf044000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000057870858\",", - " \"to\": \"0x4a574510c7014e4ae985403536074abe582adfc8\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x2b21c7\",", - " \"output\": \"0x000000000000000000000000304a554a310c7e546dfe434669c62820b7d83490\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"from\": \"0x4a574510c7014e4ae985403536074abe582adfc8\",", - " \"gas\": \"0x446e14\",", - " \"init\": 
\"0x606060405260405160c0806132c88339610120604052905160805160a051925160e0516101005193949293828282600f829055601083905560118054610100830261010060a860020a031990911617905560405130906001906101bc8061033e8339600160a060020a03909316908301526101408201526040519081900361016001906000f060128054600160a060020a031916919091179055505060038054600160a060020a03199081168917909155600e80549091168717905550600c84905560405130906000906101bc806104fa8339018083600160a060020a0316815260200182815260200192505050604051809103906000f0600760006101000a815481600160a060020a03021916908302179055503060006040516101bc806106b68339018083600160a060020a0316815260200182815260200192505050604051809103906000f060088054600160a060020a031916919091179055600754600160a060020a03166000141561017557610002565b600854600160a060020a03166000141561018e57610002565b426002556005600190815560008054828255829080158290116101ca57600e0281600e0283600052602060002091820191016101ca9190610245565b50505030600160a060020a03908116600090815260046020526040808220805460ff19908116600190811790925560035490941683529120805490921617905550505050505050612a56806108726000396000f35b5050600060098201819055600a820155600d81018054600160a060020a03191690556001015b8082111561033a578054600160a060020a03191681556000600182810182905560028381018054848255909281161561010002600019011604601f81901061030c57505b506000600383018190556004838101805461ffff19169055600584018290556006840182905560078401805460ff19169055600884018054838255908352602090922061021f929091028101905b8082111561033a5760008082556001820181815560028301919091556003919091018054600160a060020a03191690556102d7565b601f01602090049060005260206000209081019061028991905b8082111561033a5760008155600101610326565b50905660606040818152806101bc833960a090525160805160008054600160a060020a03191690921760a060020a60ff0219167401000000000000000000000000000000000000000090910217815561016290819061005a90396000f3606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b
6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b929150505660606040818152806101bc833960a090525160805160008054600160a060020a03191690921760a060020a60ff0219167401000000000000000000000000000000000000000090910217815561016290819061005a90396000f3606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b929150505660606040818152806101bc833960a090525160805160008054600160a060020a03191690921760a060020a60ff0219167401000000000000000000000000000000000000000090910217815561016290819061005a90396000f3606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a909104168156
5b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b92915050566060604052361561020e5760e060020a6000350463013cf08b8114610247578063095ea7b3146102d05780630c3b7b96146103455780630e7082031461034e578063149acf9a1461036057806318160ddd146103725780631f2dc5ef1461037b57806321b5b8dd1461039b578063237e9492146103ad57806323b872dd1461040e5780632632bf2014610441578063341458081461047257806339d1f9081461047b5780634b6753bc146104935780634df6d6cc1461049c5780634e10c3ee146104b7578063590e1ae3146104ca578063612e45a3146104db578063643f7cdd1461057a578063674ed066146105925780636837ff1e1461059b57806370a08231146105e5578063749f98891461060b57806378524b2e1461062457806381f03fcb1461067e57806382661dc41461069657806382bf6464146106b75780638b15a605146106c95780638d7af473146106d257806396d7f3f5146106e1578063a1da2fb9146106ea578063a3912ec814610704578063a9059cbb1461070f578063b7bc2c841461073f578063baac53001461074b578063be7c29c1146107b1578063c9d27afe14610817578063cc9ae3f61461082d578063cdef91d014610841578063dbde198814610859578063dd62ed3e1461087e578063e33734fd146108b2578063e5962195146108c6578063e66f53b7146108de578063eceb2945146108f0578063f8c80d261461094f575b610966600f546000906234bc000142108015610239575060125433600160a060020a03908116911614155b156109785761098033610752565b6109866004356000805482908110156100025750808052600e8202600080516020612a3683398151915201905060038101546004820154600683015460018401548454600786015460058701546009880154600a890154600d8a0154600160a060020a039586169b509599600201989760ff81811698610100909204811697949691951693168c565b61096660043560243533600160a060020a03908116600081815260156020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050
565b61096660105481565b610a7d600754600160a060020a031681565b610a7d600e54600160a060020a031681565b61096660165481565b6109665b60004262127500600f60005054031115610de557506014610983565b610a7d601254600160a060020a031681565b60408051602060248035600481810135601f810185900485028601850190965285855261096695813595919460449492939092019181908401838280828437509496505050505050506000600060006000600060003411156116a857610002565b6109666004356024356044355b60115460009060ff1680156104315750600f5442115b80156124e957506124e78461044b565b6109666000610980335b600160a060020a0381166000908152600b602052604081205481908114156129cb57610b99565b61096660065481565b6109665b600d5430600160a060020a03163103610983565b610966600f5481565b61096660043560046020526000908152604090205460ff1681565b61096660043560243560006124cb610831565b610a9a6000341115610ba457610002565b604080516020604435600481810135601f8101849004840285018401909552848452610966948135946024803595939460649492939101918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a01909352828252969897608497919650602491909101945090925082915084018382808284375094965050933593505060a435915050600060006110c1336105ec565b61096660043560096020526000908152604090205481565b61096660015481565b610a9a60043530600160a060020a031633600160a060020a03161415806105db5750600160a060020a03811660009081526004602052604090205460ff16155b156121cb576121c8565b6109666004355b600160a060020a0381166000908152601460205260409020545b919050565b6109666004356024356000600034111561259957610002565b610966600062e6b680420360026000505410806106505750600354600160a060020a0390811633909116145b80156106645750600254621274ff19420190105b156126145750426002908155600180549091028155610983565b610966600435600a6020526000908152604090205481565b610966600435602435600060006000600060006000341115611ba157610002565b610a7d600854600160a060020a031681565b610966600c5481565b61096660005460001901610983565b61096660025481565b61096660043560006000600060003411156121fc57610002565b6109665b6001610983565b6109666004356024355b60115460009060ff16801561072f5750600f
5442115b801561248757506124853361044b565b61096660115460ff1681565b6109666004355b60006000600f600050544210801561076a5750600034115b80156107a457506011546101009004600160a060020a0316600014806107a457506011546101009004600160a060020a0390811633909116145b15610b9f57610a9c61037f565b610a7d600435600060006000508281548110156100025750508080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56b600e83020180548290811015610002575081526020902060030154600160a060020a0316610606565b61096660043560243560006000610e1b336105ec565b6109665b6000600034111561247c57610002565b61096660043560056020526000908152604090205481565b610966600435602435604435600061252f845b6000600060003411156127ac57610002565b610966600435602435600160a060020a0382811660009081526015602090815260408083209385168352929052205461033f565b610a9a600435600034111561254557610002565b610966600435600b6020526000908152604090205481565b610a7d600354600160a060020a031681565b604080516020606435600481810135601f81018490048402850184019095528484526109669481359460248035956044359560849492019190819084018382808284375094965050505050505060006000600034111561103257610002565b610a7d6011546101009004600160a060020a031681565b60408051918252519081900360200190f35b610980610708565b90505b90565b604051808d600160a060020a031681526020018c8152602001806020018b81526020018a815260200189815260200188815260200187815260200186815260200185815260200184815260200183600160a060020a0316815260200182810382528c818154600181600116156101000203166002900481526020019150805460018160011615610100020316600290048015610a635780601f10610a3857610100808354040283529160200191610a63565b820191906000526020600020905b815481529060010190602001808311610a4657829003601f168201915b50509d505050505050505050505050505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b005b604051601254601434908102939093049350600160a060020a03169183900390600081818185876185025a03f150505050600160a060020a038316600081815260146020908152604080832080548601905560168054860190556013825291829020805434019055815184815291517fdbccb92686efce
afb9bb7e0394df7f58f71b954061b81afb57109bf247d3d75a9281900390910190a260105460165410801590610b4c575060115460ff16155b15610b94576011805460ff1916600117905560165460408051918252517ff381a3e2428fdda36615919e8d9c35878d9eb0cf85ac6edf575088e80e4c147e9181900360200190a15b600191505b50919050565b610002565b600f5442118015610bb8575060115460ff16155b15610de357601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040516012549051600160a060020a039190911631109050610cc9576040805160125460e060020a63d2cc718f0282529151600160a060020a039290921691630221038a913091849163d2cc718f91600482810192602092919082900301816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a039490941660048201526024810193909352516044838101936020935082900301816000876161da5a03f115610002575050505b33600160a060020a0316600081815260136020526040808220549051909181818185876185025a03f19250505015610de35733600160a060020a03167fbb28353e4598c3b9199101a66e0989549b659a59a54d2c27fbb183f1932c8e6d6013600050600033600160a060020a03168152602001908152602001600020600050546040518082815260200191505060405180910390a26014600050600033600160a060020a0316815260200190815260200160002060005054601660008282825054039250508190555060006014600050600033600160a060020a031681526020019081526020016000206000508190555060006013600050600033600160a060020a03168152602001908152602001600020600050819055505b565b4262054600600f60005054031115610e13576201518062127500600f60005054034203046014019050610983565b50601e610983565b60001415610e2857610002565b6000341115610e3657610002565b6000805485908110156100025750600160a060020a03331681527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56e600e8602908101602052604090912054600080516020612a3683398151915291909101915060ff1680610eb05750600c810160205260406000205460ff165b80610ebf575060038101544210155b15610ec957610002565b8215610f0f5733600160a060020a03166000908152601460209081526040808320546009850180549091019055600b840190
91529020805460ff19166001179055610f4b565b33600160a060020a0316600090815260146020908152604080832054600a850180549091019055600c84019091529020805460ff191660011790555b33600160a060020a03166000908152600b60205260408120541415610f77576040600020849055610feb565b33600160a060020a03166000908152600b60205260408120548154811015610002579080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566600e909102015460038201541115610feb5733600160a060020a03166000908152600b602052604090208490555b60408051848152905133600160a060020a03169186917f86abfce99b7dd908bec0169288797f85049ec73cbe046ed9de818fab3a497ae09181900360200190a35092915050565b6000805487908110156100025750808052600e8702600080516020612a3683398151915201905090508484846040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020816005016000505414915050949350505050565b600014156110ce57610002565b82801561111857508660001415806110e857508451600014155b806111005750600354600160a060020a038981169116145b8061110b5750600034115b80611118575062093a8084105b1561112257610002565b8215801561114257506111348861115c565b158061114257506212750084105b156111fe57610002565b83546118e590600160a060020a03165b600160a060020a03811660009081526004602052604081205460ff16806111f15750601254600160a060020a039081169083161480156111f15750601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051516006541190505b156129a157506001610606565b6249d40084111561120e57610002565b60115460ff1615806112215750600f5442105b806112365750600c5434108015611236575082155b1561124057610002565b42844201101561124f57610002565b30600160a060020a031633600160a060020a0316141561126e57610002565b60008054600181018083559091908280158290116112a557600e0281600e0283600052602060002091820191016112a5919061136a565b505060008054929450918491508110156100025750808052600e8302600080516020612a368339815191520190508054600160a060020a03191689178155
6001818101899055875160028084018054600082815260209081902096975091959481161561010002600019011691909104601f908101829004840193918b019083901061146257805160ff19168380011785555b5061149292915061144a565b5050600060098201819055600a820155600d81018054600160a060020a03191690556001015b8082111561145e578054600160a060020a03191681556000600182810182905560028084018054848255909281161561010002600019011604601f81901061143057505b506000600383018190556004808401805461ffff19169055600584018290556006840182905560078401805460ff191690556008840180548382559083526020909220611344929091028101905b8082111561145e57600080825560018201818155600283019190915560039091018054600160a060020a03191690556113fc565b601f0160209004906000526020600020908101906113ae91905b8082111561145e576000815560010161144a565b5090565b82800160010185558215611338579182015b82811115611338578251826000505591602001919060010190611474565b50508787866040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f150905001935050505060405180910390208160050160005081905550834201816003016000508190555060018160040160006101000a81548160ff02191690830217905550828160070160006101000a81548160ff02191690830217905550821561157857600881018054600181018083559091908280158290116115735760040281600402836000526020600020918201910161157391906113fc565b505050505b600d8082018054600160a060020a031916331790553460068301819055815401905560408051600160a060020a038a16815260208181018a9052918101859052608060608201818152895191830191909152885185937f5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f938d938d938a938e93929160a084019185810191908190849082908590600090600490601f850104600f02600301f150905090810190601f1680156116485780820380516001836020036101000a031916815260200191505b509550505050505060405180910390a2509695505050505050565b6040805186815260208101839052815189927fdfc78bdca8e3e0b18c16c5c99323c6cb9eb5e00afde190b4e7273f5158702b07928290030190a25b5050505092915050565b6000805488908110156100025750808052600e8802600080516020612a368339815191
52019050600781015490945060ff166116e757620d2f006116ec565b622398805b600485015490935060ff16801561170857506003840154830142115b15611716576117b887611890565b600384015442108061172d5750600484015460ff16155b806117ae57508360000160009054906101000a9004600160a060020a03168460010160005054876040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020846005016000505414155b1561114c57610002565b61169e565b60048401805461ff001916610100179055835460019550600160a060020a03908116309091161480159061180157508354600754600160a060020a03908116911614155b801561181d57506008548454600160a060020a03908116911614155b801561183957508354601254600160a060020a03908116911614155b801561185557506003548454600160a060020a03908116911614155b1561188b5760018401805430600160a060020a031660009081526005602052604090208054919091019055546006805490910190555b611663875b6000600060005082815481101561000257908052600e02600080516020612a36833981519152018150600481015490915060ff16156118d757600d80546006830154900390555b600401805460ff1916905550565b15156118f45761190087611890565b6001915061193161047f565b604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050505061169e565b6001850154111561194157600091505b50600a8301546009840154865191019060049010801590611986575085600081518110156100025790602001015160f860020a900460f860020a02606860f860020a02145b80156119b6575085600181518110156100025790602001015160f860020a900460f860020a02603760f860020a02145b80156119e6575085600281518110156100025790602001015160f860020a900460f860020a0260ff60f860020a02145b8015611a16575085600381518110156100025790602001015160f860020a900460f860020a02601e60f860020a02145b8015611a45575030600160a060020a0316600090815260056020526040902054611a4290611a5d61047f565b81105b15611a4f57600091505b6001840154611a8090611a5f565b015b30600160a060020a03166000908152600560205260408120546129a961047f565b8110611ad457604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050501515
611abc57610002565b4260025560165460059004811115611ad45760056001555b6001840154611ae290611a5f565b8110158015611af85750600a8401546009850154115b8015611b015750815b1561188b578360000160009054906101000a9004600160a060020a0316600160a060020a0316846001016000505487604051808280519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611b7d5780820380516001836020036101000a031916815260200191505b5091505060006040518083038185876185025a03f19250505015156117bd57610002565b611baa336105ec565b60001415611bb757610002565b60008054889081101561000257508052600e87027f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566810154600080516020612a36833981519152919091019450421080611c1957506003840154622398800142115b80611c3257508354600160a060020a0390811690871614155b80611c425750600784015460ff16155b80611c68575033600160a060020a03166000908152600b8501602052604090205460ff16155b80611c9c575033600160a060020a03166000908152600b60205260409020548714801590611c9c5750604060009081205414155b15611ca657610002565b600884018054600090811015610002579081526020812060030154600160a060020a03161415611e1257611efc86604051600090600160a060020a038316907f9046fefd66f538ab35263248a44217dcb70e2eb2cd136629e141b8b8f9f03b60908390a260408051600e547fe2faf044000000000000000000000000000000000000000000000000000000008252600160a060020a03858116600484015260248301859052604483018590526223988042016064840152925192169163e2faf04491608480820192602092909190829003018187876161da5a03f1156100025750506040515191506106069050565b6008850180546000908110156100025781815260208082209390935530600160a060020a031681526005909252604082205481549092908110156100025790815260208120905060020155601654600885018054600090811015610002579081526020812090506001015560048401805461ff0019166101001790555b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090505433600160a060020a031660009081526014602052604081205460088801805493909102939093049550908110156100025790815260208120905060030154604080517fbaac53000000000000000000000000000000000000
0000000000000000000000815233600160a060020a0390811660048301529151929091169163baac53009186916024808301926020929190829003018185886185025a03f11561000257505060405151600014159150611f78905057610002565b60088501805460009081101561000257818152602081206003018054600160a060020a03191690931790925580549091908110156100025790815260208120905060030154600160a060020a031660001415611f5757610002565b600d5430600160a060020a0316311015611f7057610002565b611d9561047f565b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090506002015433600160a060020a0390811660009081526014602090815260408083205430909416835260058083528184205460099093529083205460088b018054969095029690960497509487020494508593929091908290811015610002575260208120815060030154600160a060020a0390811682526020828101939093526040918201600090812080549095019094553016835260059091529020548290101561205357610002565b30600160a060020a031660009081526005602052604081208054849003905560088501805483926009929091829081101561000257508152602080822060030154600160a060020a039081168352929052604080822080549094019093553090911681522054819010156120c657610002565b30600160a060020a0390811660009081526009602090815260408083208054869003905533909316808352601482528383205484519081529351929390927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a36121383361086c565b5033600160a060020a03166000908152601460209081526040808320805460168054919091039055839055600a9091528120556001945061169e565b30600160a060020a0390811660008181526005602090815260408083208054958716808552828520805490970190965584845283905560099091528082208054948352908220805490940190935590815290555b50565b604051600160a060020a0382811691309091163190600081818185876185025a03f192505050151561217457610002565b33600160a060020a03818116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f028352935197995091969195929092169363d2cc718f936004848101949193929183900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a03168152602001
908152602001600020600050540204101561229d57610002565b600160a060020a03338116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f02835293519296909593169363d2cc718f93600483810194929383900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a0316815260200190815260200160002060005054020403905083156123ec57600860009054906101000a9004600160a060020a0316600160a060020a0316630221038a83600160a060020a0316630e7082036040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a031660048201526024810186905290516044808301935060209282900301816000876161da5a03f115610002575050604051511515905061245457610002565b6040805160085460e160020a63011081c5028252600160a060020a038581166004840152602483018590529251921691630221038a9160448082019260209290919082900301816000876161da5a03f115610002575050604051511515905061245457610002565b600160a060020a03331660009081526009602052604090208054909101905550600192915050565b6109803361086c565b155b80156124a257506124a23384845b6000600061293a856105ec565b80156124be57506124be83836000600034111561261c57610002565b15610b9f5750600161033f565b15156124d657610002565b6124e08383610719565b905061033f565b155b80156124fb57506124fb848484612495565b80156125185750612518848484600060003411156126c157610002565b15610b9f57506001612528565b90505b9392505050565b151561253a57610002565b61252584848461041b565b30600160a060020a031633600160a060020a031614158061258a575030600160a060020a031660009081526005602052604090205460649061258561047f565b010481115b1561259457610002565b600c55565b600354600160a060020a0390811633909116146125b557610002565b600160a060020a038316600081815260046020908152604091829020805460ff191686179055815185815291517f73ad2a153c8b67991df9459024950b318a609782cee8c7eeda47b905f9baa91f9281900390910190a250600161033f565b506000610983565b33600160a060020a03166000908152601460205260409020548290108015906126455750600082115b156126b957600160a060020a0333811660008181526014602090815260408083208054889003
9055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a350600161033f565b50600061033f565b600160a060020a03841660009081526014602052604090205482901080159061270a5750601560209081526040600081812033600160a060020a03168252909252902054829010155b80156127165750600082115b156127a457600160a060020a03838116600081815260146020908152604080832080548801905588851680845281842080548990039055601583528184203390961684529482529182902080548790039055815186815291519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190a3506001612528565b506000612528565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f11561000257505060405151905061281a866105ec565b0204101561282757610002565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f115610002575050604051519050612895866105ec565b0204039050600760009054906101000a9004600160a060020a0316600160a060020a0316630221038a84836040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511515905061291357610002565b600160a060020a0383166000908152600a6020526040902080548201905560019150610b99565b600160a060020a0386166000908152600a602052604090205480850291909104915081111561296857610002565b600160a060020a038581166000908152600a60205260408082208054859003905591861681522080548201905560019150509392505050565b506000610606565b0160030260166000505483020460016000505460166000505404019050610606565b600160a060020a0383166000908152600b6020526040812054815481101561000257818052600e02600080516020612a368339815191520190506003810154909150421115610b9457600160a060020a0383166000908152600b602052604081208190559150610b9956290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3
e563000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a0000000000000000000000004a574510c7014e4ae985403536074abe582adfc8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000057870858000000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"address\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"code\": \"0x6060604052361561020e5760e060020a6000350463013cf08b8114610247578063095ea7b3146102d05780630c3b7b96146103455780630e7082031461034e578063149acf9a1461036057806318160ddd146103725780631f2dc5ef1461037b57806321b5b8dd1461039b578063237e9492146103ad57806323b872dd1461040e5780632632bf2014610441578063341458081461047257806339d1f9081461047b5780634b6753bc146104935780634df6d6cc1461049c5780634e10c3ee146104b7578063590e1ae3146104ca578063612e45a3146104db578063643f7cdd1461057a578063674ed066146105925780636837ff1e1461059b57806370a08231146105e5578063749f98891461060b57806378524b2e1461062457806381f03fcb1461067e57806382661dc41461069657806382bf6464146106b75780638b15a605146106c95780638d7af473146106d257806396d7f3f5146106e1578063a1da2fb9146106ea578063a3912ec814610704578063a9059cbb1461070f578063b7bc2c841461073f578063baac53001461074b578063be7c29c1146107b1578063c9d27afe14610817578063cc9ae3f61461082d578063cdef91d014610841578063dbde198814610859578063dd62ed3e1461087e578063e33734fd146108b2578063e5962195146108c6578063e66f53b7146108de578063eceb2945146108f0578063f8c80d261461094f575b610966600f546000906234bc000142108015610239575060125433600160a060020a03908116911614155b156109785761098033610752565b6109866004356000805482908110156100025750808052600e8202600080516020612a3683398151915201905060038101546004820154600683015460018401548454600786015460058701546009880154600a890154
600d8a0154600160a060020a039586169b509599600201989760ff81811698610100909204811697949691951693168c565b61096660043560243533600160a060020a03908116600081815260156020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b61096660105481565b610a7d600754600160a060020a031681565b610a7d600e54600160a060020a031681565b61096660165481565b6109665b60004262127500600f60005054031115610de557506014610983565b610a7d601254600160a060020a031681565b60408051602060248035600481810135601f810185900485028601850190965285855261096695813595919460449492939092019181908401838280828437509496505050505050506000600060006000600060003411156116a857610002565b6109666004356024356044355b60115460009060ff1680156104315750600f5442115b80156124e957506124e78461044b565b6109666000610980335b600160a060020a0381166000908152600b602052604081205481908114156129cb57610b99565b61096660065481565b6109665b600d5430600160a060020a03163103610983565b610966600f5481565b61096660043560046020526000908152604090205460ff1681565b61096660043560243560006124cb610831565b610a9a6000341115610ba457610002565b604080516020604435600481810135601f8101849004840285018401909552848452610966948135946024803595939460649492939101918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a01909352828252969897608497919650602491909101945090925082915084018382808284375094965050933593505060a435915050600060006110c1336105ec565b61096660043560096020526000908152604090205481565b61096660015481565b610a9a60043530600160a060020a031633600160a060020a03161415806105db5750600160a060020a03811660009081526004602052604090205460ff16155b156121cb576121c8565b6109666004355b600160a060020a0381166000908152601460205260409020545b919050565b6109666004356024356000600034111561259957610002565b610966600062e6b680420360026000505410806106505750600354600160a060020a0390811633909116145b80156106645750600254621274ff19420190105b156126145750426002908155600180549091028155610983565b610966600435600a
6020526000908152604090205481565b610966600435602435600060006000600060006000341115611ba157610002565b610a7d600854600160a060020a031681565b610966600c5481565b61096660005460001901610983565b61096660025481565b61096660043560006000600060003411156121fc57610002565b6109665b6001610983565b6109666004356024355b60115460009060ff16801561072f5750600f5442115b801561248757506124853361044b565b61096660115460ff1681565b6109666004355b60006000600f600050544210801561076a5750600034115b80156107a457506011546101009004600160a060020a0316600014806107a457506011546101009004600160a060020a0390811633909116145b15610b9f57610a9c61037f565b610a7d600435600060006000508281548110156100025750508080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56b600e83020180548290811015610002575081526020902060030154600160a060020a0316610606565b61096660043560243560006000610e1b336105ec565b6109665b6000600034111561247c57610002565b61096660043560056020526000908152604090205481565b610966600435602435604435600061252f845b6000600060003411156127ac57610002565b610966600435602435600160a060020a0382811660009081526015602090815260408083209385168352929052205461033f565b610a9a600435600034111561254557610002565b610966600435600b6020526000908152604090205481565b610a7d600354600160a060020a031681565b604080516020606435600481810135601f81018490048402850184019095528484526109669481359460248035956044359560849492019190819084018382808284375094965050505050505060006000600034111561103257610002565b610a7d6011546101009004600160a060020a031681565b60408051918252519081900360200190f35b610980610708565b90505b90565b604051808d600160a060020a031681526020018c8152602001806020018b81526020018a815260200189815260200188815260200187815260200186815260200185815260200184815260200183600160a060020a0316815260200182810382528c818154600181600116156101000203166002900481526020019150805460018160011615610100020316600290048015610a635780601f10610a3857610100808354040283529160200191610a63565b820191906000526020600020905b815481529060010190602001808311610a4657829003601f168201915b50509d5050505050505050
50505050505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b005b604051601254601434908102939093049350600160a060020a03169183900390600081818185876185025a03f150505050600160a060020a038316600081815260146020908152604080832080548601905560168054860190556013825291829020805434019055815184815291517fdbccb92686efceafb9bb7e0394df7f58f71b954061b81afb57109bf247d3d75a9281900390910190a260105460165410801590610b4c575060115460ff16155b15610b94576011805460ff1916600117905560165460408051918252517ff381a3e2428fdda36615919e8d9c35878d9eb0cf85ac6edf575088e80e4c147e9181900360200190a15b600191505b50919050565b610002565b600f5442118015610bb8575060115460ff16155b15610de357601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040516012549051600160a060020a039190911631109050610cc9576040805160125460e060020a63d2cc718f0282529151600160a060020a039290921691630221038a913091849163d2cc718f91600482810192602092919082900301816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a039490941660048201526024810193909352516044838101936020935082900301816000876161da5a03f115610002575050505b33600160a060020a0316600081815260136020526040808220549051909181818185876185025a03f19250505015610de35733600160a060020a03167fbb28353e4598c3b9199101a66e0989549b659a59a54d2c27fbb183f1932c8e6d6013600050600033600160a060020a03168152602001908152602001600020600050546040518082815260200191505060405180910390a26014600050600033600160a060020a0316815260200190815260200160002060005054601660008282825054039250508190555060006014600050600033600160a060020a031681526020019081526020016000206000508190555060006013600050600033600160a060020a03168152602001908152602001600020600050819055505b565b4262054600600f60005054031115610e13576201518062127500600f60005054034203046014019050610983565b50601e610983565b60001415610e2857610002565b6000341115610e3657610002565b6000805485908110156100025750600160a060020a03331681527f290decd954
8b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56e600e8602908101602052604090912054600080516020612a3683398151915291909101915060ff1680610eb05750600c810160205260406000205460ff165b80610ebf575060038101544210155b15610ec957610002565b8215610f0f5733600160a060020a03166000908152601460209081526040808320546009850180549091019055600b84019091529020805460ff19166001179055610f4b565b33600160a060020a0316600090815260146020908152604080832054600a850180549091019055600c84019091529020805460ff191660011790555b33600160a060020a03166000908152600b60205260408120541415610f77576040600020849055610feb565b33600160a060020a03166000908152600b60205260408120548154811015610002579080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566600e909102015460038201541115610feb5733600160a060020a03166000908152600b602052604090208490555b60408051848152905133600160a060020a03169186917f86abfce99b7dd908bec0169288797f85049ec73cbe046ed9de818fab3a497ae09181900360200190a35092915050565b6000805487908110156100025750808052600e8702600080516020612a3683398151915201905090508484846040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020816005016000505414915050949350505050565b600014156110ce57610002565b82801561111857508660001415806110e857508451600014155b806111005750600354600160a060020a038981169116145b8061110b5750600034115b80611118575062093a8084105b1561112257610002565b8215801561114257506111348861115c565b158061114257506212750084105b156111fe57610002565b83546118e590600160a060020a03165b600160a060020a03811660009081526004602052604081205460ff16806111f15750601254600160a060020a039081169083161480156111f15750601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051516006541190505b156129a157506001610606565b6249d40084111561120e57610002565b60115460ff1615806112215750600f5442105b806112365750600c5434108015611236575082155b1561124057610002
565b42844201101561124f57610002565b30600160a060020a031633600160a060020a0316141561126e57610002565b60008054600181018083559091908280158290116112a557600e0281600e0283600052602060002091820191016112a5919061136a565b505060008054929450918491508110156100025750808052600e8302600080516020612a368339815191520190508054600160a060020a031916891781556001818101899055875160028084018054600082815260209081902096975091959481161561010002600019011691909104601f908101829004840193918b019083901061146257805160ff19168380011785555b5061149292915061144a565b5050600060098201819055600a820155600d81018054600160a060020a03191690556001015b8082111561145e578054600160a060020a03191681556000600182810182905560028084018054848255909281161561010002600019011604601f81901061143057505b506000600383018190556004808401805461ffff19169055600584018290556006840182905560078401805460ff191690556008840180548382559083526020909220611344929091028101905b8082111561145e57600080825560018201818155600283019190915560039091018054600160a060020a03191690556113fc565b601f0160209004906000526020600020908101906113ae91905b8082111561145e576000815560010161144a565b5090565b82800160010185558215611338579182015b82811115611338578251826000505591602001919060010190611474565b50508787866040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f150905001935050505060405180910390208160050160005081905550834201816003016000508190555060018160040160006101000a81548160ff02191690830217905550828160070160006101000a81548160ff02191690830217905550821561157857600881018054600181018083559091908280158290116115735760040281600402836000526020600020918201910161157391906113fc565b505050505b600d8082018054600160a060020a031916331790553460068301819055815401905560408051600160a060020a038a16815260208181018a9052918101859052608060608201818152895191830191909152885185937f5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f938d938d938a938e93929160a084019185810191908190849082908590600090600490601f850104600f02600301f15090509081019060
1f1680156116485780820380516001836020036101000a031916815260200191505b509550505050505060405180910390a2509695505050505050565b6040805186815260208101839052815189927fdfc78bdca8e3e0b18c16c5c99323c6cb9eb5e00afde190b4e7273f5158702b07928290030190a25b5050505092915050565b6000805488908110156100025750808052600e8802600080516020612a36833981519152019050600781015490945060ff166116e757620d2f006116ec565b622398805b600485015490935060ff16801561170857506003840154830142115b15611716576117b887611890565b600384015442108061172d5750600484015460ff16155b806117ae57508360000160009054906101000a9004600160a060020a03168460010160005054876040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020846005016000505414155b1561114c57610002565b61169e565b60048401805461ff001916610100179055835460019550600160a060020a03908116309091161480159061180157508354600754600160a060020a03908116911614155b801561181d57506008548454600160a060020a03908116911614155b801561183957508354601254600160a060020a03908116911614155b801561185557506003548454600160a060020a03908116911614155b1561188b5760018401805430600160a060020a031660009081526005602052604090208054919091019055546006805490910190555b611663875b6000600060005082815481101561000257908052600e02600080516020612a36833981519152018150600481015490915060ff16156118d757600d80546006830154900390555b600401805460ff1916905550565b15156118f45761190087611890565b6001915061193161047f565b604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050505061169e565b6001850154111561194157600091505b50600a8301546009840154865191019060049010801590611986575085600081518110156100025790602001015160f860020a900460f860020a02606860f860020a02145b80156119b6575085600181518110156100025790602001015160f860020a900460f860020a02603760f860020a02145b80156119e6575085600281518110156100025790602001015160f860020a900460f860020a0260ff60f860020a02145b8015611a16575085600381518110156100025790602001015160f860020a900460f860020a02601e
60f860020a02145b8015611a45575030600160a060020a0316600090815260056020526040902054611a4290611a5d61047f565b81105b15611a4f57600091505b6001840154611a8090611a5f565b015b30600160a060020a03166000908152600560205260408120546129a961047f565b8110611ad457604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050501515611abc57610002565b4260025560165460059004811115611ad45760056001555b6001840154611ae290611a5f565b8110158015611af85750600a8401546009850154115b8015611b015750815b1561188b578360000160009054906101000a9004600160a060020a0316600160a060020a0316846001016000505487604051808280519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611b7d5780820380516001836020036101000a031916815260200191505b5091505060006040518083038185876185025a03f19250505015156117bd57610002565b611baa336105ec565b60001415611bb757610002565b60008054889081101561000257508052600e87027f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566810154600080516020612a36833981519152919091019450421080611c1957506003840154622398800142115b80611c3257508354600160a060020a0390811690871614155b80611c425750600784015460ff16155b80611c68575033600160a060020a03166000908152600b8501602052604090205460ff16155b80611c9c575033600160a060020a03166000908152600b60205260409020548714801590611c9c5750604060009081205414155b15611ca657610002565b600884018054600090811015610002579081526020812060030154600160a060020a03161415611e1257611efc86604051600090600160a060020a038316907f9046fefd66f538ab35263248a44217dcb70e2eb2cd136629e141b8b8f9f03b60908390a260408051600e547fe2faf044000000000000000000000000000000000000000000000000000000008252600160a060020a03858116600484015260248301859052604483018590526223988042016064840152925192169163e2faf04491608480820192602092909190829003018187876161da5a03f1156100025750506040515191506106069050565b6008850180546000908110156100025781815260208082209390935530600160a060020a03168152600590925260408220548154909290811015610002579081526020812090506002015560165460088501805460009081101561000257908152
6020812090506001015560048401805461ff0019166101001790555b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090505433600160a060020a031660009081526014602052604081205460088801805493909102939093049550908110156100025790815260208120905060030154604080517fbaac530000000000000000000000000000000000000000000000000000000000815233600160a060020a0390811660048301529151929091169163baac53009186916024808301926020929190829003018185886185025a03f11561000257505060405151600014159150611f78905057610002565b60088501805460009081101561000257818152602081206003018054600160a060020a03191690931790925580549091908110156100025790815260208120905060030154600160a060020a031660001415611f5757610002565b600d5430600160a060020a0316311015611f7057610002565b611d9561047f565b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090506002015433600160a060020a0390811660009081526014602090815260408083205430909416835260058083528184205460099093529083205460088b018054969095029690960497509487020494508593929091908290811015610002575260208120815060030154600160a060020a0390811682526020828101939093526040918201600090812080549095019094553016835260059091529020548290101561205357610002565b30600160a060020a031660009081526005602052604081208054849003905560088501805483926009929091829081101561000257508152602080822060030154600160a060020a039081168352929052604080822080549094019093553090911681522054819010156120c657610002565b30600160a060020a0390811660009081526009602090815260408083208054869003905533909316808352601482528383205484519081529351929390927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a36121383361086c565b5033600160a060020a03166000908152601460209081526040808320805460168054919091039055839055600a9091528120556001945061169e565b30600160a060020a0390811660008181526005602090815260408083208054958716808552828520805490970190965584845283905560099091528082208054948352908220805490940190935590815290555b50565b604051600160a060020a038281169130909116
3190600081818185876185025a03f192505050151561217457610002565b33600160a060020a03818116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f028352935197995091969195929092169363d2cc718f936004848101949193929183900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a03168152602001908152602001600020600050540204101561229d57610002565b600160a060020a03338116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f02835293519296909593169363d2cc718f93600483810194929383900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a0316815260200190815260200160002060005054020403905083156123ec57600860009054906101000a9004600160a060020a0316600160a060020a0316630221038a83600160a060020a0316630e7082036040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a031660048201526024810186905290516044808301935060209282900301816000876161da5a03f115610002575050604051511515905061245457610002565b6040805160085460e160020a63011081c5028252600160a060020a038581166004840152602483018590529251921691630221038a9160448082019260209290919082900301816000876161da5a03f115610002575050604051511515905061245457610002565b600160a060020a03331660009081526009602052604090208054909101905550600192915050565b6109803361086c565b155b80156124a257506124a23384845b6000600061293a856105ec565b80156124be57506124be83836000600034111561261c57610002565b15610b9f5750600161033f565b15156124d657610002565b6124e08383610719565b905061033f565b155b80156124fb57506124fb848484612495565b80156125185750612518848484600060003411156126c157610002565b15610b9f57506001612528565b90505b9392505050565b151561253a57610002565b61252584848461041b565b30600160a060020a031633600160a060020a031614158061258a575030600160a060020a031660009081526005602052604090205460649061258561047f565b010481115b1561259457610002565b600c55565b600354600160a060020a0390811633909116146125b557610002565b600160a060020a03831660008181526004
6020908152604091829020805460ff191686179055815185815291517f73ad2a153c8b67991df9459024950b318a609782cee8c7eeda47b905f9baa91f9281900390910190a250600161033f565b506000610983565b33600160a060020a03166000908152601460205260409020548290108015906126455750600082115b156126b957600160a060020a03338116600081815260146020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a350600161033f565b50600061033f565b600160a060020a03841660009081526014602052604090205482901080159061270a5750601560209081526040600081812033600160a060020a03168252909252902054829010155b80156127165750600082115b156127a457600160a060020a03838116600081815260146020908152604080832080548801905588851680845281842080548990039055601583528184203390961684529482529182902080548790039055815186815291519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190a3506001612528565b506000612528565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f11561000257505060405151905061281a866105ec565b0204101561282757610002565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f115610002575050604051519050612895866105ec565b0204039050600760009054906101000a9004600160a060020a0316600160a060020a0316630221038a84836040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511515905061291357610002565b600160a060020a0383166000908152600a6020526040902080548201905560019150610b99565b600160a060020a0386166000908152600a602052604090205480850291909104915081111561296857610002565b600160a060020a038581166000908152600a60205260408082208054859003905591861681522080548201905560019150509392505050565b506000610606565b0160030260
166000505483020460016000505460166000505404019050610606565b600160a060020a0383166000908152600b6020526040812054815481101561000257818052600e02600080516020612a368339815191520190506003810154909150421115610b9457600160a060020a0383166000908152600b602052604081208190559150610b9956290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563\",", - " \"gasUsed\": \"0x2a97ef\"", - " },", - " \"subtraces\": 3,", - " \"traceAddress\": [", - " 1,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"create\"", - " },", - " {", - " \"action\": {", - " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"gas\": \"0x433f5e\",", - " \"init\": \"0x60606040818152806101bc833960a090525160805160008054600160a060020a03191690921760a060020a60ff0219167401000000000000000000000000000000000000000090910217815561016290819061005a90396000f3606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056000000000000000000000000304a554a310c7e546dfe434669c62820b7d834900000000000000000000000000000000000000000000000000000000000000001\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"address\": 
\"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", - " \"code\": \"0x606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056\",", - " \"gasUsed\": \"0x163e6\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 0,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"create\"", - " },", - " {", - " \"action\": {", - " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"gas\": \"0x405eab\",", - " \"init\": 
\"0x60606040818152806101bc833960a090525160805160008054600160a060020a03191690921760a060020a60ff0219167401000000000000000000000000000000000000000090910217815561016290819061005a90396000f3606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056000000000000000000000000304a554a310c7e546dfe434669c62820b7d834900000000000000000000000000000000000000000000000000000000000000000\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"address\": \"0xad3ecf23c0c8983b07163708be6d763b5f056193\",", - " \"code\": 
\"0x606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056\",", - " \"gasUsed\": \"0x163e6\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 0,", - " 0,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"create\"", - " },", - " {", - " \"action\": {", - " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"gas\": \"0x3e2e73\",", - " \"init\": 
\"0x60606040818152806101bc833960a090525160805160008054600160a060020a03191690921760a060020a60ff0219167401000000000000000000000000000000000000000090910217815561016290819061005a90396000f3606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056000000000000000000000000304a554a310c7e546dfe434669c62820b7d834900000000000000000000000000000000000000000000000000000000000000000\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"address\": \"0x1d29edb6997993a16c5086733cfd735d01df787c\",", - " \"code\": 
\"0x606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056\",", - " \"gasUsed\": \"0x163e6\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 0,", - " 0,", - " 2", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"create\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x18a010\",", - " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"value\": \"0xdfd4116684423b208\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x124c9\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": 
\"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"gas\": \"0x181512\",", - " \"input\": \"0x\",", - " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x13f9\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 1,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x171a62\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 2", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x17168c\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": 
\"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 3", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x171316\",", - " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0xea64e\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"gas\": \"0x169540\",", - " \"input\": \"0x\",", - " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"value\": \"0x52aa8b9ab\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0xe8407\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 2,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " 
\"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x1632a8\",", - " \"input\": \"0x0e708203\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x15f\",", - " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x161ae8\",", - " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0xe6afb\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 4,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x159290\",", - " \"input\": 
\"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"value\": \"0xdfd4116684423b208\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x5cdf\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"gas\": \"0x150792\",", - " \"input\": \"0x\",", - " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x13f9\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x14d4cc\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": 
\"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x14d0f6\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 2", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x14cd7d\",", - " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0xcf3f8\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3", - " ],", - " \"transactionHash\": 
\"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"gas\": \"0x144fa7\",", - " \"input\": \"0x\",", - " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"value\": \"0x52aa8b9ab\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0xcd1b1\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 2,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x13ed0f\",", - " \"input\": \"0x0e708203\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x15f\",", - " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x13d54f\",", - " \"input\": 
\"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0xcb8a5\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 4,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x134cf7\",", - " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"value\": \"0xdfd4116684423b208\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x5cdf\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"gas\": \"0x12c1f9\",", - " \"input\": \"0x\",", - " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", - " \"value\": \"0x0\"", 
- " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x13f9\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x128f33\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x128b5d\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 
0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 2", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x1287e4\",", - " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0xb41a2\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"gas\": \"0x120a0e\",", - " \"input\": \"0x\",", - " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"value\": \"0x52aa8b9ab\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0xb1f5b\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 2,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - 
" \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x11a776\",", - " \"input\": \"0x0e708203\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x15f\",", - " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x118fb6\",", - " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0xb064f\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 4,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " 
\"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x11075e\",", - " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"value\": \"0xdfd4116684423b208\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x5cdf\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"gas\": \"0x107c60\",", - " \"input\": \"0x\",", - " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x13f9\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x10499a\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " 
\"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x1045c4\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 2", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x10424b\",", - " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 
1718497,", - " \"result\": {", - " \"gasUsed\": \"0x98f4c\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"gas\": \"0xfc475\",", - " \"input\": \"0x\",", - " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"value\": \"0x52aa8b9ab\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x96d05\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 2,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0xf61dd\",", - " \"input\": \"0x0e708203\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x15f\",", - " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 
0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0xf4a1d\",", - " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x953f9\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 4,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0xec1c5\",", - " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"value\": \"0xdfd4116684423b208\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x5cdf\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 
0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"gas\": \"0xe36c7\",", - " \"input\": \"0x\",", - " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x13f9\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0xe0401\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " 
\"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0xe002b\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 2", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0xdfcb2\",", - " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x7dcf6\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " 
\"callType\": \"call\",", - " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"gas\": \"0xd7edc\",", - " \"input\": \"0x\",", - " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"value\": \"0x52aa8b9ab\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x7baaf\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 2,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0xd1c44\",", - " \"input\": \"0x0e708203\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x15f\",", - " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0xd0484\",", - " \"input\": 
\"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x7a1a3\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 4,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0xc7c2c\",", - " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"value\": \"0xdfd4116684423b208\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x5cdf\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": 
\"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"gas\": \"0xbf12e\",", - " \"input\": \"0x\",", - " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x13f9\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0xbbe68\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0xbba92\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " 
},", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 2", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0xbb719\",", - " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x62aa0\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"gas\": \"0xb3943\",", - " \"input\": \"0x\",", - " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"value\": \"0x52aa8b9ab\"", - " },", - " 
\"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x60859\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 2,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0xad6ab\",", - " \"input\": \"0x0e708203\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x15f\",", - " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0xabeeb\",", - " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": 
\"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x5ef4d\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 4,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0xa3693\",", - " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"value\": \"0xdfd4116684423b208\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x5cdf\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"gas\": \"0x9ab95\",", - " \"input\": \"0x\",", - " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", - " \"value\": \"0x0\"", - " },", 
- " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x13f9\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x978cf\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x974f9\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " 
\"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 2", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x97180\",", - " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x4784a\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"gas\": \"0x8f3aa\",", - " \"input\": \"0x\",", - " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"value\": \"0x52aa8b9ab\"", - " },", - " \"blockHash\": 
\"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x45603\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 2,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x89112\",", - " \"input\": \"0x0e708203\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x15f\",", - " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x87952\",", - " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", - " \"to\": 
\"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x43cf7\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 4,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x7f0fa\",", - " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"value\": \"0xdfd4116684423b208\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x5cdf\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"gas\": \"0x765fc\",", - 
" \"input\": \"0x\",", - " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x13f9\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x73336\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x72f60\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", 
- " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 2", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x72be7\",", - " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x2c5f4\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"gas\": 
\"0x6ae11\",", - " \"input\": \"0x\",", - " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"value\": \"0x52aa8b9ab\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x2a3ad\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 2,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x64b79\",", - " \"input\": \"0x0e708203\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x15f\",", - " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": 
\"0x633b9\",", - " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x28aa1\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 4,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x5ab61\",", - " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"value\": \"0xdfd4116684423b208\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x5cdf\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0", - " ],", - " \"transactionHash\": 
\"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", - " \"gas\": \"0x52063\",", - " \"input\": \"0x\",", - " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x13f9\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x4ed9d\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 1", - " ],", - " \"transactionHash\": 
\"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x4e9c7\",", - " \"input\": \"0xd2cc718f\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x113\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 2", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"gas\": \"0x4e64e\",", - " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", - " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x1139e\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 
1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", - " \"gas\": \"0x46878\",", - " \"input\": \"0x\",", - " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"value\": \"0x52aa8b9ab\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0xf157\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 3,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x405e0\",", - " \"input\": \"0x0e708203\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x15f\",", - " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 
0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 0", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x4024b\",", - " \"input\": \"0x70a08231000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x314\",", - " \"output\": \"0x00000000000000000000000000000000000000000000000dfd3f956d86e77600\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", - " \"gas\": \"0x3fe60\",", - " \"input\": \"0xa9059cbb000000000000000000000000f835a0247b0063c04ef22006ebe57c5f11977cc400000000000000000000000000000000000000000000000dfd3f956d86e77600\",", - " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " 
\"gasUsed\": \"0xd4fa\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 1,", - " 4,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 1,", - " 3,", - " 0,", - " 2", - " ],", - " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", - " \"transactionPosition\": 0,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x2dd9d8e62af6e839b4ed0d869094198ee7e02bff\",", - " \"gas\": \"0x0\",", - " \"input\": \"0x\",", - " \"to\": \"0x46e943ad525b7fe18f8240d944028c7890da135c\",", - " \"value\": \"0xb1a2bc2ec50000\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"transactionHash\": \"0x61db76fc3fe109ad548d91d321daf76dc2ef2f683dc3c4006377d1da8629e274\",", - " \"transactionPosition\": 1,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x286635c294b61bf10f416bbb7b579a0035379d33\",", - " \"gas\": \"0x10d88\",", - " \"input\": \"0x\",", - " \"to\": \"0x81d246bf10386b5702193202b865b0e45bd97f1a\",", - " \"value\": \"0x393ef1a5127c80000\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"transactionHash\": \"0x877db25210ca8be928280112b8b3b4a3afced1d35eb8cd795ac730c7e89d88a3\",", - " \"transactionPosition\": 2,", - " 
\"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0xfecd1b8e3a1e21b304bfba7e5fb5241169e1fa1d\",", - " \"gas\": \"0x0\",", - " \"input\": \"0x\",", - " \"to\": \"0x7ed1e469fcb3ee19c0366d829e291451be638e59\",", - " \"value\": \"0x9e4e3e07f0b2fc00\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x0\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"transactionHash\": \"0xe7b3edd1d02b869b4d0eac0be43a67f0ac2f5b190f5a49f37ac59a98b17f56a3\",", - " \"transactionPosition\": 3,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x0a869d79a7052c7f1b55a8ebabbea3420f0d1e13\",", - " \"gas\": \"0x74148\",", - " \"input\": \"0xf5537ede000000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c1894130000000000000000000000000a869d79a7052c7f1b55a8ebabbea3420f0d1e1300000000000000000000000000000000000000000000000000b1a2bc2ec50000\",", - " \"to\": \"0x447f914fee54e1f9dc1fc5276ae2572b9369ae5d\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x68eb\",", - " \"output\": \"0x\"", - " },", - " \"subtraces\": 1,", - " \"traceAddress\": [],", - " \"transactionHash\": \"0xe11112b361cc2ffdc4815513dcb337beb83be014bcc89cd39a984f3d458e668d\",", - " \"transactionPosition\": 4,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"callType\": \"call\",", - " \"from\": \"0x447f914fee54e1f9dc1fc5276ae2572b9369ae5d\",", - " \"gas\": \"0x6ddd9\",", - " \"input\": \"0xa9059cbb0000000000000000000000000a869d79a7052c7f1b55a8ebabbea3420f0d1e1300000000000000000000000000000000000000000000000000b1a2bc2ec50000\",", - " \"to\": 
\"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", - " \"value\": \"0x0\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": {", - " \"gasUsed\": \"0x5fca\",", - " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", - " },", - " \"subtraces\": 0,", - " \"traceAddress\": [", - " 0", - " ],", - " \"transactionHash\": \"0xe11112b361cc2ffdc4815513dcb337beb83be014bcc89cd39a984f3d458e668d\",", - " \"transactionPosition\": 4,", - " \"type\": \"call\"", - " },", - " {", - " \"action\": {", - " \"author\": \"0xbcdfc35b86bedf72f0cda046a3c16829a2ef41d1\",", - " \"rewardType\": \"block\",", - " \"value\": \"0x4563918244f40000\"", - " },", - " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", - " \"blockNumber\": 1718497,", - " \"result\": null,", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"transactionHash\": null,", - " \"transactionPosition\": null,", - " \"type\": \"reward\"", - " }", - " ],", - " \"id\": \"1\"", - "}", - "", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_block\",\n\t\"params\":[\"0x1a38e1\"],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - }, - { - "name": "trace_filter", - "event": [ - { - "listen": "test", - "script": { - "id": "f524ed84-7851-4a5a-88f2-4d19a10e74cb", - "exec": [ - "var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"result\": [", - " {", - " \"action\": {", - " \"author\": \"0x5088d623ba0fcf0131e0897a91734a4d83596aa0\",", - " \"rewardType\": 
\"block\",", - " \"value\": \"0x478eae0e571ba000\"", - " },", - " \"blockHash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\",", - " \"blockNumber\": 3,", - " \"result\": null,", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"transactionHash\": null,", - " \"transactionPosition\": null,", - " \"type\": \"reward\"", - " },", - " {", - " \"action\": {", - " \"author\": \"0xc8ebccc5f5689fa8659d83713341e5ad19349448\",", - " \"rewardType\": \"uncle\",", - " \"value\": \"0x340aad21b3b70000\"", - " },", - " \"blockHash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\",", - " \"blockNumber\": 3,", - " \"result\": null,", - " \"subtraces\": 0,", - " \"traceAddress\": [],", - " \"transactionHash\": null,", - " \"transactionPosition\": null,", - " \"type\": \"reward\"", - " }", - " ],", - " \"id\": \"1\"", - "}", - "pm.test('Has correct result', function() {", - " pm.expect(pm.response.json()).to.be.deep.equal(expected);", - "})" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "raw", - "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_filter\",\n \"params\":[\n {\n \"fromBlock\":\"0x3\",\n \"toBlock\":\"0x3\"\n }\n ],\n\t\"id\":\"1\"\n}\n", - "options": { - "raw": {} - } - }, - "url": { - "raw": "{{HOST}}", - "host": [ - "{{HOST}}" - ] - } - }, - "response": [] - } - ], - "event": [ - { - "listen": "prerequest", - "script": { - "id": "f660f521-60fc-4561-bacd-14ab00640a12", - "type": "text/javascript", - "exec": [ - "utils = {", - " notImplemented: function(methodName, jsonData) {", - " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", - " var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", - " if (!isErigon && !isSilk) // only test erigon", - " return;", - "", - " var testNotImplemented = pm.globals.get('TEST_NOT_IMPLEMENTED') === 'true';", - " if 
(testNotImplemented) { // defaults to false, therefore don't test", - " pm.test('NOT IMPLEMENTED', function() {", - " pm.expect(false).to.be(true);", - " })", - " } else {", - " // pass unless user has explicitly told us to test not implemented", - " var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"error\": {", - " \"code\": -32000,", - " \"message\": \"the method is currently not implemented: \" + methodName", - " }", - " }", - " if (jsonData.error)", - " delete jsonData.error.data;", - " pm.test('NOT IMPLEMENTED', function() {", - " pm.expect(jsonData).to.deep.equals(expected);", - " })", - " }", - " },", - "", - " isDeprecated: function(methodName, jsonData) {", - " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", - " var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", - " if (!isErigon && !isSilk) // only test erigon", - " return;", - "", - " var testDeprecated = pm.globals.get('TEST_DEPRECATED') === 'true';", - " if (testDeprecated) { // defaults to false, therefore don't test", - " pm.test('DEPRECATED', function() {", - " console.log(\"testDeprecated2: \", testDeprecated)", - " pm.expect(false).to.be(true);", - " })", - " } else {", - " // pass unless user has explicitly told us to fail deprecated", - " var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " \"error\": {", - " \"code\": -32000,", - " \"message\": \"the method has been deprecated: \" + methodName", - " }", - " }", - " if (jsonData.error)", - " delete jsonData.error.data;", - " pm.test('DEPRECATED', function() {", - " pm.expect(jsonData).to.deep.equals(expected);", - " })", - " }", - " },", - "", - " cannotTest: function(methodName, jsonData) {", - " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", - " var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", - " if (!isErigon && !isSilk) // only test erigon", - " return;", - "", - " var expected = {", - " \"jsonrpc\": \"2.0\",", - " \"id\": \"1\",", - " 
\"result\": \"Cannot test - value changes\"", - " }", - " pm.test('VALUE CHANGES, CANNOT TEST: ' + methodName, function() {", - " jsonData.result = \"Cannot test - value changes\";", - " pm.expect(jsonData).to.deep.equals(expected);", - " })", - " },", - "};" - ] - } - }, - { - "listen": "test", - "script": { - "id": "8e45cd97-14f5-42f7-9df7-fe5e2824be86", - "type": "text/javascript", - "exec": [ - "pm.test('Base tests', function() {", - " const jsonData = pm.response.json();", - " pm.response.to.have.status(200);", - " pm.expect(jsonData !== null)", - " jsonData.errors == null || pm.expect(jsonData.errors).to.be.empty;", - "})", - "" - ] - } - } - ], - "protocolProfileBehavior": {} -} \ No newline at end of file diff --git a/cmd/rpcdaemon22/rpcdaemontest/test_util.go b/cmd/rpcdaemon22/rpcdaemontest/test_util.go deleted file mode 100644 index 58d7beb230b..00000000000 --- a/cmd/rpcdaemon22/rpcdaemontest/test_util.go +++ /dev/null @@ -1,323 +0,0 @@ -package rpcdaemontest - -import ( - "context" - "crypto/ecdsa" - "encoding/binary" - "fmt" - "math/big" - "net" - "testing" - - "github.com/ledgerwatch/erigon/consensus" - - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" - "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/accounts/abi/bind" - "github.com/ledgerwatch/erigon/accounts/abi/bind/backends" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/commands/contracts" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/consensus/ethash" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/ethdb/privateapi" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/stages" - "google.golang.org/grpc" 
- "google.golang.org/grpc/test/bufconn" -) - -func CreateTestKV(t *testing.T) kv.RwDB { - s, _, _ := CreateTestSentry(t) - return s.DB -} - -type testAddresses struct { - key *ecdsa.PrivateKey - key1 *ecdsa.PrivateKey - key2 *ecdsa.PrivateKey - address common.Address - address1 common.Address - address2 common.Address -} - -func makeTestAddresses() testAddresses { - var ( - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key1, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - address = crypto.PubkeyToAddress(key.PublicKey) - address1 = crypto.PubkeyToAddress(key1.PublicKey) - address2 = crypto.PubkeyToAddress(key2.PublicKey) - ) - - return testAddresses{ - key: key, - key1: key1, - key2: key2, - address: address, - address1: address1, - address2: address2, - } -} - -func CreateTestSentry(t *testing.T) (*stages.MockSentry, *core.ChainPack, []*core.ChainPack) { - addresses := makeTestAddresses() - var ( - key = addresses.key - address = addresses.address - address1 = addresses.address1 - address2 = addresses.address2 - ) - - var ( - gspec = &core.Genesis{ - Config: params.AllEthashProtocolChanges, - Alloc: core.GenesisAlloc{ - address: {Balance: big.NewInt(9000000000000000000)}, - address1: {Balance: big.NewInt(200000000000000000)}, - address2: {Balance: big.NewInt(300000000000000000)}, - }, - GasLimit: 10000000, - } - ) - m := stages.MockWithGenesis(t, gspec, key, false) - - contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) - defer contractBackend.Close() - - // Generate empty chain to have some orphaned blocks for tests - orphanedChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 5, func(i int, block *core.BlockGen) { - }, true) - if err != nil { - t.Fatal(err) - } - - chain, err := 
getChainInstance(&addresses, m.ChainConfig, m.Genesis, m.Engine, m.DB, contractBackend) - if err != nil { - t.Fatal(err) - } - - if err = m.InsertChain(orphanedChain); err != nil { - t.Fatal(err) - } - if err = m.InsertChain(chain); err != nil { - t.Fatal(err) - } - - return m, chain, []*core.ChainPack{orphanedChain} -} - -var chainInstance *core.ChainPack - -func getChainInstance( - addresses *testAddresses, - config *params.ChainConfig, - parent *types.Block, - engine consensus.Engine, - db kv.RwDB, - contractBackend *backends.SimulatedBackend, -) (*core.ChainPack, error) { - var err error - if chainInstance == nil { - chainInstance, err = generateChain(addresses, config, parent, engine, db, contractBackend) - } - return chainInstance.Copy(), err -} - -func generateChain( - addresses *testAddresses, - config *params.ChainConfig, - parent *types.Block, - engine consensus.Engine, - db kv.RwDB, - contractBackend *backends.SimulatedBackend, -) (*core.ChainPack, error) { - var ( - key = addresses.key - key1 = addresses.key1 - key2 = addresses.key2 - address = addresses.address - address1 = addresses.address1 - address2 = addresses.address2 - theAddr = common.Address{1} - chainId = big.NewInt(1337) - // this code generates a log - signer = types.LatestSignerForChainID(nil) - ) - - transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, chainId) - transactOpts1, _ := bind.NewKeyedTransactorWithChainID(key1, chainId) - transactOpts2, _ := bind.NewKeyedTransactorWithChainID(key2, chainId) - var poly *contracts.Poly - var tokenContract *contracts.Token - - // We generate the blocks without plain state because it's not supported in core.GenerateChain - return core.GenerateChain(config, parent, engine, db, 10, func(i int, block *core.BlockGen) { - var ( - txn types.Transaction - txs []types.Transaction - err error - ) - - ctx := context.Background() - switch i { - case 0: - txn, err = types.SignTx(types.NewTransaction(0, theAddr, uint256.NewInt(1000000000000000), 21000, 
new(uint256.Int), nil), *signer, key) - if err != nil { - panic(err) - } - err = contractBackend.SendTransaction(ctx, txn) - if err != nil { - panic(err) - } - case 1: - txn, err = types.SignTx(types.NewTransaction(1, theAddr, uint256.NewInt(1000000000000000), 21000, new(uint256.Int), nil), *signer, key) - if err != nil { - panic(err) - } - err = contractBackend.SendTransaction(ctx, txn) - if err != nil { - panic(err) - } - case 2: - _, txn, tokenContract, err = contracts.DeployToken(transactOpts, contractBackend, address1) - case 3: - txn, err = tokenContract.Mint(transactOpts1, address2, big.NewInt(10)) - case 4: - txn, err = tokenContract.Transfer(transactOpts2, address, big.NewInt(3)) - case 5: - // Multiple transactions sending small amounts of ether to various accounts - var j uint64 - var toAddr common.Address - nonce := block.TxNonce(address) - for j = 1; j <= 32; j++ { - binary.BigEndian.PutUint64(toAddr[:], j) - txn, err = types.SignTx(types.NewTransaction(nonce, toAddr, uint256.NewInt(1_000_000_000_000_000), 21000, new(uint256.Int), nil), *signer, key) - if err != nil { - panic(err) - } - err = contractBackend.SendTransaction(ctx, txn) - if err != nil { - panic(err) - } - txs = append(txs, txn) - nonce++ - } - case 6: - _, txn, tokenContract, err = contracts.DeployToken(transactOpts, contractBackend, address1) - if err != nil { - panic(err) - } - txs = append(txs, txn) - txn, err = tokenContract.Mint(transactOpts1, address2, big.NewInt(100)) - if err != nil { - panic(err) - } - txs = append(txs, txn) - // Multiple transactions sending small amounts of ether to various accounts - var j uint64 - var toAddr common.Address - for j = 1; j <= 32; j++ { - binary.BigEndian.PutUint64(toAddr[:], j) - txn, err = tokenContract.Transfer(transactOpts2, toAddr, big.NewInt(1)) - if err != nil { - panic(err) - } - txs = append(txs, txn) - } - case 7: - var toAddr common.Address - nonce := block.TxNonce(address) - binary.BigEndian.PutUint64(toAddr[:], 4) - txn, err = 
types.SignTx(types.NewTransaction(nonce, toAddr, uint256.NewInt(1000000000000000), 21000, new(uint256.Int), nil), *signer, key) - if err != nil { - panic(err) - } - err = contractBackend.SendTransaction(ctx, txn) - if err != nil { - panic(err) - } - txs = append(txs, txn) - binary.BigEndian.PutUint64(toAddr[:], 12) - txn, err = tokenContract.Transfer(transactOpts2, toAddr, big.NewInt(1)) - if err != nil { - panic(err) - } - txs = append(txs, txn) - case 8: - _, txn, poly, err = contracts.DeployPoly(transactOpts, contractBackend) - if err != nil { - panic(err) - } - txs = append(txs, txn) - case 9: - txn, err = poly.DeployAndDestruct(transactOpts, big.NewInt(0)) - if err != nil { - panic(err) - } - txs = append(txs, txn) - } - - if err != nil { - panic(err) - } - if txs == nil && txn != nil { - txs = append(txs, txn) - } - - for _, txn := range txs { - block.AddTx(txn) - } - contractBackend.Commit() - }, true) -} - -type IsMiningMock struct{} - -func (*IsMiningMock) IsMining() bool { return false } - -func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *grpc.ClientConn) { //nolint - ctx, cancel := context.WithCancel(context.Background()) - - apis := m.Engine.APIs(nil) - if len(apis) < 1 { - t.Fatal("couldn't instantiate Engine api") - } - - ethashApi := apis[1].Service.(*ethash.API) - server := grpc.NewServer() - - remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, false)) - txpool.RegisterTxpoolServer(server, m.TxPoolGrpcServer) - txpool.RegisterMiningServer(server, privateapi.NewMiningServer(ctx, &IsMiningMock{}, ethashApi)) - starknet.RegisterCAIROVMServer(server, &starknet.UnimplementedCAIROVMServer{}) - listener := bufconn.Listen(1024 * 1024) - - dialer := func() func(context.Context, string) (net.Conn, error) { - go func() { - if err := server.Serve(listener); err != nil { - fmt.Printf("%v\n", err) - } - }() - return 
func(context.Context, string) (net.Conn, error) { - return listener.Dial() - } - } - - conn, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithContextDialer(dialer())) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { - cancel() - conn.Close() - server.Stop() - }) - return ctx, conn -} diff --git a/cmd/rpcdaemon22/rpcservices/eth_backend.go b/cmd/rpcdaemon22/rpcservices/eth_backend.go deleted file mode 100644 index 248345a003c..00000000000 --- a/cmd/rpcdaemon22/rpcservices/eth_backend.go +++ /dev/null @@ -1,291 +0,0 @@ -package rpcservices - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "sync/atomic" - - "github.com/ledgerwatch/erigon-lib/gointerfaces" - "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" - types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb/privateapi" - "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/log/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/emptypb" -) - -type RemoteBackend struct { - remoteEthBackend remote.ETHBACKENDClient - log log.Logger - version gointerfaces.Version - db kv.RoDB - blockReader services.FullBlockReader -} - -func NewRemoteBackend(client remote.ETHBACKENDClient, db kv.RoDB, blockReader services.FullBlockReader) *RemoteBackend { - return &RemoteBackend{ - remoteEthBackend: client, - version: gointerfaces.VersionFromProto(privateapi.EthBackendAPIVersion), - log: log.New("remote_service", "eth_backend"), - db: db, - blockReader: blockReader, - } -} - -func (back *RemoteBackend) EnsureVersionCompatibility() bool { - versionReply, err := back.remoteEthBackend.Version(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true)) - if err != 
nil { - - back.log.Error("getting Version", "err", err) - return false - } - if !gointerfaces.EnsureVersion(back.version, versionReply) { - back.log.Error("incompatible interface versions", "client", back.version.String(), - "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch)) - return false - } - back.log.Info("interfaces compatible", "client", back.version.String(), - "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch)) - return true -} - -func (back *RemoteBackend) Etherbase(ctx context.Context) (common.Address, error) { - res, err := back.remoteEthBackend.Etherbase(ctx, &remote.EtherbaseRequest{}) - if err != nil { - if s, ok := status.FromError(err); ok { - return common.Address{}, errors.New(s.Message()) - } - return common.Address{}, err - } - - return gointerfaces.ConvertH160toAddress(res.Address), nil -} - -func (back *RemoteBackend) NetVersion(ctx context.Context) (uint64, error) { - res, err := back.remoteEthBackend.NetVersion(ctx, &remote.NetVersionRequest{}) - if err != nil { - if s, ok := status.FromError(err); ok { - return 0, errors.New(s.Message()) - } - return 0, err - } - - return res.Id, nil -} - -func (back *RemoteBackend) NetPeerCount(ctx context.Context) (uint64, error) { - res, err := back.remoteEthBackend.NetPeerCount(ctx, &remote.NetPeerCountRequest{}) - if err != nil { - if s, ok := status.FromError(err); ok { - return 0, errors.New(s.Message()) - } - return 0, err - } - - return res.Count, nil -} - -func (back *RemoteBackend) ProtocolVersion(ctx context.Context) (uint64, error) { - res, err := back.remoteEthBackend.ProtocolVersion(ctx, &remote.ProtocolVersionRequest{}) - if err != nil { - if s, ok := status.FromError(err); ok { - return 0, errors.New(s.Message()) - } - return 0, err - } - - return res.Id, nil -} - -func (back *RemoteBackend) ClientVersion(ctx context.Context) (string, error) { - res, err := back.remoteEthBackend.ClientVersion(ctx, 
&remote.ClientVersionRequest{}) - if err != nil { - if s, ok := status.FromError(err); ok { - return "", errors.New(s.Message()) - } - return "", err - } - - return res.NodeName, nil -} - -func (back *RemoteBackend) Subscribe(ctx context.Context, onNewEvent func(*remote.SubscribeReply)) error { - subscription, err := back.remoteEthBackend.Subscribe(ctx, &remote.SubscribeRequest{}, grpc.WaitForReady(true)) - if err != nil { - if s, ok := status.FromError(err); ok { - return errors.New(s.Message()) - } - return err - } - for { - event, err := subscription.Recv() - if errors.Is(err, io.EOF) { - log.Debug("rpcdaemon: the subscription channel was closed") - break - } - if err != nil { - return err - } - - onNewEvent(event) - } - return nil -} - -func (back *RemoteBackend) SubscribeLogs(ctx context.Context, onNewLogs func(reply *remote.SubscribeLogsReply), requestor *atomic.Value) error { - subscription, err := back.remoteEthBackend.SubscribeLogs(ctx, grpc.WaitForReady(true)) - if err != nil { - if s, ok := status.FromError(err); ok { - return errors.New(s.Message()) - } - return err - } - requestor.Store(subscription.Send) - for { - logs, err := subscription.Recv() - if errors.Is(err, io.EOF) { - log.Info("rpcdaemon: the logs subscription channel was closed") - break - } - if err != nil { - return err - } - onNewLogs(logs) - } - return nil -} - -func (back *RemoteBackend) TxnLookup(ctx context.Context, tx kv.Getter, txnHash common.Hash) (uint64, bool, error) { - return back.blockReader.TxnLookup(ctx, tx, txnHash) -} -func (back *RemoteBackend) BlockWithSenders(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (block *types.Block, senders []common.Address, err error) { - return back.blockReader.BlockWithSenders(ctx, tx, hash, blockHeight) -} -func (back *RemoteBackend) BodyWithTransactions(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) { - return back.blockReader.BodyWithTransactions(ctx, tx, 
hash, blockHeight) -} -func (back *RemoteBackend) BodyRlp(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (bodyRlp rlp.RawValue, err error) { - return back.blockReader.BodyRlp(ctx, tx, hash, blockHeight) -} -func (back *RemoteBackend) Body(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) { - return back.blockReader.Body(ctx, tx, hash, blockHeight) -} -func (back *RemoteBackend) Header(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (*types.Header, error) { - return back.blockReader.Header(ctx, tx, hash, blockHeight) -} -func (back *RemoteBackend) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (*types.Header, error) { - return back.blockReader.HeaderByNumber(ctx, tx, blockHeight) -} -func (back *RemoteBackend) HeaderByHash(ctx context.Context, tx kv.Getter, hash common.Hash) (*types.Header, error) { - return back.blockReader.HeaderByHash(ctx, tx, hash) -} -func (back *RemoteBackend) CanonicalHash(ctx context.Context, tx kv.Getter, blockHeight uint64) (common.Hash, error) { - return back.blockReader.CanonicalHash(ctx, tx, blockHeight) -} -func (back *RemoteBackend) TxnByIdxInBlock(ctx context.Context, tx kv.Getter, blockNum uint64, i int) (types.Transaction, error) { - return back.blockReader.TxnByIdxInBlock(ctx, tx, blockNum, i) -} - -func (back *RemoteBackend) EngineNewPayloadV1(ctx context.Context, payload *types2.ExecutionPayload) (res *remote.EnginePayloadStatus, err error) { - return back.remoteEthBackend.EngineNewPayloadV1(ctx, payload) -} - -func (back *RemoteBackend) EngineForkchoiceUpdatedV1(ctx context.Context, request *remote.EngineForkChoiceUpdatedRequest) (*remote.EngineForkChoiceUpdatedReply, error) { - return back.remoteEthBackend.EngineForkChoiceUpdatedV1(ctx, request) -} - -func (back *RemoteBackend) EngineGetPayloadV1(ctx context.Context, payloadId uint64) (res *types2.ExecutionPayload, err error) { - return 
back.remoteEthBackend.EngineGetPayloadV1(ctx, &remote.EngineGetPayloadRequest{ - PayloadId: payloadId, - }) -} - -func (back *RemoteBackend) NodeInfo(ctx context.Context, limit uint32) ([]p2p.NodeInfo, error) { - nodes, err := back.remoteEthBackend.NodeInfo(ctx, &remote.NodesInfoRequest{Limit: limit}) - if err != nil { - return nil, fmt.Errorf("nodes info request error: %w", err) - } - - if nodes == nil || len(nodes.NodesInfo) == 0 { - return nil, errors.New("empty nodesInfo response") - } - - ret := make([]p2p.NodeInfo, 0, len(nodes.NodesInfo)) - for _, node := range nodes.NodesInfo { - var rawProtocols map[string]json.RawMessage - if err = json.Unmarshal(node.Protocols, &rawProtocols); err != nil { - return nil, fmt.Errorf("cannot decode protocols metadata: %w", err) - } - - protocols := make(map[string]interface{}, len(rawProtocols)) - for k, v := range rawProtocols { - protocols[k] = v - } - - ret = append(ret, p2p.NodeInfo{ - Enode: node.Enode, - ID: node.Id, - IP: node.Enode, - ENR: node.Enr, - ListenAddr: node.ListenerAddr, - Name: node.Name, - Ports: struct { - Discovery int `json:"discovery"` - Listener int `json:"listener"` - }{ - Discovery: int(node.Ports.Discovery), - Listener: int(node.Ports.Listener), - }, - Protocols: protocols, - }) - } - - return ret, nil -} - -func (back *RemoteBackend) Peers(ctx context.Context) ([]*p2p.PeerInfo, error) { - rpcPeers, err := back.remoteEthBackend.Peers(ctx, &emptypb.Empty{}) - if err != nil { - return nil, fmt.Errorf("ETHBACKENDClient.Peers() error: %w", err) - } - - peers := make([]*p2p.PeerInfo, 0, len(rpcPeers.Peers)) - - for _, rpcPeer := range rpcPeers.Peers { - peer := p2p.PeerInfo{ - ENR: rpcPeer.Enr, - Enode: rpcPeer.Enode, - ID: rpcPeer.Id, - Name: rpcPeer.Name, - Caps: rpcPeer.Caps, - Network: struct { - LocalAddress string `json:"localAddress"` - RemoteAddress string `json:"remoteAddress"` - Inbound bool `json:"inbound"` - Trusted bool `json:"trusted"` - Static bool `json:"static"` - }{ - LocalAddress: 
rpcPeer.ConnLocalAddr, - RemoteAddress: rpcPeer.ConnRemoteAddr, - Inbound: rpcPeer.ConnIsInbound, - Trusted: rpcPeer.ConnIsTrusted, - Static: rpcPeer.ConnIsStatic, - }, - Protocols: nil, - } - - peers = append(peers, &peer) - } - - return peers, nil -} diff --git a/cmd/rpcdaemon22/rpcservices/eth_mining.go b/cmd/rpcdaemon22/rpcservices/eth_mining.go deleted file mode 100644 index 889b24d62e3..00000000000 --- a/cmd/rpcdaemon22/rpcservices/eth_mining.go +++ /dev/null @@ -1,43 +0,0 @@ -package rpcservices - -import ( - "context" - "fmt" - - "github.com/ledgerwatch/erigon-lib/gointerfaces" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/erigon/ethdb/privateapi" - "github.com/ledgerwatch/log/v3" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" -) - -type MiningService struct { - txpool.MiningClient - log log.Logger - version gointerfaces.Version -} - -func NewMiningService(client txpool.MiningClient) *MiningService { - return &MiningService{ - MiningClient: client, - version: gointerfaces.VersionFromProto(privateapi.MiningAPIVersion), - log: log.New("remote_service", "mining"), - } -} - -func (s *MiningService) EnsureVersionCompatibility() bool { - versionReply, err := s.Version(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true)) - if err != nil { - s.log.Error("getting Version", "err", err) - return false - } - if !gointerfaces.EnsureVersion(s.version, versionReply) { - s.log.Error("incompatible interface versions", "client", s.version.String(), - "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch)) - return false - } - s.log.Info("interfaces compatible", "client", s.version.String(), - "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch)) - return true -} diff --git a/cmd/rpcdaemon22/rpcservices/eth_starknet.go b/cmd/rpcdaemon22/rpcservices/eth_starknet.go deleted file mode 100644 index 
6dcc02d448d..00000000000 --- a/cmd/rpcdaemon22/rpcservices/eth_starknet.go +++ /dev/null @@ -1,31 +0,0 @@ -package rpcservices - -import ( - "github.com/ledgerwatch/erigon-lib/gointerfaces" - "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" - types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types" - "github.com/ledgerwatch/log/v3" - "google.golang.org/grpc" -) - -// StarknetAPIVersion -var StarknetAPIVersion = &types2.VersionReply{Major: 1, Minor: 0, Patch: 0} - -type StarknetService struct { - starknet.CAIROVMClient - log log.Logger - version gointerfaces.Version -} - -func NewStarknetService(cc grpc.ClientConnInterface) *StarknetService { - return &StarknetService{ - CAIROVMClient: starknet.NewCAIROVMClient(cc), - version: gointerfaces.VersionFromProto(StarknetAPIVersion), - log: log.New("remote_service", "starknet"), - } -} - -func (s *StarknetService) EnsureVersionCompatibility() bool { - //TODO: add version check - return true -} diff --git a/cmd/rpcdaemon22/rpcservices/eth_txpool.go b/cmd/rpcdaemon22/rpcservices/eth_txpool.go deleted file mode 100644 index 670a77b538b..00000000000 --- a/cmd/rpcdaemon22/rpcservices/eth_txpool.go +++ /dev/null @@ -1,50 +0,0 @@ -package rpcservices - -import ( - "context" - "fmt" - "time" - - "github.com/ledgerwatch/erigon-lib/gointerfaces" - "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" - txpooproto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - txpool2 "github.com/ledgerwatch/erigon-lib/txpool" - "github.com/ledgerwatch/log/v3" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" -) - -type TxPoolService struct { - txpooproto.TxpoolClient - log log.Logger - version gointerfaces.Version -} - -func NewTxPoolService(client txpooproto.TxpoolClient) *TxPoolService { - return &TxPoolService{ - TxpoolClient: client, - version: gointerfaces.VersionFromProto(txpool2.TxPoolAPIVersion), - log: log.New("remote_service", "tx_pool"), - } -} - -func (s *TxPoolService) 
EnsureVersionCompatibility() bool { -Start: - versionReply, err := s.Version(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true)) - if err != nil { - if grpcutil.ErrIs(err, txpool2.ErrPoolDisabled) { - time.Sleep(3 * time.Second) - goto Start - } - s.log.Error("ensure version", "err", err) - return false - } - if !gointerfaces.EnsureVersion(s.version, versionReply) { - s.log.Error("incompatible interface versions", "client", s.version.String(), - "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch)) - return false - } - s.log.Info("interfaces compatible", "client", s.version.String(), - "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch)) - return true -} diff --git a/cmd/rpcdaemon22/test.http b/cmd/rpcdaemon22/test.http deleted file mode 100644 index cf6205cf37c..00000000000 --- a/cmd/rpcdaemon22/test.http +++ /dev/null @@ -1,222 +0,0 @@ - -### - -POST localhost:8545 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "method": "eth_syncing", - "params": [], - "id": 1 -} - -### - -POST localhost:8545 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "method": "eth_getBalance", - "params": [ - "0xfffa4763f94f7ad191b366a343092a5d1a47ed08", - "0xde84" - ], - "id": 1 -} - -### - -POST localhost:8545 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "method": "debug_accountRange", - "params": [ - "0x1e8480", - "", - 256, - false, - false, - false - ], - "id": 1 -} - -### - -# curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"eth_getTransactionByHash", "params": ["0x1302cc71b89c1482b18a97a6fa2c9c375f4bf7548122363b6e91528440272fde"], "id":1}' localhost:8545 -POST localhost:8545 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "method": "eth_getTransactionByHash", - "params": [ - "0x1302cc71b89c1482b18a97a6fa2c9c375f4bf7548122363b6e91528440272fde" - ], - "id": 1 -} - -### - - - -# curl -X POST -H "Content-Type: 
application/json" --data '{"jsonrpc":"2.0","method":"eth_getTransactionByHash", "params": ["0x1302cc71b89c1482b18a97a6fa2c9c375f4bf7548122363b6e91528440272fde"], "id":1}' localhost:8545 -POST localhost:8545 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "method": "eth_getBlockByNumber", - "params": [ - "0x4C4B40", - true - ], - "id": 1 -} - -### - -# curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"eth_getBlockByNumber", "params": ["0x1b4", true], "id":1}' localhost:8545 -POST localhost:8545 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "method": "eth_newHeader", - "params": [], - "id": 1 -} - -### - -POST localhost:8545 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "method": "eth_getBlockByNumber", - "params": [ - "0xf4240", - true - ], - "id": 2 -} - -### - -POST localhost:8545 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "method": "debug_storageRangeAt", - "params": [ - "0x4ced0bc30041f7f4e11ba9f341b54404770c7695dfdba6bb64b6ffeee2074177", - 99, - "0x33990122638b9132ca29c723bdf037f1a891a70c", - "0x0000000000000000000000000000000000000000000000000000000000000000", - 1024 - ], - "id": 537758 -} - -### > 60 - -### >20 -###{"jsonrpc":"2.0","method":"debug_storageRangeAt","params":["0x6e6ec30ba20b263d1bdf6d87a0b1b037ea595929ac10ad74f6b7e1890fdad744", 19,"0x793ae8c1b1a160bfc07bfb0d04f85eab1a71f4f2","0x0000000000000000000000000000000000000000000000000000000000000000",1024],"id":113911} - - -### {"jsonrpc":"2.0","mesthod":"debug_storageRangeAt","params":["0xbcb55dcb321899291d10818dd06eaaf939ff87a717ac40850b54c6b56e8936ff", 2,"0xca7c390f8f843a8c3036841fde755e5d0acb97da","0x0000000000000000000000000000000000000000000000000000000000000000",1024],"id":3836} - -###{"jsonrpc":"2.0","method":"debug_storageRangeAt","params":["0xf212a7655339852bf58f7e1d66f82256d22d13ccba3068a9c47a635738698c84", 
0,"0xb278e4cb20dfbf97e78f27001f6b15288302f4d7","0x0000000000000000000000000000000000000000000000000000000000000000",1024],"id":8970} - -### - -POST 192.168.255.138:8545 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "method": "eth_getTransactionReceipt", - "params": [ - "0xc05ce241bec59900356ede868d170bc01d743c3cd5ecb129ca99596593022771" - ], - "id": 537758 -} - - -### - -#POST 192.168.255.138:8545 -POST localhost:8545 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "method": "erigon_getLogsByHash", - "params": [ - "0x343f85f13356e138152d77287fda5ae0818c514119119ad439f81d69c59fc2f6" - ], - "id": 537758 -} - - -### - -#POST 192.168.255.138:8545 -POST localhost:8545 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "method": "eth_getLogs", - "params": [ - { - "address": "0x6090a6e47849629b7245dfa1ca21d94cd15878ef", - "fromBlock": "0x3d0000", - "toBlock": "0x3d2600", - "topics": [ - null, - "0x374f3a049e006f36f6cf91b02a3b0ee16c858af2f75858733eb0e927b5b7126c" - ] - } - ], - "id": 537758 -} - -### - -#POST 192.168.255.138:8545 -POST localhost:8545 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "method": "eth_getWork", - "params": [], - "id": 537758 -} - - - -### - -POST localhost:8545 -Content-Type: application/json - -{ - "id": 1, - "method": "eth_estimateGas", - "params": [ - { - "to": "0x5fda30bb72b8dfe20e48a00dfc108d0915be9bb0", - "value": "0x1234" - }, - "latest" - ] -} - diff --git a/cmd/rpcdaemon22/testdata/.gitignore b/cmd/rpcdaemon22/testdata/.gitignore deleted file mode 100644 index 6ad27168f7c..00000000000 --- a/cmd/rpcdaemon22/testdata/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -geth -parity -nethermind -turbogeth -erigon \ No newline at end of file diff --git a/cmd/rpcdaemon22/testdata/sed_file b/cmd/rpcdaemon22/testdata/sed_file deleted file mode 100644 index 777338ab63f..00000000000 --- a/cmd/rpcdaemon22/testdata/sed_file +++ /dev/null @@ -1,22 +0,0 @@ -s/,\"id\":\"1\"//g -s/\"result\":null,/\"result\":\{\},/g 
-s/suicide/selfdestruct/g -s/\"gasUsed\":\"0x0\",//g -s/,\"value\":\"0x0\"//g - -s/invalid argument 1: json: cannot unmarshal hex string \\\"0x\\\" into Go value of type hexutil.Uint64/Invalid params: Invalid index: cannot parse integer from empty string./ -s/invalid argument 1: json: cannot unmarshal number into Go value of type \[\]hexutil.Uint64/Invalid params: invalid type: integer `0`, expected a sequence./ -s/missing value for required argument 1/Invalid params: invalid length 1, expected a tuple of size 2./ -s/Invalid params: invalid type: string \\\"0x0\\\", expected a sequence./invalid argument 1: json: cannot unmarshal string into Go value of type \[\]hexutil.Uint64/ -s/Invalid params\: Invalid block number\: number too large to fit in target type./invalid argument 0: hex number > 64 bits/ -s/the method trace_junk12 does not exist\/is not available/Method not found/ - -s/,\"traceAddress\":null/,\"traceAddress\":[]/g -s/\"0x0000000000000000000000000000000000000000000000000000000000000000\"/\"0x\"/g -s/\"transactionHash\":\"0x\",\"transactionPosition\":0/\"transactionHash\":null,\"transactionPosition\":null/g -s/\"result\":null/\"result\":[]/g - -s/\"error\":{\"code\":-32000,\"message\":\"function trace_replayBlockTransactions not implemented\"}/\"result\":\[\]/ -s/\"error\":{\"code\":-32000,\"message\":\"function trace_replayTransaction not implemented\"}/\"result\":{\"output\":\"0x\",\"stateDiff\":null,\"trace\":\[\],\"vmTrace\":null}/ -s/\"error\":{\"code\":-32602,\"message\":\"invalid argument 0: json: cannot unmarshal array into Go value of type commands.CallParam\"}/\"result\":\[{\"output\":\"0x\",\"stateDiff\":null,\"trace\":\[\],\"vmTrace\":null},{\"output\":\"0x\",\"stateDiff\":null,\"trace\":\[\],\"vmTrace\":null}]/ -s/\"error\":{\"code\":-32602,\"message\":\"invalid argument 0: hex string has length 82, want 64 for common.Hash\"}/\"error\":{\"code\":-32602,\"data\":\"RlpIncorrectListLen\",\"message\":\"Couldn't parse parameters: Transaction is 
not valid RLP\"}/ diff --git a/cmd/rpcdaemon22/testdata/trace_tests b/cmd/rpcdaemon22/testdata/trace_tests deleted file mode 100644 index 0e89a8da6aa..00000000000 --- a/cmd/rpcdaemon22/testdata/trace_tests +++ /dev/null @@ -1,76 +0,0 @@ -005 trace_get fail ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3",0] -010 trace_get fail ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3","0x0"] -015 trace_get zero ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3",["0x0"]] -020 trace_get one ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3",["0x1"]] -025 trace_get both ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3",["0x0","0x1"]] -030 trace_get fail ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3"] -035 trace_get two ["0x5c504ed432cb51138bcf09aa5e8a410dd4a1e204ef84bfed1be16dfba1b22060",["0x2"]] -040 trace_get fail ["0x975994512b958b31608f5692a6dbacba359349533dfb4ba0facfb7291fbec48d",["0x"]] - -050 trace_transaction one ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3"] -055 trace_transaction two ["0x5c504ed432cb51138bcf09aa5e8a410dd4a1e204ef84bfed1be16dfba1b22060"] -060 trace_transaction three ["0x6afbe0f0ea3613edd6b84b71260836c03bddce81604f05c81a070cd671d3d765"] -065 trace_transaction four ["0x80926bb17ecdd526a2d901835482615eec87c4ca7fc30b96d8c6d6ab17bc721e"] -070 trace_transaction five ["0xb8ae0ab093fe1882249187b8f40dbe6e9285b419d096bd8028172d55b47ff3ce"] -075 trace_transaction six ["0xc2b831c051582f13dfaff6df648972e7e94aeeed1e85d23bd968a55b59f3cb5b"] -080 trace_transaction seven ["0xf9d426284bd20415a53991a004122b3a3a619b295ea98d1d88a5fd3a4125408b"] -085 trace_transaction cr_de ["0x343ba476313771d4431018d7d2e935eba2bfe26d5be3e6cb84af6817fd0e4309"] - -105 trace_block 0x23 ["0x2328"] -110 trace_block 0x10 ["0x100"] -115 trace_block 0x12 ["0x12"] -120 trace_block 0x12 ["0x121212"] -125 trace_block 0x2e ["0x2ed119"] -130 trace_block 0xa1 
["0xa18dcfbc639be11c353420ede9224d772c56eb9ff327eb73771f798cf42d0027"] -#135 trace_block 0xa6 ["0xa60f34"] -#140 trace_block 0xf4 ["0xf4629"] -#145 trace_block slow ["0x895441"] - -150 trace_filter good_1 [{"fromBlock":"0x2328","toBlock":"0x2328"}] -155 trace_filter range_1 [{"fromBlock":"0x2dcaa9","toBlock":"0x2dcaaa"}] -160 trace_filter block_3 [{"fromBlock":"0x3","toBlock":"0x3"}] -165 trace_filter first_tx [{"fromBlock":"0xb443","toBlock":"0xb443"}] -170 trace_filter from_doc [{"fromBlock":"0x2ed0c4","toBlock":"0x2ed128","toAddress":["0x8bbb73bcb5d553b5a556358d27625323fd781d37"],"after":1000,"count":100}] -175 trace_filter rem_a_o [{"fromBlock":"0x2ed0c4","toBlock":"0x2ed128","toAddress":["0x8bbb73bcb5d553b5a556358d27625323fd781d37"]}] -180 trace_filter count_1 [{"fromBlock":"0x2ed0c4","toBlock":"0x2ed128","toAddress":["0x8bbb73bcb5d553b5a556358d27625323fd781d37"],"count":1}] -185 trace_filter after_1 [{"fromBlock":"0x2ed0c4","toBlock":"0x2ed128","toAddress":["0x8bbb73bcb5d553b5a556358d27625323fd781d37"],"after":1,"count":4}] -190 trace_filter to_0xc02 [{"fromBlock":"0xa344e0","toBlock":"0xa344e0","toAddress":["0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"]}] -195 trace_filter fr_0xc3c [{"fromBlock":"0xa344e0","toBlock":"0xa344e0","fromAddress":["0xc3ca90684fd7b8c7e4be88c329269fc32111c4bd"]}] -200 trace_filter both [{"fromBlock":"0xa344e0","toBlock":"0xa344e0","fromAddress":["0xc3ca90684fd7b8c7e4be88c329269fc32111c4bd"],"toAddress":["0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"]}] -205 trace_filter fail_2 [{"fromBlock":"0xa606ba","toBlock":"0x2dcaa9"}] -210 trace_filter bad_1 [{"fromBlock":"0x2328","toBlock":"0x2327"}] -#215 trace_filter slow_2 [{"fromBlock":"0xa606ba","toBlock":"0xa606ba"}] -#220 trace_filter 10700000 [{"fromBlock":"0xa344e0","toBlock":"0xa344e0"}] - -250 trace_replayBlockTransactions fail ["0x3", ["stateDiff"]] -300 trace_replayTransaction fail ["0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f", ["fail"]] -320_erigon 
trace_call fail [{"input":"0x0","nonce":"0x0","from":"0x02fcf30912b6fe2b6452ee19721c6068fe4c7b61","gas":"0xf4240","to":"0x37a9679c41e99db270bda88de8ff50c0cd23f326","gasPrice":"0x4a817c800","value":"0x0"},["fail"],"latest"] -340 trace_callMany fail [[[{"from":"0x407d73d8a49eeb85d32cf465507dd71d507100c1","to":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b","value":"0x186a0"},["fail"]],[{"from":"0x407d73d8a49eeb85d32cf465507dd71d507100c1","to":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b","value":"0x186a0"},["fail"]]],"latest"] -360 trace_rawTransaction fail ["0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675",["fail"]] -#255 trace_replayBlockTransactions ["0x1",["trace"]] -#250 trace_replayBlockTransactions ["0x1"] -#265 trace_replayBlockTransactions ["0x100"] -#260 trace_replayBlockTransactions ["0x895441",["trace"]] -#275 trace_replayBlockTransactions ["0x895441",["vmTrace"]] -#270 trace_replayBlockTransactions ["0xCF9BF",["trace"]] -#285 trace_replayBlockTransactions ["0xDBBA1",["trace"]] -#280 trace_replayBlockTransactions ["0xDBBA1",["vmTrace"]] -#285 trace_replayBlockTransactions ["CF9BF",["trace"]] -#290 trace_replayTransactions ["CF9BF",["trace"]] -#295trace_replayTransactions ["CF9BF",["trace"]] - -305 trace_junk12 no_rpc [] - -# custom, experimental stuff -405_erigon trace_blockReward rew_0 ["0x0"] -410_erigon trace_blockReward rew_1 ["0x1"] -415_erigon trace_blockReward rew_2 ["0x2"] -420_erigon trace_blockReward rew_3 ["0x3"] -425_erigon trace_uncleReward unc_0 ["0x0"] -430_erigon trace_uncleReward unc_1 ["0x1"] -435_erigon trace_uncleReward unc_2 ["0x2"] -440_erigon trace_uncleReward unc_3 ["0x3"] -445_erigon trace_issuance iss_0 ["0x0"] -450_erigon trace_issuance iss_1 ["0x1"] -455_erigon trace_issuance iss_2 ["0x2"] -460_erigon trace_issuance iss_3 ["0x3"] diff --git a/cmd/rpctest/rpctest/bench1.go b/cmd/rpctest/rpctest/bench1.go index 065c8fe6247..51e1685d147 100644 --- a/cmd/rpctest/rpctest/bench1.go +++ 
b/cmd/rpctest/rpctest/bench1.go @@ -19,7 +19,9 @@ var routes map[string]string // but also can be used for comparing RPCDaemon with Geth // parameters: // needCompare - if false - doesn't call Erigon and doesn't compare responses -// use false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon +// +// use false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon +// // fullTest - if false - then call only methods which RPCDaemon currently supports func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFrom uint64, blockTo uint64, recordFile string) { setRoutes(erigonURL, gethURL) diff --git a/cmd/rpctest/rpctest/bench3.go b/cmd/rpctest/rpctest/bench3.go index 0e650da130c..1f48921fa43 100644 --- a/cmd/rpctest/rpctest/bench3.go +++ b/cmd/rpctest/rpctest/bench3.go @@ -3,10 +3,11 @@ package rpctest import ( "encoding/base64" "fmt" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/state" "net/http" "time" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/state" ) func Bench3(erigon_url, geth_url string) { diff --git a/cmd/rpctest/rpctest/bench4.go b/cmd/rpctest/rpctest/bench4.go index 0d0f0b55255..d8e0194aa37 100644 --- a/cmd/rpctest/rpctest/bench4.go +++ b/cmd/rpctest/rpctest/bench4.go @@ -2,9 +2,10 @@ package rpctest import ( "fmt" - "github.com/ledgerwatch/erigon/common" "net/http" "time" + + "github.com/ledgerwatch/erigon/common" ) func Bench4(erigon_url string) { diff --git a/cmd/rpctest/rpctest/bench6.go b/cmd/rpctest/rpctest/bench6.go index fabe2ed706f..08d66a7baca 100644 --- a/cmd/rpctest/rpctest/bench6.go +++ b/cmd/rpctest/rpctest/bench6.go @@ -2,9 +2,10 @@ package rpctest import ( "fmt" - "github.com/ledgerwatch/erigon/common" "net/http" "time" + + "github.com/ledgerwatch/erigon/common" ) func Bench6(erigon_url string) { diff --git a/cmd/rpctest/rpctest/bench_ethcall.go 
b/cmd/rpctest/rpctest/bench_ethcall.go index ddf1ba53322..6d2e0354515 100644 --- a/cmd/rpctest/rpctest/bench_ethcall.go +++ b/cmd/rpctest/rpctest/bench_ethcall.go @@ -12,9 +12,10 @@ import ( // but also can be used for comparing RPCDaemon with Geth or infura // parameters: // needCompare - if false - doesn't call Erigon and doesn't compare responses -// false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon -// recordFile stores all eth_call returned with success -// errorFile stores information when erigon and geth doesn't return same data +// +// false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon +// recordFile stores all eth_call returned with success +// errorFile stores information when erigon and geth doesn't return same data func BenchEthCall(erigonURL, gethURL string, needCompare, latest bool, blockFrom, blockTo uint64, recordFile string, errorFile string) { setRoutes(erigonURL, gethURL) var client = &http.Client{ diff --git a/cmd/rpctest/rpctest/bench_ethgetlogs.go b/cmd/rpctest/rpctest/bench_ethgetlogs.go index 0d02f1ade46..61c921bf36f 100644 --- a/cmd/rpctest/rpctest/bench_ethgetlogs.go +++ b/cmd/rpctest/rpctest/bench_ethgetlogs.go @@ -13,9 +13,10 @@ import ( // but also can be used for comparing RPCDaemon with Geth or infura // parameters: // needCompare - if false - doesn't call Erigon and doesn't compare responses -// false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon -// recordFile stores all eth_getlogs returned with success -// errorFile stores information when erigon and geth doesn't return same data +// +// false value - to generate vegeta files, it's faster but we can generate vegeta files for Geth and Erigon +// recordFile stores all eth_getlogs returned with success +// errorFile stores information when erigon and geth doesn't return same data func BenchEthGetLogs(erigonURL, gethURL 
string, needCompare bool, blockFrom uint64, blockTo uint64, recordFile string, errorFile string) { setRoutes(erigonURL, gethURL) var client = &http.Client{ diff --git a/cmd/rpctest/rpctest/request_generator.go b/cmd/rpctest/rpctest/request_generator.go index 484b84bd995..41419e52611 100644 --- a/cmd/rpctest/rpctest/request_generator.go +++ b/cmd/rpctest/rpctest/request_generator.go @@ -177,34 +177,6 @@ func (g *RequestGenerator) traceBlock(bn uint64) string { return sb.String() } -func (g *RequestGenerator) traceFilterCount(prevBn uint64, bn uint64, count uint64) string { - var sb strings.Builder - fmt.Fprintf(&sb, `{ "jsonrpc": "2.0", "method": "trace_filter", "params": [{"fromBlock":"0x%x", "toBlock": "0x%x", "count": %d}]`, prevBn, bn, count) - fmt.Fprintf(&sb, `, "id":%d}`, g.reqID) - return sb.String() -} - -func (g *RequestGenerator) traceFilterAfter(prevBn uint64, bn uint64, after uint64) string { - var sb strings.Builder - fmt.Fprintf(&sb, `{ "jsonrpc": "2.0", "method": "trace_filter", "params": [{"fromBlock":"0x%x", "toBlock": "0x%x", "after": %d}]`, prevBn, bn, after) - fmt.Fprintf(&sb, `, "id":%d}`, g.reqID) - return sb.String() -} - -func (g *RequestGenerator) traceFilterCountAfter(prevBn uint64, bn uint64, after, count uint64) string { - var sb strings.Builder - fmt.Fprintf(&sb, `{ "jsonrpc": "2.0", "method": "trace_filter", "params": [{"fromBlock":"0x%x", "toBlock": "0x%x", "count": %d, "after": %d}]`, prevBn, bn, count, after) - fmt.Fprintf(&sb, `, "id":%d}`, g.reqID) - return sb.String() -} - -func (g *RequestGenerator) traceFilterUnion(prevBn uint64, bn uint64, from, to common.Address) string { - var sb strings.Builder - fmt.Fprintf(&sb, `{ "jsonrpc": "2.0", "method": "trace_filter", "params": [{"fromBlock":"0x%x", "toBlock": "0x%x", "fromAddress": ["0x%x"], "toAddress": ["0x%x"]}]`, prevBn, bn, from, to) - fmt.Fprintf(&sb, `, "id":%d}`, g.reqID) - return sb.String() -} - func (g *RequestGenerator) traceFilterFrom(prevBn uint64, bn uint64, account 
common.Address) string { var sb strings.Builder fmt.Fprintf(&sb, `{ "jsonrpc": "2.0", "method": "trace_filter", "params": [{"fromBlock":"0x%x", "toBlock": "0x%x", "fromAddress": ["0x%x"]}]`, prevBn, bn, account) diff --git a/cmd/rpctest/rpctest/utils.go b/cmd/rpctest/rpctest/utils.go index 629332ee49a..76e85652ad4 100644 --- a/cmd/rpctest/rpctest/utils.go +++ b/cmd/rpctest/rpctest/utils.go @@ -40,7 +40,7 @@ func compareBlocks(b, bg *EthBlockByNumber) bool { return false } if (tx.To == nil && txg.To != nil) || (tx.To != nil && txg.To == nil) { - fmt.Printf("Tx %d different To nilness: %t %t\n", i, (tx.To == nil), (txg.To == nil)) + fmt.Printf("Tx %d different To nilness: %t %t\n", i, tx.To == nil, txg.To == nil) return false } if tx.To != nil && txg.To != nil && *tx.To != *txg.To { @@ -89,8 +89,8 @@ func compareTraces(trace, traceg *EthTxTrace) bool { } func compareJsonValues(prefix string, v, vg *fastjson.Value) error { - var vType fastjson.Type = fastjson.TypeNull - var vgType fastjson.Type = fastjson.TypeNull + var vType = fastjson.TypeNull + var vgType = fastjson.TypeNull if v != nil { vType = v.Type() } @@ -205,8 +205,8 @@ func compareErrors(errVal *fastjson.Value, errValg *fastjson.Value, methodName s return fmt.Errorf("different result (Erigon) returns OK, while G/OE returns error code=%d message=%s", errValg.GetInt("code"), errValg.GetStringBytes("message")) } } else { - s1 := strings.ToUpper(string((errVal.GetStringBytes("message")))) - s2 := strings.ToUpper(string((errValg.GetStringBytes("message")))) + s1 := strings.ToUpper(string(errVal.GetStringBytes("message"))) + s2 := strings.ToUpper(string(errValg.GetStringBytes("message"))) if strings.Compare(s1, s2) != 0 { if errs != nil { fmt.Printf("different error-message for method %s, errCtx: %s\n", methodName, errCtx) diff --git a/cmd/sentry/README.md b/cmd/sentry/README.md index 1ef3bf3a65d..078822e6ff4 100644 --- a/cmd/sentry/README.md +++ b/cmd/sentry/README.md @@ -25,10 +25,12 @@ p2p sentry running on 
the same computer listening to the port `9091`. In order t computer, or a different port (or both), the option `--sentry.api.addr` can be used. For example: ``` -./buid/bin/sentry --datadir= --sentry.api.addr=localhost:9999 +./buid/bin/sentry --datadir= --sentry.api.addr=localhost:9091 +./buid/bin/sentry --datadir= --sentry.api.addr=localhost:9191 +./build/bin/erigon --sentry.api.addr="localhost:9091,localhost:9191" ``` -The command above will expect the p2p sentry running on the same computer, but on the port `9999` +The command above will expect the p2p sentry running on the same computer, but on the port `9091` Options `--nat`, `--port`, `--staticpeers`, `--netrestrict`, `--discovery` are also available. diff --git a/cmd/sentry/sentry/sentry_api.go b/cmd/sentry/sentry/sentry_api.go index b76ca04b045..f62a1464cff 100644 --- a/cmd/sentry/sentry/sentry_api.go +++ b/cmd/sentry/sentry/sentry_api.go @@ -49,7 +49,7 @@ func (cs *MultiClient) SendBodyRequest(ctx context.Context, req *bodydownload.Bo var bytes []byte var err error bytes, err = rlp.EncodeToBytes(ð.GetBlockBodiesPacket66{ - RequestId: rand.Uint64(), + RequestId: rand.Uint64(), // nolint: gosec GetBlockBodiesPacket: req.Hashes, }) if err != nil { @@ -88,7 +88,7 @@ func (cs *MultiClient) SendHeaderRequest(ctx context.Context, req *headerdownloa case eth.ETH66, eth.ETH67: //log.Info(fmt.Sprintf("Sending header request {hash: %x, height: %d, length: %d}", req.Hash, req.Number, req.Length)) reqData := ð.GetBlockHeadersPacket66{ - RequestId: rand.Uint64(), + RequestId: rand.Uint64(), // nolint: gosec GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Amount: req.Length, Reverse: req.Reverse, @@ -130,7 +130,7 @@ func (cs *MultiClient) SendHeaderRequest(ctx context.Context, req *headerdownloa func (cs *MultiClient) randSentryIndex() (int, bool, func() (int, bool)) { var i int if len(cs.sentries) > 1 { - i = rand.Intn(len(cs.sentries) - 1) + i = rand.Intn(len(cs.sentries) - 1) // nolint: gosec } to := i return i, true, 
func() (int, bool) { diff --git a/cmd/sentry/sentry/sentry_grpc_server.go b/cmd/sentry/sentry/sentry_grpc_server.go index 6fe3fc4c0f7..4ed8c085837 100644 --- a/cmd/sentry/sentry/sentry_grpc_server.go +++ b/cmd/sentry/sentry/sentry_grpc_server.go @@ -3,6 +3,7 @@ package sentry import ( "bytes" "context" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -70,8 +71,13 @@ func NewPeerInfo(peer *p2p.Peer, rw p2p.MsgReadWriter) *PeerInfo { ctx, cancel := context.WithCancel(context.Background()) p := &PeerInfo{peer: peer, rw: rw, removed: make(chan struct{}), tasks: make(chan func(), 16), ctx: ctx, ctxCancel: cancel} + + p.lock.RLock() + t := p.tasks + p.lock.RUnlock() + go func() { // each peer has own worker, then slow - for f := range p.tasks { + for f := range t { f() } }() @@ -506,11 +512,12 @@ func NewGrpcServer(ctx context.Context, dialCandidates enode.Iterator, readNodeI DialCandidates: dialCandidates, Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error { peerID := peer.Pubkey() + printablePeerID := hex.EncodeToString(peerID[:])[:20] if ss.getPeer(peerID) != nil { - log.Trace(fmt.Sprintf("[%s] Peer already has connection", peerID)) + log.Trace(fmt.Sprintf("[%s] Peer already has connection", printablePeerID)) return nil } - log.Trace(fmt.Sprintf("[%s] Start with peer", peerID)) + log.Trace(fmt.Sprintf("[%s] Start with peer", printablePeerID)) peerInfo := NewPeerInfo(peer, rw) defer peerInfo.Close() @@ -522,9 +529,9 @@ func NewGrpcServer(ctx context.Context, dialCandidates enode.Iterator, readNodeI return ss.startSync(ctx, bestHash, peerID) }) if err != nil { - return fmt.Errorf("handshake to peer %s: %w", peerID, err) + return fmt.Errorf("handshake to peer %s: %w", printablePeerID, err) } - log.Trace(fmt.Sprintf("[%s] Received status message OK", peerID), "name", peer.Name()) + log.Trace(fmt.Sprintf("[%s] Received status message OK", printablePeerID), "name", peer.Name()) err = runPeer( ctx, @@ -535,7 +542,7 @@ func NewGrpcServer(ctx context.Context, 
dialCandidates enode.Iterator, readNodeI ss.send, ss.hasSubscribers, ) // runPeer never returns a nil error - log.Trace(fmt.Sprintf("[%s] Error while running peer: %v", peerID, err)) + log.Trace(fmt.Sprintf("[%s] Error while running peer: %v", printablePeerID, err)) ss.sendGonePeerToClients(gointerfaces.ConvertHashToH512(peerID)) return nil }, @@ -637,7 +644,7 @@ func (ss *GrpcServer) startSync(ctx context.Context, bestHash common.Hash, peerI switch ss.Protocol.Version { case eth.ETH66, eth.ETH67: b, err := rlp.EncodeToBytes(ð.GetBlockHeadersPacket66{ - RequestId: rand.Uint64(), + RequestId: rand.Uint64(), // nolint: gosec GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Amount: 1, Reverse: false, diff --git a/cmd/sentry/sentry/sentry_multi_client.go b/cmd/sentry/sentry/sentry_multi_client.go index 67e640cf7b9..f31d4c19ade 100644 --- a/cmd/sentry/sentry/sentry_multi_client.go +++ b/cmd/sentry/sentry/sentry_multi_client.go @@ -20,9 +20,11 @@ import ( proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" proto_types "github.com/ledgerwatch/erigon-lib/gointerfaces/types" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb" "github.com/ledgerwatch/log/v3" "google.golang.org/grpc" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" "google.golang.org/protobuf/types/known/emptypb" @@ -34,6 +36,7 @@ import ( "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/stages/bodydownload" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" @@ -209,7 +212,7 @@ func pumpStreamLoop[TMessage interface{}]( go func() { for req := range reqs { if err := handleInboundMessage(ctx, req, sentry); err != nil { - log.Warn("Handling incoming message", "stream", 
streamName, "err", err) + log.Debug("Handling incoming message", "stream", streamName, "err", err) } if wg != nil { wg.Done() @@ -241,22 +244,26 @@ func pumpStreamLoop[TMessage interface{}]( // MultiClient - does handle request/response/subscriptions to multiple sentries // each sentry may support same or different p2p protocol type MultiClient struct { - lock sync.RWMutex - Hd *headerdownload.HeaderDownload - Bd *bodydownload.BodyDownload - nodeName string - sentries []direct.SentryClient - headHeight uint64 - headHash common.Hash - headTd *uint256.Int - ChainConfig *params.ChainConfig - forks []uint64 - genesisHash common.Hash - networkId uint64 - db kv.RwDB - Engine consensus.Engine - blockReader services.HeaderAndCanonicalReader - logPeerInfo bool + lock sync.RWMutex + Hd *headerdownload.HeaderDownload + Bd *bodydownload.BodyDownload + IsMock bool + forkValidator *engineapi.ForkValidator + nodeName string + sentries []direct.SentryClient + headHeight uint64 + headHash common.Hash + headTd *uint256.Int + ChainConfig *params.ChainConfig + forks []uint64 + genesisHash common.Hash + networkId uint64 + db kv.RwDB + Engine consensus.Engine + blockReader services.HeaderAndCanonicalReader + logPeerInfo bool + + historyV3 bool } func NewMultiClient( @@ -270,13 +277,19 @@ func NewMultiClient( syncCfg ethconfig.Sync, blockReader services.HeaderAndCanonicalReader, logPeerInfo bool, + forkValidator *engineapi.ForkValidator, ) (*MultiClient, error) { + historyV3 := fromdb.HistoryV3(db) + hd := headerdownload.NewHeaderDownload( 512, /* anchorLimit */ 1024*1024, /* linkLimit */ engine, blockReader, ) + if chainConfig.TerminalTotalDifficultyPassed { + hd.SetPOSSync(true) + } if err := hd.RecoverFromDb(db); err != nil { return nil, fmt.Errorf("recovery from DB failed: %w", err) @@ -284,14 +297,16 @@ func NewMultiClient( bd := bodydownload.NewBodyDownload(syncCfg.BlockDownloaderWindow /* outstandingLimit */, engine) cs := &MultiClient{ - nodeName: nodeName, - Hd: hd, - Bd: bd, - 
sentries: sentries, - db: db, - Engine: engine, - blockReader: blockReader, - logPeerInfo: logPeerInfo, + nodeName: nodeName, + Hd: hd, + Bd: bd, + sentries: sentries, + db: db, + Engine: engine, + blockReader: blockReader, + logPeerInfo: logPeerInfo, + forkValidator: forkValidator, + historyV3: historyV3, } cs.ChainConfig = chainConfig cs.forks = forkid.GatherForks(cs.ChainConfig) @@ -323,7 +338,7 @@ func (cs *MultiClient) newBlockHashes66(ctx context.Context, req *proto_sentry.I } //log.Info(fmt.Sprintf("Sending header request {hash: %x, height: %d, length: %d}", announce.Hash, announce.Number, 1)) b, err := rlp.EncodeToBytes(ð.GetBlockHeadersPacket66{ - RequestId: rand.Uint64(), + RequestId: rand.Uint64(), // nolint: gosec GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ Amount: 1, Reverse: false, @@ -400,10 +415,10 @@ func (cs *MultiClient) blockHeaders(ctx context.Context, pkt eth.BlockHeadersPac if cs.Hd.POSSync() { sort.Sort(headerdownload.HeadersReverseSort(csHeaders)) // Sorting by reverse order of block heights tx, err := cs.db.BeginRo(ctx) - defer tx.Rollback() if err != nil { return err } + defer tx.Rollback() penalties, err := cs.Hd.ProcessHeadersPOS(csHeaders, tx, ConvertH512ToPeerID(peerID)) if err != nil { return err @@ -466,6 +481,24 @@ func (cs *MultiClient) newBlock66(ctx context.Context, inreq *proto_sentry.Inbou if segments, penalty, err := cs.Hd.SingleHeaderAsSegment(headerRaw, request.Block.Header(), true /* penalizePoSBlocks */); err == nil { if penalty == headerdownload.NoPenalty { + propagate := !cs.ChainConfig.TerminalTotalDifficultyPassed + // Do not propagate blocks who are post TTD + firstPosSeen := cs.Hd.FirstPoSHeight() + if firstPosSeen != nil && propagate { + propagate = *firstPosSeen >= segments[0].Number + } + if !cs.IsMock && propagate { + if cs.forkValidator != nil { + cs.forkValidator.TryAddingPoWBlock(request.Block) + } + cs.PropagateNewBlockHashes(ctx, []headerdownload.Announce{ + { + Number: segments[0].Number, + Hash: 
segments[0].Hash, + }, + }) + } + cs.Hd.ProcessHeaders(segments, true /* newBlock */, ConvertH512ToPeerID(inreq.PeerId)) // There is only one segment in this case } else { outreq := proto_sentry.PenalizePeerRequest{ @@ -499,7 +532,7 @@ func (cs *MultiClient) newBlock66(ctx context.Context, inreq *proto_sentry.Inbou func (cs *MultiClient) blockBodies66(inreq *proto_sentry.InboundMessage, _ direct.SentryClient) error { var request eth.BlockRawBodiesPacket66 if err := rlp.DecodeBytes(inreq.Data, &request); err != nil { - return fmt.Errorf("decode BlockBodiesPacket66: %w, data: %x", err, inreq.Data) + return fmt.Errorf("decode BlockBodiesPacket66: %w", err) } txs, uncles := request.BlockRawBodiesPacket.Unpack() cs.Bd.DeliverBodies(&txs, &uncles, uint64(len(inreq.Data)), ConvertH512ToPeerID(inreq.PeerId)) @@ -589,6 +622,10 @@ func (cs *MultiClient) getBlockBodies66(ctx context.Context, inreq *proto_sentry } func (cs *MultiClient) getReceipts66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { + if cs.historyV3 { // historyV3 doesn't store receipts in DB + return nil + } + var query eth.GetReceiptsPacket66 if err := rlp.DecodeBytes(inreq.Data, &query); err != nil { return fmt.Errorf("decoding getReceipts66: %w, data: %x", err, inreq.Data) @@ -737,7 +774,7 @@ func GrpcClient(ctx context.Context, sentryAddr string) (*direct.SentryClientRem grpc.WithKeepaliveParams(keepalive.ClientParameters{}), } - dialOpts = append(dialOpts, grpc.WithInsecure()) + dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) conn, err := grpc.DialContext(ctx, sentryAddr, dialOpts...) if err != nil { return nil, fmt.Errorf("creating client connection to sentry P2P: %w", err) diff --git a/cmd/starknet/README.md b/cmd/starknet/README.md deleted file mode 100644 index c203f7cf519..00000000000 --- a/cmd/starknet/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# How to deploy cairo smart contract - -1. 
Compile cairo smart contract - - `starknet-compile contract.cairo --output contract_compiled.json --abi contract_abi.json` - - -2. Generate payload for `starknet_sendRawTransaction` PRC method - - ``` - go run ./cmd/starknet/main.go generateRawTx - -c ./cairo/contract.json - -o /cairo/send_raw_transaction - -s salt_test - -g 11452296 - -k b9a8b19ff082a7f4b943fcbe0da6cce6ce2c860090f05d031f463412ab534e95 - ``` - - Command syntax: `go run main.go generateRawTx --help` - - -3. Use command output in RPC call - -```json -"params":["0x03f86583127ed80180800180019637623232363136323639323233613230356235643764c080a0b44c2f4e18ca27e621171da5cf3a0c875c0749c7b998ec2759974280d987143aa04f01823122d972baa1a03b113535d9f9057fd9366fd8770e766b91f835b88ea6"], -``` diff --git a/cmd/starknet/cmd/generate_raw_tx.go b/cmd/starknet/cmd/generate_raw_tx.go deleted file mode 100644 index c74d384e9f6..00000000000 --- a/cmd/starknet/cmd/generate_raw_tx.go +++ /dev/null @@ -1,125 +0,0 @@ -package cmd - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/ledgerwatch/erigon-lib/kv" - kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/erigon/cmd/starknet/services" - "github.com/ledgerwatch/erigon/common/paths" - "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" -) - -const ( - DefaultGas = 11_000_000 - DefaultNonce = 0 -) - -type Flags struct { - Contract string - Salt string - Gas uint64 - Nonce uint64 - PrivateKey string - DataDir string - Chaindata string - Output string -} - -var generateRawTxCmd = &cobra.Command{ - Use: "generateRawTx", - Short: "Generate data for starknet_sendRawTransaction RPC method", -} - -func init() { - generateRawTxCmd, flags := config() - generateRawTxCmd.RunE = func(cmd *cobra.Command, args []string) error { - logger := log.New() - db, err := db(flags, logger) - if err != nil { - return err - } - defer db.Close() - - rawTxGenerator := services.NewRawTxGenerator(flags.PrivateKey) - - fs := os.DirFS("/") - buf := 
bytes.NewBuffer(nil) - - config := &services.Config{ - ContractFileName: strings.Trim(flags.Contract, "/"), - Salt: []byte(flags.Salt), - Gas: flags.Gas, - Nonce: flags.Nonce, - } - - err = rawTxGenerator.CreateFromFS(cmd.Context(), fs, db, config, buf) - if err != nil { - return err - } - - if flags.Output != "" { - outputFile, err := os.Create(flags.Output) - if err != nil { - return fmt.Errorf("could not create output file: %v", flags.Output) - } - defer outputFile.Close() - - _, err = outputFile.WriteString(buf.String()) - if err != nil { - return fmt.Errorf("could not write to output file: %v", flags.Output) - } - } else { - fmt.Println(buf.String()) - } - - return err - } - - rootCmd.AddCommand(generateRawTxCmd) -} - -func config() (*cobra.Command, *Flags) { - flags := &Flags{} - generateRawTxCmd.PersistentFlags().StringVar(&flags.Contract, "contract", "", "Path to compiled cairo contract in JSON format") - generateRawTxCmd.MarkPersistentFlagRequired("contract") - - generateRawTxCmd.PersistentFlags().StringVar(&flags.Salt, "salt", "", "Cairo contract address salt") - generateRawTxCmd.MarkPersistentFlagRequired("salt") - - generateRawTxCmd.PersistentFlags().Uint64Var(&flags.Gas, "gas", DefaultGas, "Gas") - - generateRawTxCmd.PersistentFlags().Uint64Var(&flags.Nonce, "nonce", DefaultNonce, "Nonce") - - generateRawTxCmd.PersistentFlags().StringVar(&flags.PrivateKey, "private_key", "", "Private key") - generateRawTxCmd.MarkPersistentFlagRequired("private_key") - - generateRawTxCmd.PersistentFlags().StringVar(&flags.DataDir, "datadir", "", "path to Erigon working directory") - - generateRawTxCmd.PersistentFlags().StringVarP(&flags.Output, "output", "o", "", "Path to file where sign transaction will be saved. 
Print to stdout if empty.") - - generateRawTxCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { - if flags.DataDir == "" { - flags.DataDir = paths.DefaultDataDir() - } - if flags.Chaindata == "" { - flags.Chaindata = filepath.Join(flags.DataDir, "chaindata") - } - return nil - } - - return generateRawTxCmd, flags -} - -func db(flags *Flags, logger log.Logger) (kv.RoDB, error) { - rwKv, err := kv2.NewMDBX(logger).Path(flags.Chaindata).Readonly().Open() - if err != nil { - return nil, err - } - return rwKv, nil -} diff --git a/cmd/starknet/cmd/root.go b/cmd/starknet/cmd/root.go deleted file mode 100644 index a7d1d3ab357..00000000000 --- a/cmd/starknet/cmd/root.go +++ /dev/null @@ -1,19 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" -) - -// rootCmd represents the base command when called without any subcommands -var rootCmd = &cobra.Command{ - Use: "starknet", - Short: "Starknet cli commands", -} - -func Execute() { - cobra.CheckErr(rootCmd.Execute()) -} - -func init() { - cobra.OnInitialize() -} diff --git a/cmd/starknet/main.go b/cmd/starknet/main.go deleted file mode 100644 index 47a140a703e..00000000000 --- a/cmd/starknet/main.go +++ /dev/null @@ -1,7 +0,0 @@ -package main - -import "github.com/ledgerwatch/erigon/cmd/starknet/cmd" - -func main() { - cmd.Execute() -} diff --git a/cmd/starknet/services/raw_tx_generator.go b/cmd/starknet/services/raw_tx_generator.go deleted file mode 100644 index f3728bcfa21..00000000000 --- a/cmd/starknet/services/raw_tx_generator.go +++ /dev/null @@ -1,136 +0,0 @@ -package services - -import ( - "bytes" - "context" - "crypto/ecdsa" - "encoding/hex" - "errors" - "fmt" - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - 
"github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/turbo/adapter" - "io/fs" -) - -var ( - ErrReadContract = errors.New("contract read error") - ErrInvalidPrivateKey = errors.New("invalid private key") -) - -type Config struct { - ContractFileName string - Salt []byte - Gas uint64 - Nonce uint64 -} - -func NewRawTxGenerator(privateKey string) *RawTxGenerator { - return &RawTxGenerator{ - privateKey: privateKey, - } -} - -type RawTxGenerator struct { - privateKey string -} - -func (g RawTxGenerator) CreateFromFS(ctx context.Context, fileSystem fs.FS, db kv.RoDB, config *Config, writer *bytes.Buffer) error { - privateKey, err := crypto.HexToECDSA(g.privateKey) - if err != nil { - return ErrInvalidPrivateKey - } - - address, err := addressFromPrivateKey(privateKey) - if err != nil { - return err - } - - nonce, err := getNonce(ctx, db, address, config.Nonce) - if err != nil { - return err - } - - contract, err := fs.ReadFile(fileSystem, config.ContractFileName) - if err != nil { - return ErrReadContract - } - - enc := make([]byte, hex.EncodedLen(len(contract))) - hex.Encode(enc, contract) - - tx := types.StarknetTransaction{ - CommonTx: types.CommonTx{ - Nonce: nonce + 1, - Value: uint256.NewInt(1), - Gas: config.Gas, - Data: enc, - }, - Salt: config.Salt, - FeeCap: uint256.NewInt(875000000), - Tip: uint256.NewInt(100000), - } - - sighash := tx.SigningHash(params.FermionChainConfig.ChainID) - - signature, _ := crypto.Sign(sighash[:], privateKey) - signer := types.MakeSigner(params.FermionChainConfig, 1) - - signedTx, err := tx.WithSignature(*signer, signature) - if err != nil { - return err - } - - err = signedTx.(rlp.Encoder).EncodeRLP(writer) - signedTxRlp := writer.Bytes() - writer.Reset() - writer.WriteString(hexutil.Encode(signedTxRlp)) - - if err != nil { - return errors.New("can not save signed tx") - } - - return nil -} - -func addressFromPrivateKey(privateKey *ecdsa.PrivateKey) (common.Address, error) { 
- publicKey := privateKey.Public() - publicKeyECDSA, _ := publicKey.(*ecdsa.PublicKey) - return crypto.PubkeyToAddress(*publicKeyECDSA), nil -} - -func getNonce(ctx context.Context, db kv.RoDB, address common.Address, configNonce uint64) (uint64, error) { - if configNonce != 0 { - return configNonce, nil - } - - var nonce uint64 = 0 - - tx, err := db.BeginRo(ctx) - if err != nil { - return nonce, fmt.Errorf("cannot open tx: %w", err) - } - defer tx.Rollback() - blockNumber, err := stages.GetStageProgress(tx, stages.Execution) - if err != nil { - return nonce, err - } - reader := adapter.NewStateReader(tx, blockNumber) - acc, err := reader.ReadAccountData(address) - if err != nil { - return nonce, err - } - - if acc == nil { - return 0, nil - } - - return acc.Nonce, nil -} diff --git a/cmd/starknet/services/raw_tx_generator_test.go b/cmd/starknet/services/raw_tx_generator_test.go deleted file mode 100644 index e20ad33b81b..00000000000 --- a/cmd/starknet/services/raw_tx_generator_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package services_test - -import ( - "bytes" - "context" - "encoding/hex" - "github.com/ledgerwatch/erigon-lib/kv/memdb" - "github.com/ledgerwatch/erigon/cmd/starknet/services" - "testing" - "testing/fstest" - - "github.com/ledgerwatch/erigon/crypto" -) - -func TestCreate(t *testing.T) { - privateKey := "26e86e45f6fc45ec6e2ecd128cec80fa1d1505e5507dcd2ae58c3130a7a97b48" - - var cases = []struct { - name string - privateKey string - config *services.Config - want string - error error - }{ - {name: "invalid private key", privateKey: "abc", config: &services.Config{ - ContractFileName: "not_exist.json", - }, error: services.ErrInvalidPrivateKey}, - {name: "contract file not found", privateKey: generatePrivateKey(t), config: &services.Config{ - ContractFileName: "not_exist.json", - }, error: services.ErrReadContract}, - {name: "success", privateKey: privateKey, config: &services.Config{ - ContractFileName: "contract_test.json", - Salt: 
[]byte("contract_address_salt"), - Gas: 1, - Nonce: 0, - }, want: "0xb88503f88283127ed801830186a084342770c0018001963762323236313632363932323361323035623564376495636f6e74726163745f616464726573735f73616c74c080a08b88467d0a9a6cba87ec6c2ad9e7399d12a1b6f7f5b951bdd2c5c2ea08b76134a0472e1b37ca5f87c9c38690718c6b2b9db1a3d5398dc664fc4e158ab60d02d64b"}, - } - - fs := fstest.MapFS{ - "contract_test.json": {Data: []byte("{\"abi\": []}")}, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - rawTxGenerator := services.NewRawTxGenerator(tt.privateKey) - - ctx := context.Background() - buf := bytes.NewBuffer(nil) - db := memdb.NewTestDB(t) - - err := rawTxGenerator.CreateFromFS(ctx, fs, db, tt.config, buf) - - if tt.error == nil { - assertNoError(t, err) - - got := buf.String() - - if got != tt.want { - t.Errorf("got %q not equals want %q", got, tt.want) - } - } else { - assertError(t, err, tt.error) - } - }) - } -} - -func generatePrivateKey(t testing.TB) string { - t.Helper() - - privateKey, err := crypto.GenerateKey() - if err != nil { - t.Error(err) - } - - return hex.EncodeToString(crypto.FromECDSA(privateKey)) -} - -func assertNoError(t testing.TB, got error) { - t.Helper() - - if got != nil { - t.Fatal("got an error but didn't want one") - } -} - -func assertError(t testing.TB, got error, want error) { - t.Helper() - - if got == nil { - t.Fatal("didn't get an error but wanted one") - } - - if got != want { - t.Errorf("got %q, want %q", got, want) - } -} diff --git a/cmd/state/commands/check_change_sets.go b/cmd/state/commands/check_change_sets.go index 546dccd23b7..656791edaf4 100644 --- a/cmd/state/commands/check_change_sets.go +++ b/cmd/state/commands/check_change_sets.go @@ -22,7 +22,6 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/turbo/services" 
"github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/log/v3" @@ -73,6 +72,19 @@ func CheckChangeSets(genesis *core.Genesis, logger log.Logger, blockNum uint64, if err != nil { return err } + var blockReader services.FullBlockReader + var allSnapshots *snapshotsync.RoSnapshots + useSnapshots := ethconfig.UseSnapshotsByChainName(chainConfig.ChainName) && snapshotsCli + if useSnapshots { + allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) + defer allSnapshots.Close() + if err := allSnapshots.ReopenFolder(); err != nil { + return fmt.Errorf("reopen snapshot segments: %w", err) + } + blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) + } else { + blockReader = snapshotsync.NewBlockReader() + } chainDb := db defer chainDb.Close() historyDb := chainDb @@ -109,19 +121,6 @@ func CheckChangeSets(genesis *core.Genesis, logger log.Logger, blockNum uint64, commitEvery := time.NewTicker(30 * time.Second) defer commitEvery.Stop() - var blockReader services.FullBlockReader - var allSnapshots *snapshotsync.RoSnapshots - useSnapshots := ethconfig.UseSnapshotsByChainName(chainConfig.ChainName) && snapshotsCli - if useSnapshots { - allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) - defer allSnapshots.Close() - if err := allSnapshots.ReopenWithDB(db); err != nil { - return fmt.Errorf("reopen snapshot segments: %w", err) - } - blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) - } else { - blockReader = snapshotsync.NewBlockReader() - } engine := initConsensusEngine(chainConfig, logger, allSnapshots) for !interrupt { @@ -148,7 +147,7 @@ func CheckChangeSets(genesis *core.Genesis, logger log.Logger, blockNum uint64, break } reader := state.NewPlainState(historyTx, blockNum) - reader.SetTrace(blockNum == uint64(block)) + //reader.SetTrace(blockNum == uint64(block)) intraBlockState := 
state.New(reader) csw := state.NewChangeSetWriterPlain(nil /* db */, blockNum) var blockWriter state.StateWriter @@ -165,8 +164,7 @@ func CheckChangeSets(genesis *core.Genesis, logger log.Logger, blockNum uint64, } return h } - contractHasTEVM := ethdb.GetHasTEVM(rwtx) - receipts, err1 := runBlock(engine, intraBlockState, noOpWriter, blockWriter, chainConfig, getHeader, contractHasTEVM, b, vmConfig, blockNum == uint64(block)) + receipts, err1 := runBlock(engine, intraBlockState, noOpWriter, blockWriter, chainConfig, getHeader, b, vmConfig, blockNum == block) if err1 != nil { return err1 } diff --git a/cmd/state/commands/erigon2.go b/cmd/state/commands/erigon2.go index f6ad106e211..17dd6f54eb6 100644 --- a/cmd/state/commands/erigon2.go +++ b/cmd/state/commands/erigon2.go @@ -413,7 +413,7 @@ func processBlock(trace bool, txNumStart uint64, rw *ReaderWrapper, ww *WriterWr daoBlock = false } ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } @@ -629,18 +629,21 @@ func initConsensusEngine(chainConfig *params.ChainConfig, logger log.Logger, sna case chainConfig.Clique != nil: c := params.CliqueSnapshot c.DBPath = filepath.Join(datadir, "clique", "db") - engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, c, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, snapshots) + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, c, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, snapshots, true /* readonly */) case chainConfig.Aura != nil: consensusConfig := ¶ms.AuRaConfig{DBPath: filepath.Join(datadir, "aura")} - engine = 
ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, snapshots) + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, snapshots, true /* readonly */) case chainConfig.Parlia != nil: + // Apply special hacks for BSC params + params.ApplyBinanceSmartChainParams() consensusConfig := ¶ms.ParliaConfig{DBPath: filepath.Join(datadir, "parlia")} - engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, snapshots) + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, snapshots, true /* readonly */) case chainConfig.Bor != nil: consensusConfig := &config.Bor - engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "http://localhost:1317", false, datadir, snapshots) + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallURL, false, datadir, snapshots, true /* readonly */) default: //ethash engine = ethash.NewFaker() } + return } diff --git a/cmd/state/commands/erigon22.go b/cmd/state/commands/erigon22.go deleted file mode 100644 index 279e61cfbd2..00000000000 --- a/cmd/state/commands/erigon22.go +++ /dev/null @@ -1,653 +0,0 @@ -package commands - -import ( - "container/heap" - "context" - "errors" - "fmt" - "os" - "os/signal" - "path" - "path/filepath" - "runtime" - "sync" - "sync/atomic" - "syscall" - "time" - - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/kv" - kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" - libstate "github.com/ledgerwatch/erigon-lib/state" - 
"github.com/ledgerwatch/erigon/cmd/sentry/sentry" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/misc" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/eth/stagedsync" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - datadir2 "github.com/ledgerwatch/erigon/node/nodecfg/datadir" - "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - stages2 "github.com/ledgerwatch/erigon/turbo/stages" - "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" - "golang.org/x/sync/semaphore" -) - -var ( - reset bool -) - -func init() { - erigon22Cmd.Flags().BoolVar(&reset, "reset", false, "Resets the state database and static files") - withDataDir(erigon22Cmd) - rootCmd.AddCommand(erigon22Cmd) -} - -var erigon22Cmd = &cobra.Command{ - Use: "erigon22", - Short: "Exerimental command to re-execute blocks from beginning using erigon2 histoty (ugrade 2)", - RunE: func(cmd *cobra.Command, args []string) error { - logger := log.New() - return Erigon22(genesis, logger) - }, -} - -type Worker22 struct { - lock sync.Locker - db kv.RoDB - tx kv.Tx - wg *sync.WaitGroup - rs *state.State22 - blockReader services.FullBlockReader - allSnapshots *snapshotsync.RoSnapshots - stateWriter *state.StateWriter22 - stateReader *state.StateReader22 - getHeader func(hash common.Hash, number uint64) *types.Header - ctx context.Context - engine consensus.Engine - txNums []uint64 - chainConfig *params.ChainConfig - logger log.Logger - genesis *core.Genesis - resultCh chan state.TxTask -} - -func NewWorker22(lock sync.Locker, db kv.RoDB, wg *sync.WaitGroup, rs *state.State22, - blockReader 
services.FullBlockReader, allSnapshots *snapshotsync.RoSnapshots, - txNums []uint64, chainConfig *params.ChainConfig, logger log.Logger, genesis *core.Genesis, - resultCh chan state.TxTask, -) *Worker22 { - return &Worker22{ - lock: lock, - db: db, - wg: wg, - rs: rs, - blockReader: blockReader, - allSnapshots: allSnapshots, - ctx: context.Background(), - stateWriter: state.NewStateWriter22(rs), - stateReader: state.NewStateReader22(rs), - txNums: txNums, - chainConfig: chainConfig, - logger: logger, - genesis: genesis, - resultCh: resultCh, - } -} - -func (rw *Worker22) ResetTx() { - if rw.tx != nil { - rw.tx.Rollback() - rw.tx = nil - } -} - -func (rw *Worker22) run() { - defer rw.wg.Done() - rw.getHeader = func(hash common.Hash, number uint64) *types.Header { - h, err := rw.blockReader.Header(rw.ctx, nil, hash, number) - if err != nil { - panic(err) - } - return h - } - rw.engine = initConsensusEngine(rw.chainConfig, rw.logger, rw.allSnapshots) - for txTask, ok := rw.rs.Schedule(); ok; txTask, ok = rw.rs.Schedule() { - rw.runTxTask(&txTask) - rw.resultCh <- txTask // Needs to have outside of the lock - } -} - -func (rw *Worker22) runTxTask(txTask *state.TxTask) { - rw.lock.Lock() - defer rw.lock.Unlock() - if rw.tx == nil { - var err error - if rw.tx, err = rw.db.BeginRo(rw.ctx); err != nil { - panic(err) - } - rw.stateReader.SetTx(rw.tx) - } - txTask.Error = nil - rw.stateReader.SetTxNum(txTask.TxNum) - rw.stateWriter.SetTxNum(txTask.TxNum) - rw.stateReader.ResetReadSet() - rw.stateWriter.ResetWriteSet() - ibs := state.New(rw.stateReader) - daoForkTx := rw.chainConfig.DAOForkSupport && rw.chainConfig.DAOForkBlock != nil && rw.chainConfig.DAOForkBlock.Uint64() == txTask.BlockNum && txTask.TxIndex == -1 - var err error - if txTask.BlockNum == 0 && txTask.TxIndex == -1 { - //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) - // Genesis block - _, ibs, err = rw.genesis.ToBlock() - if err != nil { - panic(err) - } - } else if daoForkTx { 
- //fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txTask.TxNum, txTask.BlockNum) - misc.ApplyDAOHardFork(ibs) - ibs.SoftFinalise() - } else if txTask.TxIndex == -1 { - // Block initialisation - } else if txTask.Final { - if txTask.BlockNum > 0 { - //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txTask.TxNum, txTask.BlockNum) - // End of block transaction in a block - if _, _, err := rw.engine.Finalize(rw.chainConfig, txTask.Header, ibs, txTask.Block.Transactions(), txTask.Block.Uncles(), nil /* receipts */, nil, nil, nil); err != nil { - panic(fmt.Errorf("finalize of block %d failed: %w", txTask.BlockNum, err)) - } - } - } else { - //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) - txHash := txTask.Tx.Hash() - gp := new(core.GasPool).AddGas(txTask.Tx.GetGas()) - vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, txTask.BlockNum)} - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) - getHashFn := core.GetHashFn(txTask.Header, rw.getHeader) - blockContext := core.NewEVMBlockContext(txTask.Header, getHashFn, rw.engine, nil /* author */, contractHasTEVM) - msg, err := txTask.Tx.AsMessage(*types.MakeSigner(rw.chainConfig, txTask.BlockNum), txTask.Header.BaseFee, txTask.Rules) - if err != nil { - panic(err) - } - txContext := core.NewEVMTxContext(msg) - vmenv := vm.NewEVM(blockContext, txContext, ibs, rw.chainConfig, vmConfig) - if _, err = core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* gasBailout */); err != nil { - txTask.Error = err - //fmt.Printf("error=%v\n", err) - } - // Update the state with pending changes - ibs.SoftFinalise() - } - // Prepare read set, write set and balanceIncrease set and send for serialisation - if txTask.Error == nil { - txTask.BalanceIncreaseSet = ibs.BalanceIncreaseSet() - //for addr, bal := range txTask.BalanceIncreaseSet { - 
// fmt.Printf("[%x]=>[%d]\n", addr, &bal) - //} - if err = ibs.MakeWriteSet(txTask.Rules, rw.stateWriter); err != nil { - panic(err) - } - txTask.ReadLists = rw.stateReader.ReadSet() - txTask.WriteLists = rw.stateWriter.WriteSet() - txTask.AccountPrevs, txTask.AccountDels, txTask.StoragePrevs, txTask.CodePrevs = rw.stateWriter.PrevAndDels() - size := (20 + 32) * len(txTask.BalanceIncreaseSet) - for _, list := range txTask.ReadLists { - for _, b := range list.Keys { - size += len(b) - } - for _, b := range list.Vals { - size += len(b) - } - } - for _, list := range txTask.WriteLists { - for _, b := range list.Keys { - size += len(b) - } - for _, b := range list.Vals { - size += len(b) - } - } - txTask.ResultsSize = int64(size) - } -} - -func processResultQueue(rws *state.TxTaskQueue, outputTxNum *uint64, rs *state.State22, agg *libstate.Aggregator22, applyTx kv.Tx, - triggerCount *uint64, outputBlockNum *uint64, repeatCount *uint64, resultsSize *int64) { - for rws.Len() > 0 && (*rws)[0].TxNum == *outputTxNum { - txTask := heap.Pop(rws).(state.TxTask) - atomic.AddInt64(resultsSize, -txTask.ResultsSize) - if txTask.Error == nil && rs.ReadsValid(txTask.ReadLists) { - if err := rs.Apply(txTask.Rules.IsSpuriousDragon, applyTx, txTask, agg); err != nil { - panic(err) - } - *triggerCount += rs.CommitTxNum(txTask.Sender, txTask.TxNum) - *outputTxNum++ - *outputBlockNum = txTask.BlockNum - //fmt.Printf("Applied %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) - } else { - rs.AddWork(txTask) - *repeatCount++ - //fmt.Printf("Rolled back %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) - } - } -} - -func Erigon22(genesis *core.Genesis, logger log.Logger) error { - sigs := make(chan os.Signal, 1) - interruptCh := make(chan bool, 1) - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - - go func() { - <-sigs - interruptCh <- true - }() - var err error - ctx := context.Background() - reconDbPath := path.Join(datadir, "db22") - 
if reset { - if _, err = os.Stat(reconDbPath); err != nil { - if !errors.Is(err, os.ErrNotExist) { - return err - } - } else if err = os.RemoveAll(reconDbPath); err != nil { - return err - } - } - limiter := semaphore.NewWeighted(int64(runtime.NumCPU() + 1)) - db, err := kv2.NewMDBX(logger).Path(reconDbPath).RoTxsLimiter(limiter).Open() - if err != nil { - return err - } - startTime := time.Now() - var blockReader services.FullBlockReader - var allSnapshots *snapshotsync.RoSnapshots - allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) - defer allSnapshots.Close() - if err := allSnapshots.ReopenFolder(); err != nil { - return fmt.Errorf("reopen snapshot segments: %w", err) - } - blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) - // Compute mapping blockNum -> last TxNum in that block - maxBlockNum := allSnapshots.BlocksAvailable() + 1 - txNums := make([]uint64, maxBlockNum) - if err = allSnapshots.Bodies.View(func(bs []*snapshotsync.BodySegment) error { - for _, b := range bs { - if err = b.Iterate(func(blockNum, baseTxNum, txAmount uint64) { - txNums[blockNum] = baseTxNum + txAmount - }); err != nil { - return err - } - } - return nil - }); err != nil { - return fmt.Errorf("build txNum => blockNum mapping: %w", err) - } - workerCount := runtime.NumCPU() - workCh := make(chan state.TxTask, 128) - - engine := initConsensusEngine(chainConfig, logger, allSnapshots) - sentryControlServer, err := sentry.NewMultiClient( - db, - "", - chainConfig, - common.Hash{}, - engine, - 1, - nil, - ethconfig.Defaults.Sync, - blockReader, - false, - ) - if err != nil { - return err - } - cfg := ethconfig.Defaults - cfg.DeprecatedTxPool.Disable = true - cfg.Dirs = datadir2.New(datadir) - cfg.Snapshot = allSnapshots.Cfg() - stagedSync, err := stages2.NewStagedSync(context.Background(), logger, db, p2p.Config{}, cfg, sentryControlServer, datadir, &stagedsync.Notifications{}, nil, allSnapshots, nil, nil) 
- if err != nil { - return err - } - rwTx, err := db.BeginRw(ctx) - if err != nil { - return err - } - defer func() { - if rwTx != nil { - rwTx.Rollback() - } - }() - execStage, err := stagedSync.StageState(stages.Execution, rwTx, db) - if err != nil { - return err - } - if !reset { - block = execStage.BlockNumber + 1 - } - rwTx.Rollback() - - rs := state.NewState22() - aggDir := path.Join(datadir, "agg22") - if reset { - if _, err = os.Stat(aggDir); err != nil { - if !errors.Is(err, os.ErrNotExist) { - return err - } - } else if err = os.RemoveAll(aggDir); err != nil { - return err - } - if err = os.MkdirAll(aggDir, 0755); err != nil { - return err - } - } - agg, err := libstate.NewAggregator22(aggDir, AggregationStep) - if err != nil { - return err - } - defer agg.Close() - var lock sync.RWMutex - reconWorkers := make([]*Worker22, workerCount) - var wg sync.WaitGroup - resultCh := make(chan state.TxTask, 128) - for i := 0; i < workerCount; i++ { - reconWorkers[i] = NewWorker22(lock.RLocker(), db, &wg, rs, blockReader, allSnapshots, txNums, chainConfig, logger, genesis, resultCh) - } - defer func() { - for i := 0; i < workerCount; i++ { - reconWorkers[i].ResetTx() - } - }() - wg.Add(workerCount) - for i := 0; i < workerCount; i++ { - go reconWorkers[i].run() - } - commitThreshold := uint64(1024 * 1024 * 1024) - resultsThreshold := int64(1024 * 1024 * 1024) - count := uint64(0) - repeatCount := uint64(0) - triggerCount := uint64(0) - prevCount := uint64(0) - prevRepeatCount := uint64(0) - //prevTriggerCount := uint64(0) - resultsSize := int64(0) - prevTime := time.Now() - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() - var rws state.TxTaskQueue - var rwsLock sync.Mutex - rwsReceiveCond := sync.NewCond(&rwsLock) - heap.Init(&rws) - var outputTxNum uint64 - if block > 0 { - outputTxNum = txNums[block-1] - } - var inputBlockNum, outputBlockNum uint64 - var prevOutputBlockNum uint64 = block - // Go-routine gathering results from the workers - var 
maxTxNum uint64 = txNums[len(txNums)-1] - go func() { - var applyTx kv.RwTx - defer func() { - if applyTx != nil { - applyTx.Rollback() - } - }() - if applyTx, err = db.BeginRw(ctx); err != nil { - panic(err) - } - agg.SetTx(applyTx) - defer rs.Finish() - var waiting, applying time.Duration - waitStart := time.Now() - var waitEnd time.Time - for outputTxNum < atomic.LoadUint64(&maxTxNum) { - select { - case txTask := <-resultCh: - waitEnd = time.Now() - waiting += (waitEnd.Sub(waitStart)) - //fmt.Printf("Saved %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) - func() { - rwsLock.Lock() - defer rwsLock.Unlock() - atomic.AddInt64(&resultsSize, txTask.ResultsSize) - heap.Push(&rws, txTask) - processResultQueue(&rws, &outputTxNum, rs, agg, applyTx, &triggerCount, &outputBlockNum, &repeatCount, &resultsSize) - rwsReceiveCond.Signal() - }() - waitStart = time.Now() - applying += waitStart.Sub(waitEnd) - case <-logEvery.C: - var m runtime.MemStats - libcommon.ReadMemStats(&m) - sizeEstimate := rs.SizeEstimate() - count = rs.DoneCount() - currentTime := time.Now() - interval := currentTime.Sub(prevTime) - speedTx := float64(count-prevCount) / (float64(interval) / float64(time.Second)) - speedBlock := float64(outputBlockNum-prevOutputBlockNum) / (float64(interval) / float64(time.Second)) - var repeatRatio float64 - if count > prevCount { - repeatRatio = 100.0 * float64(repeatCount-prevRepeatCount) / float64(count-prevCount) - } - log.Info("Transaction replay", - //"workers", workerCount, - "at block", outputBlockNum, - "input block", atomic.LoadUint64(&inputBlockNum), - "blk/s", fmt.Sprintf("%.1f", speedBlock), - "tx/s", fmt.Sprintf("%.1f", speedTx), - "waiting", waiting, - "applying", applying, - //"repeats", repeatCount-prevRepeatCount, - //"triggered", triggerCount-prevTriggerCount, - "result queue", rws.Len(), - "results size", libcommon.ByteCount(uint64(atomic.LoadInt64(&resultsSize))), - "repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), - 
"buffer", libcommon.ByteCount(sizeEstimate), - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), - ) - prevTime = currentTime - prevCount = count - prevOutputBlockNum = outputBlockNum - prevRepeatCount = repeatCount - //prevTriggerCount = triggerCount - if sizeEstimate >= commitThreshold { - commitStart := time.Now() - log.Info("Committing...") - err := func() error { - rwsLock.Lock() - defer rwsLock.Unlock() - // Drain results (and process) channel because read sets do not carry over - for { - var drained bool - for !drained { - select { - case txTask := <-resultCh: - atomic.AddInt64(&resultsSize, txTask.ResultsSize) - heap.Push(&rws, txTask) - default: - drained = true - } - } - processResultQueue(&rws, &outputTxNum, rs, agg, applyTx, &triggerCount, &outputBlockNum, &repeatCount, &resultsSize) - if rws.Len() == 0 { - break - } - } - rwsReceiveCond.Signal() - lock.Lock() // This is to prevent workers from starting work on any new txTask - defer lock.Unlock() - // Drain results channel because read sets do not carry over - var drained bool - for !drained { - select { - case txTask := <-resultCh: - rs.AddWork(txTask) - default: - drained = true - } - } - // Drain results queue as well - for rws.Len() > 0 { - txTask := heap.Pop(&rws).(state.TxTask) - atomic.AddInt64(&resultsSize, -txTask.ResultsSize) - rs.AddWork(txTask) - } - if err = applyTx.Commit(); err != nil { - return err - } - for i := 0; i < workerCount; i++ { - reconWorkers[i].ResetTx() - } - rwTx, err = db.BeginRw(ctx) - if err != nil { - return err - } - if err = rs.Flush(rwTx); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - if applyTx, err = db.BeginRw(ctx); err != nil { - return err - } - agg.SetTx(applyTx) - return nil - }() - if err != nil { - panic(err) - } - log.Info("Committed", "time", time.Since(commitStart)) - } - waiting = 0 - applying = 0 - } - } - if err = applyTx.Commit(); err != nil { - panic(err) - } - }() - var inputTxNum 
uint64 - if block > 0 { - inputTxNum = txNums[block-1] - } - var header *types.Header - var blockNum uint64 -loop: - for blockNum = block; blockNum < maxBlockNum; blockNum++ { - atomic.StoreUint64(&inputBlockNum, blockNum) - rules := chainConfig.Rules(blockNum) - if header, err = blockReader.HeaderByNumber(ctx, nil, blockNum); err != nil { - return err - } - blockHash := header.Hash() - b, _, err := blockReader.BlockWithSenders(ctx, nil, blockHash, blockNum) - if err != nil { - return err - } - txs := b.Transactions() - for txIndex := -1; txIndex <= len(txs); txIndex++ { - // Do not oversend, wait for the result heap to go under certain size - func() { - rwsLock.Lock() - defer rwsLock.Unlock() - for rws.Len() > 128 || atomic.LoadInt64(&resultsSize) >= resultsThreshold || rs.SizeEstimate() >= commitThreshold { - rwsReceiveCond.Wait() - } - }() - txTask := state.TxTask{ - Header: header, - BlockNum: blockNum, - Rules: rules, - Block: b, - TxNum: inputTxNum, - TxIndex: txIndex, - BlockHash: blockHash, - Final: txIndex == len(txs), - } - if txIndex >= 0 && txIndex < len(txs) { - txTask.Tx = txs[txIndex] - if sender, ok := txs[txIndex].GetSender(); ok { - txTask.Sender = &sender - } - if ok := rs.RegisterSender(txTask); ok { - rs.AddWork(txTask) - } - } else { - rs.AddWork(txTask) - } - inputTxNum++ - } - // Check for interrupts - select { - case <-interruptCh: - log.Info(fmt.Sprintf("interrupted, please wait for cleanup, next run will start with block %d", blockNum+1)) - atomic.StoreUint64(&maxTxNum, inputTxNum) - break loop - default: - } - } - close(workCh) - wg.Wait() - for i := 0; i < workerCount; i++ { - reconWorkers[i].ResetTx() - } - rwTx, err = db.BeginRw(ctx) - if err != nil { - return err - } - if err = rs.Flush(rwTx); err != nil { - return err - } - if err = execStage.Update(rwTx, blockNum); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - if rwTx, err = db.BeginRw(ctx); err != nil { - return err - } - 
log.Info("Transaction replay complete", "duration", time.Since(startTime)) - log.Info("Computing hashed state") - tmpDir := filepath.Join(datadir, "tmp") - if err = rwTx.ClearBucket(kv.HashedAccounts); err != nil { - return err - } - if err = rwTx.ClearBucket(kv.HashedStorage); err != nil { - return err - } - if err = rwTx.ClearBucket(kv.ContractCode); err != nil { - return err - } - if err = stagedsync.PromoteHashedStateCleanly("recon", rwTx, stagedsync.StageHashStateCfg(db, tmpDir), ctx); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - if rwTx, err = db.BeginRw(ctx); err != nil { - return err - } - var rootHash common.Hash - if rootHash, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil /* HeaderDownload */), common.Hash{}, make(chan struct{}, 1)); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - if rootHash != header.Root { - log.Error("Incorrect root hash", "expected", fmt.Sprintf("%x", header.Root)) - } - return nil -} diff --git a/cmd/state/commands/erigon23.go b/cmd/state/commands/erigon23.go index a342118aa58..789b6cd98e7 100644 --- a/cmd/state/commands/erigon23.go +++ b/cmd/state/commands/erigon23.go @@ -1,6 +1,7 @@ package commands import ( + "bytes" "context" "errors" "fmt" @@ -14,15 +15,15 @@ import ( "time" "github.com/holiman/uint256" + "github.com/spf13/cobra" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" libstate "github.com/ledgerwatch/erigon-lib/state" - "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" + "github.com/ledgerwatch/erigon/cmd/state/exec3" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" 
"github.com/ledgerwatch/erigon/consensus/misc" @@ -33,30 +34,55 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/snapshotsync" ) -const ( - AggregationStep = 3_125_000 /* number of transactions in smallest static file */ -) - func init() { withBlock(erigon23Cmd) withDataDir(erigon23Cmd) withChain(erigon23Cmd) + withLogPath(erigon23Cmd) + erigon23Cmd.Flags().IntVar(&commitmentFrequency, "commfreq", 25000, "how many blocks to skip between calculating commitment") + erigon23Cmd.Flags().BoolVar(&commitments, "commitments", false, "set to true to calculate commitments") rootCmd.AddCommand(erigon23Cmd) } var erigon23Cmd = &cobra.Command{ Use: "erigon23", - Short: "Exerimental command to re-execute blocks from beginning using erigon2 state representation and histoty (ugrade 3)", + Short: "Experimental command to re-execute blocks from beginning using erigon2 state representation and histoty (ugrade 3)", RunE: func(cmd *cobra.Command, args []string) error { - logger := log.New() + logger, err := initSeparatedLogging(logdir, "erigon23") + if err != nil { + return err + } return Erigon23(genesis, chainConfig, logger) }, } +func initSeparatedLogging(logPath string, filePrefix string) (log.Logger, error) { + err := os.MkdirAll(logPath, 0764) + if err != nil { + return nil, err + } + + logger := log.New() + userLog, err := log.FileHandler(path.Join(logPath, filePrefix+"-user.log"), log.LogfmtFormat(), 1<<27) // 128Mb + if err != nil { + return nil, err + } + errLog, err := log.FileHandler(path.Join(logPath, filePrefix+"-error.log"), log.LogfmtFormat(), 1<<27) // 128Mb + if err != nil { + return nil, err + } + + mux := log.MultiHandler(logger.GetHandler(), log.LvlFilterHandler(log.LvlInfo, userLog), log.LvlFilterHandler(log.LvlError, errLog)) + logger.SetHandler(mux) + log.SetRootHandler(mux) + return 
logger, nil +} + func Erigon23(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log.Logger) error { sigs := make(chan os.Signal, 1) interruptCh := make(chan bool, 1) @@ -105,17 +131,25 @@ func Erigon23(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log return err } - agg, err3 := libstate.NewAggregator(aggPath, AggregationStep) + agg, err3 := libstate.NewAggregator(aggPath, ethconfig.HistoryV3AggregationStep) if err3 != nil { return fmt.Errorf("create aggregator: %w", err3) } defer agg.Close() + startTxNum := agg.EndTxNumMinimax() fmt.Printf("Max txNum in files: %d\n", startTxNum) + agg.SetTx(rwTx) + latestTx, err := agg.SeekCommitment(startTxNum) + if err != nil && startTxNum != 0 { + return fmt.Errorf("failed to seek commitment to tx %d: %w", startTxNum, err) + } + startTxNum = latestTx + interrupt := false if startTxNum == 0 { - _, genesisIbs, err := genesis.ToBlock() + genBlock, genesisIbs, err := genesis.ToBlock() if err != nil { return err } @@ -124,12 +158,22 @@ func Erigon23(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log if err = genesisIbs.CommitBlock(¶ms.Rules{}, &WriterWrapper23{w: agg}); err != nil { return fmt.Errorf("cannot write state: %w", err) } + + blockRootHash, err := agg.ComputeCommitment(true, false) + if err != nil { + return err + } if err = agg.FinishTx(); err != nil { return err } + + genesisRootHash := genBlock.Root() + if !bytes.Equal(blockRootHash, genesisRootHash[:]) { + return fmt.Errorf("genesis root hash mismatch: expected %x got %x", genesisRootHash, blockRootHash) + } } - logger.Info("Initialised chain configuration", "config", chainConfig) + logger.Info("Initialised chain configuration", "startTxNum", startTxNum, "config", chainConfig) var ( blockNum uint64 @@ -155,8 +199,7 @@ func Erigon23(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log }() var blockReader services.FullBlockReader - var allSnapshots *snapshotsync.RoSnapshots - allSnapshots = 
snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) + var allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) defer allSnapshots.Close() if err := allSnapshots.ReopenFolder(); err != nil { return fmt.Errorf("reopen snapshot segments: %w", err) @@ -174,6 +217,40 @@ func Erigon23(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log readWrapper := &ReaderWrapper23{ac: agg.MakeContext(), roTx: rwTx} writeWrapper := &WriterWrapper23{w: agg} + commitFn := func(txn uint64) error { + var spaceDirty uint64 + if spaceDirty, _, err = rwTx.(*kv2.MdbxTx).SpaceDirty(); err != nil { + return fmt.Errorf("retrieving spaceDirty: %w", err) + } + if spaceDirty >= dirtySpaceThreshold { + log.Info("Initiated tx commit", "block", blockNum, "space dirty", libcommon.ByteCount(spaceDirty)) + } + log.Info("database commitment", "block", blockNum, "txNum", txn) + + if err = rwTx.Commit(); err != nil { + return err + } + if interrupt { + return nil + } + + if rwTx, err = db.BeginRw(ctx); err != nil { + return err + } + agg.SetTx(rwTx) + readWrapper.roTx = rwTx + return nil + } + + defer func() { + interrupt = true + if err := commitFn(txNum); err != nil { + log.Error("commit on exit failed", "err", err) + } + }() + + agg.SetCommitFn(commitFn) + for !interrupt { blockNum++ trace = traceBlock > 0 && blockNum == uint64(traceBlock) @@ -200,32 +277,12 @@ func Erigon23(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log // Check for interrupts select { case interrupt = <-interruptCh: + // Commit transaction only when interrupted or just before computing commitment (so it can be re-done) log.Info(fmt.Sprintf("interrupted, please wait for cleanup, next time start with --block %d", blockNum)) - default: - } - // Commit transaction only when interrupted or just before computing commitment (so it can be re-done) - commit := interrupt - if !commit && 
(blockNum+1)%uint64(commitmentFrequency) == 0 { - var spaceDirty uint64 - if spaceDirty, _, err = rwTx.(*mdbx.MdbxTx).SpaceDirty(); err != nil { - return fmt.Errorf("retrieving spaceDirty: %w", err) + if err := commitFn(txNum); err != nil { + log.Error("db commit", "err", err) } - if spaceDirty >= dirtySpaceThreshold { - log.Info("Initiated tx commit", "block", blockNum, "space dirty", libcommon.ByteCount(spaceDirty)) - commit = true - } - } - if commit { - if err = rwTx.Commit(); err != nil { - return err - } - if !interrupt { - if rwTx, err = db.BeginRw(ctx); err != nil { - return err - } - } - agg.SetTx(rwTx) - readWrapper.roTx = rwTx + default: } } @@ -237,8 +294,6 @@ type stat23 struct { hits uint64 misses uint64 prevBlock uint64 - prevMisses uint64 - prevHits uint64 hitMissRatio float64 speed float64 prevTime time.Time @@ -246,12 +301,12 @@ type stat23 struct { } func (s *stat23) print(aStats libstate.FilesStats, logger log.Logger) { - totalFiles := 0 - totalDatSize := 0 - totalIdxSize := 0 + totalFiles := aStats.FilesCount + totalDatSize := aStats.DataSize + totalIdxSize := aStats.IdxSize logger.Info("Progress", "block", s.blockNum, "blk/s", s.speed, "state files", totalFiles, - "total dat", libcommon.ByteCount(uint64(totalDatSize)), "total idx", libcommon.ByteCount(uint64(totalIdxSize)), + "total dat", libcommon.ByteCount(totalDatSize), "total idx", libcommon.ByteCount(totalIdxSize), "hit ratio", s.hitMissRatio, "hits+misses", s.hits+s.misses, "alloc", libcommon.ByteCount(s.mem.Alloc), "sys", libcommon.ByteCount(s.mem.Sys), ) @@ -305,24 +360,28 @@ func processBlock23(startTxNum uint64, trace bool, txNumStart uint64, rw *Reader txNum++ // Pre-block transaction ww.w.SetTxNum(txNum) + if err := ww.w.FinishTx(); err != nil { + return 0, nil, fmt.Errorf("finish pre-block tx %d (block %d) has failed: %w", txNum, block.NumberU64(), err) + } + getHashFn := core.GetHashFn(header, getHeader) for i, tx := range block.Transactions() { if txNum >= startTxNum { ibs := 
state.New(rw) ibs.Prepare(tx.Hash(), block.Hash(), i) - ct := NewCallTracer() + ct := exec3.NewCallTracer() vmConfig.Tracer = ct - receipt, _, err := core.ApplyTransaction(chainConfig, getHashFn, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, getHashFn, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } - for from := range ct.froms { + for from := range ct.Froms() { if err := ww.w.AddTraceFrom(from[:]); err != nil { return 0, nil, err } } - for to := range ct.tos { + for to := range ct.Tos() { if err := ww.w.AddTraceTo(to[:]); err != nil { return 0, nil, err } @@ -358,7 +417,6 @@ func processBlock23(startTxNum uint64, trace bool, txNumStart uint64, rw *Reader } } } - if txNum >= startTxNum { ibs := state.New(rw) if err := ww.w.AddTraceTo(block.Coinbase().Bytes()); err != nil { @@ -379,6 +437,16 @@ func processBlock23(startTxNum uint64, trace bool, txNumStart uint64, rw *Reader return 0, nil, fmt.Errorf("committing block %d failed: %w", block.NumberU64(), err) } + if commitments && block.Number().Uint64()%uint64(commitmentFrequency) == 0 { + rootHash, err := ww.w.ComputeCommitment(true, trace) + if err != nil { + return 0, nil, err + } + if !bytes.Equal(rootHash, header.Root[:]) { + return 0, nil, fmt.Errorf("invalid root hash for block %d: expected %x got %x", block.NumberU64(), header.Root, rootHash) + } + } + if err := ww.w.FinishTx(); err != nil { return 0, nil, fmt.Errorf("failed to finish tx: %w", err) } @@ -389,6 +457,9 @@ func processBlock23(startTxNum uint64, trace bool, txNumStart uint64, rw *Reader txNum++ // Post-block transaction ww.w.SetTxNum(txNum) + if err := ww.w.FinishTx(); err != nil { + return 0, nil, fmt.Errorf("finish after-block tx %d (block %d) has failed: %w", txNum, block.NumberU64(), err) + } return txNum, receipts, nil } @@ -413,7 +484,6 @@ func (rw 
*ReaderWrapper23) ReadAccountData(address common.Address) (*accounts.Ac if len(enc) == 0 { return nil, nil } - var a accounts.Account a.Reset() pos := 0 @@ -489,6 +559,7 @@ func (ww *WriterWrapper23) UpdateAccountData(address common.Address, original, a } value := make([]byte, l) pos := 0 + if account.Nonce == 0 { value[pos] = 0 pos++ diff --git a/cmd/state/commands/global_flags_vars.go b/cmd/state/commands/global_flags_vars.go index 547f1b8a2d6..1495dda36a6 100644 --- a/cmd/state/commands/global_flags_vars.go +++ b/cmd/state/commands/global_flags_vars.go @@ -1,10 +1,11 @@ package commands import ( + "github.com/spf13/cobra" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common/paths" - "github.com/spf13/cobra" ) var ( @@ -16,6 +17,7 @@ var ( indexBucket string snapshotsCli bool chain string + logdir string ) func must(err error) { @@ -56,3 +58,7 @@ func withSnapshotBlocks(cmd *cobra.Command) { func withChain(cmd *cobra.Command) { cmd.Flags().StringVar(&chain, "chain", "", "pick a chain to assume (mainnet, ropsten, etc.)") } + +func withLogPath(cmd *cobra.Command) { + cmd.Flags().StringVar(&logdir, "log-dir", "/var/lib/erigon", "path to write user and error logs to") +} diff --git a/cmd/state/commands/history2.go b/cmd/state/commands/history2.go index 0aa8b07ffee..3fe6ba465e4 100644 --- a/cmd/state/commands/history2.go +++ b/cmd/state/commands/history2.go @@ -157,7 +157,7 @@ func runHistory2(trace bool, blockNum, txNumStart uint64, hw *HistoryWrapper, ww daoBlock = false } ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, 
tx.Hash(), err) } diff --git a/cmd/state/commands/history22.go b/cmd/state/commands/history22.go index fce22f2a717..fe6bb11d753 100644 --- a/cmd/state/commands/history22.go +++ b/cmd/state/commands/history22.go @@ -25,6 +25,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" + datadir2 "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/snapshotsync" @@ -58,7 +59,8 @@ func History22(genesis *core.Genesis, logger log.Logger) error { <-sigs interruptCh <- true }() - historyDb, err := kv2.NewMDBX(logger).Path(path.Join(datadir, "chaindata")).Open() + dirs := datadir2.New(datadir) + historyDb, err := kv2.NewMDBX(logger).Path(dirs.Chaindata).Open() if err != nil { return fmt.Errorf("opening chaindata as read only: %v", err) } @@ -70,7 +72,7 @@ func History22(genesis *core.Genesis, logger log.Logger) error { } defer historyTx.Rollback() aggPath := filepath.Join(datadir, "erigon23") - h, err := libstate.NewAggregator(aggPath, AggregationStep) + h, err := libstate.NewAggregator(aggPath, ethconfig.HistoryV3AggregationStep) if err != nil { return fmt.Errorf("create history: %w", err) } @@ -103,7 +105,7 @@ func History22(genesis *core.Genesis, logger log.Logger) error { return err } } - ri, err := libstate.NewReadIndices(readPath, AggregationStep) + ri, err := libstate.NewReadIndices(readPath, ethconfig.HistoryV3AggregationStep) if err != nil { return fmt.Errorf("create read indices: %w", err) } @@ -136,7 +138,7 @@ func History22(genesis *core.Genesis, logger log.Logger) error { return fmt.Errorf("reopen snapshot segments: %w", err) } blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) - readWrapper := state.NewHistoryReader22(h.MakeContext(), ri) + readWrapper := state.NewHistoryReader23(h.MakeContext(), ri) for !interrupt { select { @@ -218,7 
+220,7 @@ func History22(genesis *core.Genesis, logger log.Logger) error { return nil } -func runHistory22(trace bool, blockNum, txNumStart uint64, hw *state.HistoryReader22, ww state.StateWriter, chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config) (uint64, types.Receipts, error) { +func runHistory22(trace bool, blockNum, txNumStart uint64, hw *state.HistoryReader23, ww state.StateWriter, chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config) (uint64, types.Receipts, error) { header := block.Header() vmConfig.TraceJumpDest = true engine := ethash.NewFullFaker() @@ -244,7 +246,7 @@ func runHistory22(trace bool, blockNum, txNumStart uint64, hw *state.HistoryRead hw.SetTxNum(txNum) ibs := state.New(hw) ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go index ee6a5d86102..a944be4153b 100644 --- a/cmd/state/commands/opcode_tracer.go +++ b/cmd/state/commands/opcode_tracer.go @@ -29,7 +29,6 @@ import ( "github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/params" ) @@ -57,8 +56,8 @@ var opcodeTracerCmd = &cobra.Command{ }, } -//const MaxUint = ^uint(0) -//const MaxUint64 = ^uint64(0) +// const MaxUint = ^uint(0) +// const MaxUint64 = ^uint64(0) const MaxUint16 = ^uint16(0) type opcode 
struct { @@ -559,8 +558,7 @@ func OpcodeTracer(genesis *core.Genesis, blockNum uint64, chaindata string, numB intraBlockState.SetTracer(ot) getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(historyTx, hash, number) } - contractHasTEVM := ethdb.GetHasTEVM(historyTx) - receipts, err1 := runBlock(ethash.NewFullFaker(), intraBlockState, noOpWriter, noOpWriter, chainConfig, getHeader, contractHasTEVM, block, vmConfig, false) + receipts, err1 := runBlock(ethash.NewFullFaker(), intraBlockState, noOpWriter, noOpWriter, chainConfig, getHeader, block, vmConfig, false) if err1 != nil { return err1 } @@ -671,7 +669,7 @@ func OpcodeTracer(genesis *core.Genesis, blockNum uint64, chaindata string, numB } func runBlock(engine consensus.Engine, ibs *state.IntraBlockState, txnWriter state.StateWriter, blockWriter state.StateWriter, - chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, contractHasTEVM func(common.Hash) (bool, error), block *types.Block, vmConfig vm.Config, trace bool) (types.Receipts, error) { + chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config, trace bool) (types.Receipts, error) { header := block.Header() vmConfig.TraceJumpDest = true gp := new(core.GasPool).AddGas(block.GasLimit()) @@ -684,7 +682,7 @@ func runBlock(engine consensus.Engine, ibs *state.IntraBlockState, txnWriter sta rules := chainConfig.Rules(block.NumberU64()) for i, tx := range block.Transactions() { ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig, contractHasTEVM) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig) if err != nil { return nil, fmt.Errorf("could not apply tx %d [%x] 
failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/replay_tx.go b/cmd/state/commands/replay_tx.go index 417f5b71bfd..3ad2ab1371c 100644 --- a/cmd/state/commands/replay_tx.go +++ b/cmd/state/commands/replay_tx.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "path" - "path/filepath" "sort" "github.com/ledgerwatch/erigon-lib/kv/memdb" @@ -15,6 +14,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" + datadir2 "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/log/v3" @@ -41,8 +41,7 @@ var replayTxCmd = &cobra.Command{ func ReplayTx(genesis *core.Genesis) error { var blockReader services.FullBlockReader - var allSnapshots *snapshotsync.RoSnapshots - allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, true, true), path.Join(datadir, "snapshots")) + var allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, true, true), path.Join(datadir, "snapshots")) defer allSnapshots.Close() if err := allSnapshots.ReopenFolder(); err != nil { return fmt.Errorf("reopen snapshot segments: %w", err) @@ -52,8 +51,9 @@ func ReplayTx(genesis *core.Genesis) error { txNums := make([]uint64, allSnapshots.BlocksAvailable()+1) if err := allSnapshots.Bodies.View(func(bs []*snapshotsync.BodySegment) error { for _, b := range bs { - if err := b.Iterate(func(blockNum, baseTxNum, txAmount uint64) { + if err := b.Iterate(func(blockNum, baseTxNum, txAmount uint64) error { txNums[blockNum] = baseTxNum + txAmount + return nil }); err != nil { return err } @@ -103,14 +103,18 @@ func ReplayTx(genesis *core.Genesis) error { txNum = txnum } fmt.Printf("txNum = %d\n", txNum) - aggPath := filepath.Join(datadir, "agg22") - agg, err := libstate.NewAggregator22(aggPath, AggregationStep) + dirs := datadir2.New(datadir) + agg, err := 
libstate.NewAggregator22(dirs.SnapHistory, ethconfig.HistoryV3AggregationStep) + if err != nil { + return fmt.Errorf("create history: %w", err) + } + err = agg.ReopenFiles() if err != nil { return fmt.Errorf("create history: %w", err) } defer agg.Close() ac := agg.MakeContext() - workCh := make(chan state.TxTask) + workCh := make(chan *state.TxTask) rs := state.NewReconState(workCh) if err = replayTxNum(ctx, allSnapshots, blockReader, txNum, txNums, rs, ac); err != nil { return err @@ -149,7 +153,6 @@ func replayTxNum(ctx context.Context, allSnapshots *snapshotsync.RoSnapshots, bl gp := new(core.GasPool).AddGas(txn.GetGas()) //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, gas=%d, input=[%x]\n", txNum, blockNum, txIndex, txn.GetGas(), txn.GetData()) vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(chainConfig, bn)} - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } getHeader := func(hash common.Hash, number uint64) *types.Header { h, err := blockReader.Header(ctx, nil, hash, number) if err != nil { @@ -161,7 +164,7 @@ func replayTxNum(ctx context.Context, allSnapshots *snapshotsync.RoSnapshots, bl logger := log.New() engine := initConsensusEngine(chainConfig, logger, allSnapshots) txnHash := txn.Hash() - blockContext := core.NewEVMBlockContext(header, getHashFn, engine, nil /* author */, contractHasTEVM) + blockContext := core.NewEVMBlockContext(header, getHashFn, engine, nil /* author */) ibs.Prepare(txnHash, blockHash, txIndex) msg, err := txn.AsMessage(*types.MakeSigner(chainConfig, bn), header.BaseFee, rules) if err != nil { diff --git a/cmd/state/commands/state_recon.go b/cmd/state/commands/state_recon.go deleted file mode 100644 index 39df9d6fdf2..00000000000 --- a/cmd/state/commands/state_recon.go +++ /dev/null @@ -1,864 +0,0 @@ -package commands - -import ( - "context" - "errors" - "fmt" - "math/big" - "os" - "os/signal" - "path" - "path/filepath" - "runtime" - "sort" - "sync" - "sync/atomic" - 
"syscall" - "time" - - "github.com/RoaringBitmap/roaring/roaring64" - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/etl" - "github.com/ledgerwatch/erigon-lib/kv" - kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" - libstate "github.com/ledgerwatch/erigon-lib/state" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/misc" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/eth/stagedsync" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" -) - -func init() { - withBlock(reconCmd) - withDataDir(reconCmd) - rootCmd.AddCommand(reconCmd) -} - -var reconCmd = &cobra.Command{ - Use: "recon", - Short: "Exerimental command to reconstitute the state from state history at given block", - RunE: func(cmd *cobra.Command, args []string) error { - logger := log.New() - return Recon(genesis, logger) - }, -} - -type ReconWorker struct { - lock sync.Locker - wg *sync.WaitGroup - rs *state.ReconState - blockReader services.FullBlockReader - allSnapshots *snapshotsync.RoSnapshots - stateWriter *state.StateReconWriter - stateReader *state.HistoryReaderNoState - getHeader func(hash common.Hash, number uint64) *types.Header - ctx context.Context - engine consensus.Engine - chainConfig *params.ChainConfig - logger log.Logger - genesis *core.Genesis -} - -func NewReconWorker(lock sync.Locker, wg *sync.WaitGroup, rs *state.ReconState, - a *libstate.Aggregator22, blockReader services.FullBlockReader, allSnapshots 
*snapshotsync.RoSnapshots, - chainConfig *params.ChainConfig, logger log.Logger, genesis *core.Genesis, -) *ReconWorker { - ac := a.MakeContext() - return &ReconWorker{ - lock: lock, - wg: wg, - rs: rs, - blockReader: blockReader, - allSnapshots: allSnapshots, - ctx: context.Background(), - stateWriter: state.NewStateReconWriter(ac, rs), - stateReader: state.NewHistoryReaderNoState(ac, rs), - chainConfig: chainConfig, - logger: logger, - genesis: genesis, - } -} - -func (rw *ReconWorker) SetTx(tx kv.Tx) { - rw.stateReader.SetTx(tx) -} - -func (rw *ReconWorker) run() { - defer rw.wg.Done() - rw.getHeader = func(hash common.Hash, number uint64) *types.Header { - h, err := rw.blockReader.Header(rw.ctx, nil, hash, number) - if err != nil { - panic(err) - } - return h - } - rw.engine = initConsensusEngine(rw.chainConfig, rw.logger, rw.allSnapshots) - for txTask, ok := rw.rs.Schedule(); ok; txTask, ok = rw.rs.Schedule() { - rw.runTxTask(txTask) - } -} - -func (rw *ReconWorker) runTxTask(txTask state.TxTask) { - rw.lock.Lock() - defer rw.lock.Unlock() - rw.stateReader.SetTxNum(txTask.TxNum) - rw.stateReader.ResetError() - rw.stateWriter.SetTxNum(txTask.TxNum) - noop := state.NewNoopWriter() - rules := rw.chainConfig.Rules(txTask.BlockNum) - ibs := state.New(rw.stateReader) - daoForkTx := rw.chainConfig.DAOForkSupport && rw.chainConfig.DAOForkBlock != nil && rw.chainConfig.DAOForkBlock.Uint64() == txTask.BlockNum && txTask.TxIndex == -1 - var err error - if txTask.BlockNum == 0 && txTask.TxIndex == -1 { - //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) - // Genesis block - _, ibs, err = rw.genesis.ToBlock() - if err != nil { - panic(err) - } - } else if daoForkTx { - //fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txNum, blockNum) - misc.ApplyDAOHardFork(ibs) - ibs.SoftFinalise() - } else if txTask.Final { - if txTask.BlockNum > 0 { - //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txNum, blockNum) - // End of block 
transaction in a block - if _, _, err := rw.engine.Finalize(rw.chainConfig, txTask.Header, ibs, txTask.Block.Transactions(), txTask.Block.Uncles(), nil /* receipts */, nil, nil, nil); err != nil { - panic(fmt.Errorf("finalize of block %d failed: %w", txTask.BlockNum, err)) - } - } - } else if txTask.TxIndex == -1 { - // Block initialisation - } else { - txHash := txTask.Tx.Hash() - gp := new(core.GasPool).AddGas(txTask.Tx.GetGas()) - vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, txTask.BlockNum)} - contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - getHashFn := core.GetHashFn(txTask.Header, rw.getHeader) - blockContext := core.NewEVMBlockContext(txTask.Header, getHashFn, rw.engine, nil /* author */, contractHasTEVM) - ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) - msg, err := txTask.Tx.AsMessage(*types.MakeSigner(rw.chainConfig, txTask.BlockNum), txTask.Header.BaseFee, rules) - if err != nil { - panic(err) - } - txContext := core.NewEVMTxContext(msg) - vmenv := vm.NewEVM(blockContext, txContext, ibs, rw.chainConfig, vmConfig) - //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, evm=%p\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex, vmenv) - _, err = core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* gasBailout */) - if err != nil { - panic(fmt.Errorf("could not apply tx %d [%x] failed: %w", txTask.TxIndex, txHash, err)) - } - if err = ibs.FinalizeTx(rules, noop); err != nil { - panic(err) - } - } - if dependency, ok := rw.stateReader.ReadError(); ok { - //fmt.Printf("rollback %d\n", txNum) - rw.rs.RollbackTx(txTask, dependency) - } else { - if err = ibs.CommitBlock(rules, rw.stateWriter); err != nil { - panic(err) - } - //fmt.Printf("commit %d\n", txNum) - rw.rs.CommitTxNum(txTask.TxNum) - } -} - -type FillWorker struct { - txNum uint64 - doneCount *uint64 - ac *libstate.Aggregator22Context - fromKey, toKey []byte - currentKey []byte - bitmap roaring64.Bitmap - 
total uint64 - progress uint64 -} - -func NewFillWorker(txNum uint64, doneCount *uint64, a *libstate.Aggregator22, fromKey, toKey []byte) *FillWorker { - fw := &FillWorker{ - txNum: txNum, - doneCount: doneCount, - ac: a.MakeContext(), - fromKey: fromKey, - toKey: toKey, - } - return fw -} - -func (fw *FillWorker) Total() uint64 { - return atomic.LoadUint64(&fw.total) -} - -func (fw *FillWorker) Progress() uint64 { - return atomic.LoadUint64(&fw.progress) -} - -func (fw *FillWorker) fillAccounts(plainStateCollector *etl.Collector) { - defer func() { - atomic.AddUint64(fw.doneCount, 1) - }() - it := fw.ac.IterateAccountsHistory(fw.fromKey, fw.toKey, fw.txNum) - atomic.StoreUint64(&fw.total, it.Total()) - for it.HasNext() { - key, val, progress := it.Next() - atomic.StoreUint64(&fw.progress, progress) - fw.currentKey = key - if len(val) > 0 { - var a accounts.Account - a.Reset() - pos := 0 - nonceBytes := int(val[pos]) - pos++ - if nonceBytes > 0 { - a.Nonce = bytesToUint64(val[pos : pos+nonceBytes]) - pos += nonceBytes - } - balanceBytes := int(val[pos]) - pos++ - if balanceBytes > 0 { - a.Balance.SetBytes(val[pos : pos+balanceBytes]) - pos += balanceBytes - } - codeHashBytes := int(val[pos]) - pos++ - if codeHashBytes > 0 { - copy(a.CodeHash[:], val[pos:pos+codeHashBytes]) - pos += codeHashBytes - } - incBytes := int(val[pos]) - pos++ - if incBytes > 0 { - a.Incarnation = bytesToUint64(val[pos : pos+incBytes]) - } - value := make([]byte, a.EncodingLengthForStorage()) - a.EncodeForStorage(value) - if err := plainStateCollector.Collect(key, value); err != nil { - panic(err) - } - //fmt.Printf("Account [%x]=>{Balance: %d, Nonce: %d, Root: %x, CodeHash: %x}\n", key, &a.Balance, a.Nonce, a.Root, a.CodeHash) - } - } -} - -func (fw *FillWorker) fillStorage(plainStateCollector *etl.Collector) { - defer func() { - atomic.AddUint64(fw.doneCount, 1) - }() - it := fw.ac.IterateStorageHistory(fw.fromKey, fw.toKey, fw.txNum) - atomic.StoreUint64(&fw.total, it.Total()) - for 
it.HasNext() { - key, val, progress := it.Next() - atomic.StoreUint64(&fw.progress, progress) - fw.currentKey = key - compositeKey := dbutils.PlainGenerateCompositeStorageKey(key[:20], state.FirstContractIncarnation, key[20:]) - if len(val) > 0 { - if err := plainStateCollector.Collect(compositeKey, val); err != nil { - panic(err) - } - //fmt.Printf("Storage [%x] => [%x]\n", compositeKey, val) - } - } -} - -func (fw *FillWorker) fillCode(codeCollector, plainContractCollector *etl.Collector) { - defer func() { - atomic.AddUint64(fw.doneCount, 1) - }() - it := fw.ac.IterateCodeHistory(fw.fromKey, fw.toKey, fw.txNum) - atomic.StoreUint64(&fw.total, it.Total()) - for it.HasNext() { - key, val, progress := it.Next() - atomic.StoreUint64(&fw.progress, progress) - fw.currentKey = key - compositeKey := dbutils.PlainGenerateStoragePrefix(key, state.FirstContractIncarnation) - if len(val) > 0 { - codeHash, err := common.HashData(val) - if err != nil { - panic(err) - } - if err = codeCollector.Collect(codeHash[:], val); err != nil { - panic(err) - } - if err = plainContractCollector.Collect(compositeKey, codeHash[:]); err != nil { - panic(err) - } - //fmt.Printf("Code [%x] => [%x]\n", compositeKey, val) - } - } -} - -func (fw *FillWorker) ResetProgress() { - fw.total = 0 - fw.progress = 0 -} - -func (fw *FillWorker) bitmapAccounts() { - defer func() { - atomic.AddUint64(fw.doneCount, 1) - }() - it := fw.ac.IterateAccountsReconTxs(fw.fromKey, fw.toKey, fw.txNum) - atomic.StoreUint64(&fw.total, it.Total()) - for it.HasNext() { - txNum, progress := it.Next() - atomic.StoreUint64(&fw.progress, progress) - fw.bitmap.Add(txNum) - } -} - -func (fw *FillWorker) bitmapStorage() { - defer func() { - atomic.AddUint64(fw.doneCount, 1) - }() - it := fw.ac.IterateStorageReconTxs(fw.fromKey, fw.toKey, fw.txNum) - atomic.StoreUint64(&fw.total, it.Total()) - for it.HasNext() { - txNum, progress := it.Next() - atomic.StoreUint64(&fw.progress, progress) - fw.bitmap.Add(txNum) - } -} - -func (fw 
*FillWorker) bitmapCode() { - defer func() { - atomic.AddUint64(fw.doneCount, 1) - }() - it := fw.ac.IterateCodeReconTxs(fw.fromKey, fw.toKey, fw.txNum) - atomic.StoreUint64(&fw.total, it.Total()) - for it.HasNext() { - txNum, progress := it.Next() - atomic.StoreUint64(&fw.progress, progress) - fw.bitmap.Add(txNum) - } -} - -func Recon(genesis *core.Genesis, logger log.Logger) error { - sigs := make(chan os.Signal, 1) - interruptCh := make(chan bool, 1) - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - - go func() { - <-sigs - interruptCh <- true - }() - ctx := context.Background() - aggPath := filepath.Join(datadir, "agg22") - agg, err := libstate.NewAggregator22(aggPath, AggregationStep) - if err != nil { - return fmt.Errorf("create history: %w", err) - } - defer agg.Close() - reconDbPath := path.Join(datadir, "recondb") - if _, err = os.Stat(reconDbPath); err != nil { - if !errors.Is(err, os.ErrNotExist) { - return err - } - } else if err = os.RemoveAll(reconDbPath); err != nil { - return err - } - startTime := time.Now() - db, err := kv2.NewMDBX(logger).Path(reconDbPath).WriteMap().Open() - if err != nil { - return err - } - var blockReader services.FullBlockReader - allSnapshots := snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) - defer allSnapshots.Close() - if err := allSnapshots.ReopenFolder(); err != nil { - return fmt.Errorf("reopen snapshot segments: %w", err) - } - blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) - // Compute mapping blockNum -> last TxNum in that block - txNums := make([]uint64, allSnapshots.BlocksAvailable()+1) - if err = allSnapshots.Bodies.View(func(bs []*snapshotsync.BodySegment) error { - for _, b := range bs { - if err = b.Iterate(func(blockNum, baseTxNum, txAmount uint64) { - txNums[blockNum] = baseTxNum + txAmount - }); err != nil { - return err - } - } - return nil - }); err != nil { - return fmt.Errorf("build txNum => blockNum mapping: %w", err) 
- } - endTxNumMinimax := agg.EndTxNumMinimax() - fmt.Printf("Max txNum in files: %d\n", endTxNumMinimax) - blockNum := uint64(sort.Search(len(txNums), func(i int) bool { - return txNums[i] > endTxNumMinimax - })) - if blockNum == uint64(len(txNums)) { - return fmt.Errorf("mininmax txNum not found in snapshot blocks: %d", endTxNumMinimax) - } - if blockNum == 0 { - return fmt.Errorf("not enough transactions in the history data") - } - if block+1 > blockNum { - return fmt.Errorf("specified block %d which is higher than available %d", block, blockNum) - } - fmt.Printf("Max blockNum = %d\n", blockNum) - blockNum = block + 1 - txNum := txNums[blockNum-1] - fmt.Printf("Corresponding block num = %d, txNum = %d\n", blockNum, txNum) - workerCount := runtime.NumCPU() - var wg sync.WaitGroup - workCh := make(chan state.TxTask, 128) - rs := state.NewReconState(workCh) - var fromKey, toKey []byte - bigCount := big.NewInt(int64(workerCount)) - bigStep := big.NewInt(0x100000000) - bigStep.Div(bigStep, bigCount) - bigCurrent := big.NewInt(0) - fillWorkers := make([]*FillWorker, workerCount) - var doneCount uint64 - for i := 0; i < workerCount; i++ { - fromKey = toKey - if i == workerCount-1 { - toKey = nil - } else { - bigCurrent.Add(bigCurrent, bigStep) - toKey = make([]byte, 4) - bigCurrent.FillBytes(toKey) - } - //fmt.Printf("%d) Fill worker [%x] - [%x]\n", i, fromKey, toKey) - fillWorkers[i] = NewFillWorker(txNum, &doneCount, agg, fromKey, toKey) - } - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() - doneCount = 0 - for i := 0; i < workerCount; i++ { - fillWorkers[i].ResetProgress() - go fillWorkers[i].bitmapAccounts() - } - for atomic.LoadUint64(&doneCount) < uint64(workerCount) { - select { - case <-logEvery.C: - var m runtime.MemStats - libcommon.ReadMemStats(&m) - var p float64 - for i := 0; i < workerCount; i++ { - if total := fillWorkers[i].Total(); total > 0 { - p += float64(fillWorkers[i].Progress()) / float64(total) - } - } - p *= 100.0 - 
log.Info("Scan accounts history", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), - ) - } - } - doneCount = 0 - for i := 0; i < workerCount; i++ { - fillWorkers[i].ResetProgress() - go fillWorkers[i].bitmapStorage() - } - for atomic.LoadUint64(&doneCount) < uint64(workerCount) { - select { - case <-logEvery.C: - var m runtime.MemStats - libcommon.ReadMemStats(&m) - var p float64 - for i := 0; i < workerCount; i++ { - if total := fillWorkers[i].Total(); total > 0 { - p += float64(fillWorkers[i].Progress()) / float64(total) - } - } - p *= 100.0 - log.Info("Scan storage history", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), - ) - } - } - doneCount = 0 - for i := 0; i < workerCount; i++ { - fillWorkers[i].ResetProgress() - go fillWorkers[i].bitmapCode() - } - for atomic.LoadUint64(&doneCount) < uint64(workerCount) { - select { - case <-logEvery.C: - var m runtime.MemStats - libcommon.ReadMemStats(&m) - var p float64 - for i := 0; i < workerCount; i++ { - if total := fillWorkers[i].Total(); total > 0 { - p += float64(fillWorkers[i].Progress()) / float64(total) - } - } - p *= 100.0 - log.Info("Scan code history", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), - ) - } - } - var bitmap roaring64.Bitmap - for i := 0; i < workerCount; i++ { - bitmap.Or(&fillWorkers[i].bitmap) - } - log.Info("Ready to replay", "transactions", bitmap.GetCardinality(), "out of", txNum) - var lock sync.RWMutex - reconWorkers := make([]*ReconWorker, workerCount) - roTxs := make([]kv.Tx, workerCount) - defer func() { - for i := 0; i < workerCount; i++ { - if roTxs[i] != nil { - roTxs[i].Rollback() - } - } - }() - for i := 0; i < workerCount; i++ { - roTxs[i], err = db.BeginRo(ctx) - if err != nil { - return err - } - } - for i 
:= 0; i < workerCount; i++ { - reconWorkers[i] = NewReconWorker(lock.RLocker(), &wg, rs, agg, blockReader, allSnapshots, chainConfig, logger, genesis) - reconWorkers[i].SetTx(roTxs[i]) - } - wg.Add(workerCount) - count := uint64(0) - rollbackCount := uint64(0) - total := bitmap.GetCardinality() - for i := 0; i < workerCount; i++ { - go reconWorkers[i].run() - } - commitThreshold := uint64(256 * 1024 * 1024) - prevCount := uint64(0) - prevRollbackCount := uint64(0) - prevTime := time.Now() - reconDone := make(chan struct{}) - go func() { - for { - select { - case <-reconDone: - return - case <-logEvery.C: - var m runtime.MemStats - libcommon.ReadMemStats(&m) - sizeEstimate := rs.SizeEstimate() - count = rs.DoneCount() - rollbackCount = rs.RollbackCount() - currentTime := time.Now() - interval := currentTime.Sub(prevTime) - speedTx := float64(count-prevCount) / (float64(interval) / float64(time.Second)) - progress := 100.0 * float64(count) / float64(total) - var repeatRatio float64 - if count > prevCount { - repeatRatio = 100.0 * float64(rollbackCount-prevRollbackCount) / float64(count-prevCount) - } - prevTime = currentTime - prevCount = count - prevRollbackCount = rollbackCount - log.Info("State reconstitution", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", progress), "tx/s", fmt.Sprintf("%.1f", speedTx), "repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), "buffer", libcommon.ByteCount(sizeEstimate), - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), - ) - if sizeEstimate >= commitThreshold { - err := func() error { - lock.Lock() - defer lock.Unlock() - for i := 0; i < workerCount; i++ { - roTxs[i].Rollback() - } - rwTx, err := db.BeginRw(ctx) - if err != nil { - return err - } - if err = rs.Flush(rwTx); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - for i := 0; i < workerCount; i++ { - if roTxs[i], err = db.BeginRo(ctx); err != nil { - return err - } - reconWorkers[i].SetTx(roTxs[i]) - } 
- return nil - }() - if err != nil { - panic(err) - } - } - } - } - }() - var inputTxNum uint64 - var header *types.Header - for bn := uint64(0); bn < blockNum; bn++ { - if header, err = blockReader.HeaderByNumber(ctx, nil, bn); err != nil { - panic(err) - } - blockHash := header.Hash() - b, _, err := blockReader.BlockWithSenders(ctx, nil, blockHash, bn) - if err != nil { - panic(err) - } - txs := b.Transactions() - for txIndex := -1; txIndex <= len(txs); txIndex++ { - if bitmap.Contains(inputTxNum) { - txTask := state.TxTask{ - Header: header, - BlockNum: bn, - Block: b, - TxNum: inputTxNum, - TxIndex: txIndex, - BlockHash: blockHash, - Final: txIndex == len(txs), - } - if txIndex >= 0 && txIndex < len(txs) { - txTask.Tx = txs[txIndex] - } - workCh <- txTask - } - inputTxNum++ - } - } - close(workCh) - wg.Wait() - reconDone <- struct{}{} // Complete logging and committing go-routine - for i := 0; i < workerCount; i++ { - roTxs[i].Rollback() - } - rwTx, err := db.BeginRw(ctx) - if err != nil { - return err - } - defer func() { - if rwTx != nil { - rwTx.Rollback() - } - }() - if err = rs.Flush(rwTx); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - plainStateCollector := etl.NewCollector("recon plainState", datadir, etl.NewSortableBuffer(etl.BufferOptimalSize)) - defer plainStateCollector.Close() - codeCollector := etl.NewCollector("recon code", datadir, etl.NewOldestEntryBuffer(etl.BufferOptimalSize)) - defer codeCollector.Close() - plainContractCollector := etl.NewCollector("recon plainContract", datadir, etl.NewSortableBuffer(etl.BufferOptimalSize)) - defer plainContractCollector.Close() - roTx, err := db.BeginRo(ctx) - if err != nil { - return err - } - defer roTx.Rollback() - cursor, err := roTx.Cursor(kv.PlainStateR) - if err != nil { - return err - } - defer cursor.Close() - var k, v []byte - for k, v, err = cursor.First(); err == nil && k != nil; k, v, err = cursor.Next() { - if err = plainStateCollector.Collect(k[8:], 
v); err != nil { - return err - } - } - if err != nil { - return err - } - cursor.Close() - if cursor, err = roTx.Cursor(kv.CodeR); err != nil { - return err - } - defer cursor.Close() - for k, v, err = cursor.First(); err == nil && k != nil; k, v, err = cursor.Next() { - if err = codeCollector.Collect(k[8:], v); err != nil { - return err - } - } - if err != nil { - return err - } - cursor.Close() - if cursor, err = roTx.Cursor(kv.PlainContractR); err != nil { - return err - } - defer cursor.Close() - for k, v, err = cursor.First(); err == nil && k != nil; k, v, err = cursor.Next() { - if err = plainContractCollector.Collect(k[8:], v); err != nil { - return err - } - } - if err != nil { - return err - } - cursor.Close() - roTx.Rollback() - if rwTx, err = db.BeginRw(ctx); err != nil { - return err - } - if err = rwTx.ClearBucket(kv.PlainStateR); err != nil { - return err - } - if err = rwTx.ClearBucket(kv.CodeR); err != nil { - return err - } - if err = rwTx.ClearBucket(kv.PlainContractR); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - plainStateCollectors := make([]*etl.Collector, workerCount) - codeCollectors := make([]*etl.Collector, workerCount) - plainContractCollectors := make([]*etl.Collector, workerCount) - for i := 0; i < workerCount; i++ { - plainStateCollectors[i] = etl.NewCollector(fmt.Sprintf("plainState %d", i), datadir, etl.NewSortableBuffer(etl.BufferOptimalSize)) - defer plainStateCollectors[i].Close() - codeCollectors[i] = etl.NewCollector(fmt.Sprintf("code %d", i), datadir, etl.NewSortableBuffer(etl.BufferOptimalSize)) - defer codeCollectors[i].Close() - plainContractCollectors[i] = etl.NewCollector(fmt.Sprintf("plainContract %d", i), datadir, etl.NewSortableBuffer(etl.BufferOptimalSize)) - defer plainContractCollectors[i].Close() - } - doneCount = 0 - for i := 0; i < workerCount; i++ { - fillWorkers[i].ResetProgress() - go fillWorkers[i].fillAccounts(plainStateCollectors[i]) - } - for 
atomic.LoadUint64(&doneCount) < uint64(workerCount) { - select { - case <-logEvery.C: - var m runtime.MemStats - libcommon.ReadMemStats(&m) - var p float64 - for i := 0; i < workerCount; i++ { - if total := fillWorkers[i].Total(); total > 0 { - p += float64(fillWorkers[i].Progress()) / float64(total) - } - } - p *= 100.0 - log.Info("Filling accounts", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), - ) - } - } - doneCount = 0 - for i := 0; i < workerCount; i++ { - fillWorkers[i].ResetProgress() - go fillWorkers[i].fillStorage(plainStateCollectors[i]) - } - for atomic.LoadUint64(&doneCount) < uint64(workerCount) { - select { - case <-logEvery.C: - var m runtime.MemStats - libcommon.ReadMemStats(&m) - var p float64 - for i := 0; i < workerCount; i++ { - if total := fillWorkers[i].Total(); total > 0 { - p += float64(fillWorkers[i].Progress()) / float64(total) - } - } - p *= 100.0 - log.Info("Filling storage", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), - ) - } - } - doneCount = 0 - for i := 0; i < workerCount; i++ { - fillWorkers[i].ResetProgress() - go fillWorkers[i].fillCode(codeCollectors[i], plainContractCollectors[i]) - } - for atomic.LoadUint64(&doneCount) < uint64(workerCount) { - select { - case <-logEvery.C: - var m runtime.MemStats - libcommon.ReadMemStats(&m) - var p float64 - for i := 0; i < workerCount; i++ { - if total := fillWorkers[i].Total(); total > 0 { - p += float64(fillWorkers[i].Progress()) / float64(total) - } - } - p *= 100.0 - log.Info("Filling code", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), - ) - } - } - // Load all collections into the main collector - for i := 0; i < workerCount; i++ { - if err = plainStateCollectors[i].Load(nil, "", func(k, v []byte, table 
etl.CurrentTableReader, next etl.LoadNextFunc) error { - return plainStateCollector.Collect(k, v) - }, etl.TransformArgs{}); err != nil { - return err - } - plainStateCollectors[i].Close() - if err = codeCollectors[i].Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { - return codeCollector.Collect(k, v) - }, etl.TransformArgs{}); err != nil { - return err - } - codeCollectors[i].Close() - if err = plainContractCollectors[i].Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { - return plainContractCollector.Collect(k, v) - }, etl.TransformArgs{}); err != nil { - return err - } - plainContractCollectors[i].Close() - } - rwTx, err = db.BeginRw(ctx) - if err != nil { - return err - } - if err = plainStateCollector.Load(rwTx, kv.PlainState, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { - return err - } - plainStateCollector.Close() - if err = codeCollector.Load(rwTx, kv.Code, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { - return err - } - codeCollector.Close() - if err = plainContractCollector.Load(rwTx, kv.PlainContractCode, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { - return err - } - plainContractCollector.Close() - if err = rwTx.Commit(); err != nil { - return err - } - if rwTx, err = db.BeginRw(ctx); err != nil { - return err - } - log.Info("Reconstitution complete", "duration", time.Since(startTime)) - log.Info("Computing hashed state") - tmpDir := filepath.Join(datadir, "tmp") - if err = stagedsync.PromoteHashedStateCleanly("recon", rwTx, stagedsync.StageHashStateCfg(db, tmpDir), ctx); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - if rwTx, err = db.BeginRw(ctx); err != nil { - return err - } - var rootHash common.Hash - if rootHash, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, 
blockReader, nil /* HeaderDownload */), common.Hash{}, make(chan struct{}, 1)); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - if rootHash != header.Root { - log.Error("Incorrect root hash", "expected", fmt.Sprintf("%x", header.Root)) - } - return nil -} diff --git a/cmd/state/commands/state_root.go b/cmd/state/commands/state_root.go index dc94011b8cd..4727d3045dc 100644 --- a/cmd/state/commands/state_root.go +++ b/cmd/state/commands/state_root.go @@ -6,7 +6,6 @@ import ( "fmt" "os" "os/signal" - "path" "path/filepath" "syscall" @@ -20,6 +19,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/stagedsync" + datadir2 "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/ledgerwatch/log/v3" @@ -50,7 +50,8 @@ func StateRoot(genesis *core.Genesis, logger log.Logger, blockNum uint64, datadi <-sigs interruptCh <- true }() - historyDb, err := kv2.NewMDBX(logger).Path(path.Join(datadir, "chaindata")).Open() + dirs := datadir2.New(datadir) + historyDb, err := kv2.NewMDBX(logger).Path(dirs.Chaindata).Open() if err != nil { return err } @@ -131,7 +132,7 @@ func StateRoot(genesis *core.Genesis, logger log.Logger, blockNum uint64, datadi r := state.NewPlainStateReader(tx) intraBlockState := state.New(r) getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(historyTx, hash, number) } - if _, err = runBlock(ethash.NewFullFaker(), intraBlockState, noOpWriter, w, chainConfig, getHeader, nil, b, vmConfig, false); err != nil { + if _, err = runBlock(ethash.NewFullFaker(), intraBlockState, noOpWriter, w, chainConfig, getHeader, b, vmConfig, false); err != nil { return fmt.Errorf("block %d: %w", block, err) } if block+1 == blockNum { @@ -141,7 +142,7 @@ func StateRoot(genesis *core.Genesis, logger log.Logger, blockNum uint64, datadi if err 
= rwTx.ClearBucket(kv.HashedStorage); err != nil { return err } - if err = stagedsync.PromoteHashedStateCleanly("hashedstate", rwTx, stagedsync.StageHashStateCfg(nil, stateDbPath), ctx); err != nil { + if err = stagedsync.PromoteHashedStateCleanly("hashedstate", rwTx, stagedsync.StageHashStateCfg(nil, dirs, false, nil), ctx); err != nil { return err } var root common.Hash diff --git a/cmd/state/commands/calltracer22.go b/cmd/state/exec3/calltracer22.go similarity index 89% rename from cmd/state/commands/calltracer22.go rename to cmd/state/exec3/calltracer22.go index 09e58b3dbc2..ed1d28912dd 100644 --- a/cmd/state/commands/calltracer22.go +++ b/cmd/state/exec3/calltracer22.go @@ -1,4 +1,4 @@ -package commands +package exec3 import ( "math/big" @@ -19,6 +19,8 @@ func NewCallTracer() *CallTracer { tos: map[common.Address]struct{}{}, } } +func (ct *CallTracer) Froms() map[common.Address]struct{} { return ct.froms } +func (ct *CallTracer) Tos() map[common.Address]struct{} { return ct.tos } func (ct *CallTracer) CaptureStart(evm *vm.EVM, depth int, from common.Address, to common.Address, precompile bool, create bool, calltype vm.CallType, input []byte, gas uint64, value *big.Int, code []byte) { ct.froms[from] = struct{}{} diff --git a/cmd/state/exec3/state.go b/cmd/state/exec3/state.go new file mode 100644 index 00000000000..b7fd243712e --- /dev/null +++ b/cmd/state/exec3/state.go @@ -0,0 +1,314 @@ +package exec3 + +import ( + "context" + "math/big" + "sync" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/consensus/misc" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/systemcontracts" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/params" + 
"github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/log/v3" +) + +type Worker22 struct { + lock sync.Locker + wg *sync.WaitGroup + chainDb kv.RoDB + chainTx kv.Tx + background bool + blockReader services.FullBlockReader + rs *state.State22 + stateWriter *state.StateWriter22 + stateReader *state.StateReader22 + chainConfig *params.ChainConfig + getHeader func(hash common.Hash, number uint64) *types.Header + + ctx context.Context + engine consensus.Engine + logger log.Logger + genesis *core.Genesis + resultCh chan *state.TxTask + epoch EpochReader + chain ChainReader + isPoSA bool + posa consensus.PoSA +} + +func NewWorker22(lock sync.Locker, background bool, chainDb kv.RoDB, wg *sync.WaitGroup, rs *state.State22, blockReader services.FullBlockReader, chainConfig *params.ChainConfig, logger log.Logger, genesis *core.Genesis, resultCh chan *state.TxTask, engine consensus.Engine) *Worker22 { + ctx := context.Background() + w := &Worker22{ + lock: lock, + chainDb: chainDb, + wg: wg, + rs: rs, + background: background, + blockReader: blockReader, + stateWriter: state.NewStateWriter22(rs), + stateReader: state.NewStateReader22(rs), + chainConfig: chainConfig, + + ctx: ctx, + logger: logger, + genesis: genesis, + resultCh: resultCh, + engine: engine, + } + w.getHeader = func(hash common.Hash, number uint64) *types.Header { + h, err := blockReader.Header(ctx, w.chainTx, hash, number) + if err != nil { + panic(err) + } + return h + } + + w.posa, w.isPoSA = engine.(consensus.PoSA) + return w +} + +func (rw *Worker22) Tx() kv.Tx { return rw.chainTx } +func (rw *Worker22) ResetTx(chainTx kv.Tx) { + if rw.background && rw.chainTx != nil { + rw.chainTx.Rollback() + rw.chainTx = nil + } + if chainTx != nil { + rw.chainTx = chainTx + rw.stateReader.SetTx(rw.chainTx) + rw.epoch = EpochReader{tx: rw.chainTx} + rw.chain = ChainReader{config: rw.chainConfig, tx: rw.chainTx, blockReader: rw.blockReader} + } +} + +func (rw *Worker22) Run() { + defer rw.wg.Done() + 
for txTask, ok := rw.rs.Schedule(); ok; txTask, ok = rw.rs.Schedule() { + rw.RunTxTask(txTask) + rw.resultCh <- txTask // Needs to have outside of the lock + } +} + +func (rw *Worker22) RunTxTask(txTask *state.TxTask) { + rw.lock.Lock() + defer rw.lock.Unlock() + if rw.background && rw.chainTx == nil { + var err error + if rw.chainTx, err = rw.chainDb.BeginRo(rw.ctx); err != nil { + panic(err) + } + rw.stateReader.SetTx(rw.chainTx) + rw.epoch = EpochReader{tx: rw.chainTx} + rw.chain = ChainReader{config: rw.chainConfig, tx: rw.chainTx, blockReader: rw.blockReader} + } + txTask.Error = nil + rw.stateReader.SetTxNum(txTask.TxNum) + rw.stateWriter.SetTxNum(txTask.TxNum) + rw.stateReader.ResetReadSet() + rw.stateWriter.ResetWriteSet() + ibs := state.New(rw.stateReader) + rules := txTask.Rules + daoForkTx := rw.chainConfig.DAOForkSupport && rw.chainConfig.DAOForkBlock != nil && rw.chainConfig.DAOForkBlock.Uint64() == txTask.BlockNum && txTask.TxIndex == -1 + var err error + if txTask.BlockNum == 0 && txTask.TxIndex == -1 { + //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) + // Genesis block + _, ibs, err = rw.genesis.ToBlock() + if err != nil { + panic(err) + } + // For Genesis, rules should be empty, so that empty accounts can be included + rules = ¶ms.Rules{} + } else if daoForkTx { + //fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txTask.TxNum, txTask.BlockNum) + misc.ApplyDAOHardFork(ibs) + ibs.SoftFinalise() + } else if txTask.TxIndex == -1 { + // Block initialisation + //fmt.Printf("txNum=%d, blockNum=%d, initialisation of the block\n", txTask.TxNum, txTask.BlockNum) + if rw.isPoSA { + systemcontracts.UpgradeBuildInSystemContract(rw.chainConfig, txTask.Block.Number(), ibs) + } + syscall := func(contract common.Address, data []byte) ([]byte, error) { + return core.SysCallContract(contract, data, *rw.chainConfig, ibs, txTask.Block.Header(), rw.engine) + } + rw.engine.Initialize(rw.chainConfig, rw.chain, rw.epoch, 
txTask.Block.Header(), txTask.Block.Transactions(), txTask.Block.Uncles(), syscall) + } else if txTask.Final { + if txTask.BlockNum > 0 { + //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txTask.TxNum, txTask.BlockNum) + // End of block transaction in a block + syscall := func(contract common.Address, data []byte) ([]byte, error) { + return core.SysCallContract(contract, data, *rw.chainConfig, ibs, txTask.Block.Header(), rw.engine) + } + if _, _, err := rw.engine.Finalize(rw.chainConfig, txTask.Block.Header(), ibs, txTask.Block.Transactions(), txTask.Block.Uncles(), nil /* receipts */, rw.epoch, rw.chain, syscall); err != nil { + //fmt.Printf("error=%v\n", err) + txTask.Error = err + } else { + txTask.TraceTos = map[common.Address]struct{}{} + txTask.TraceTos[txTask.Block.Coinbase()] = struct{}{} + for _, uncle := range txTask.Block.Uncles() { + txTask.TraceTos[uncle.Coinbase] = struct{}{} + } + } + } + } else { + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + if rw.isPoSA { + if isSystemTx, err := rw.posa.IsSystemTransaction(txTask.Tx, txTask.Block.Header()); err != nil { + panic(err) + } else if isSystemTx { + //fmt.Printf("System tx\n") + return + } + } + txHash := txTask.Tx.Hash() + gp := new(core.GasPool).AddGas(txTask.Tx.GetGas()) + ct := NewCallTracer() + vmConfig := vm.Config{Debug: true, Tracer: ct, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, txTask.BlockNum)} + ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) + getHashFn := core.GetHashFn(txTask.Block.Header(), rw.getHeader) + blockContext := core.NewEVMBlockContext(txTask.Block.Header(), getHashFn, rw.engine, nil /* author */) + msg := txTask.TxAsMessage + txContext := core.NewEVMTxContext(msg) + vmenv := vm.NewEVM(blockContext, txContext, ibs, rw.chainConfig, vmConfig) + if _, err = core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* gasBailout */); err != nil { + txTask.Error = err + //fmt.Printf("error=%v\n", 
err) + } else { + // Update the state with pending changes + ibs.SoftFinalise() + txTask.Logs = ibs.GetLogs(txHash) + txTask.TraceFroms = ct.froms + txTask.TraceTos = ct.tos + } + } + // Prepare read set, write set and balanceIncrease set and send for serialisation + if txTask.Error == nil { + txTask.BalanceIncreaseSet = ibs.BalanceIncreaseSet() + //for addr, bal := range txTask.BalanceIncreaseSet { + // fmt.Printf("[%x]=>[%d]\n", addr, &bal) + //} + if err = ibs.MakeWriteSet(rules, rw.stateWriter); err != nil { + panic(err) + } + txTask.ReadLists = rw.stateReader.ReadSet() + txTask.WriteLists = rw.stateWriter.WriteSet() + txTask.AccountPrevs, txTask.AccountDels, txTask.StoragePrevs, txTask.CodePrevs = rw.stateWriter.PrevAndDels() + size := (20 + 32) * len(txTask.BalanceIncreaseSet) + for _, list := range txTask.ReadLists { + for _, b := range list.Keys { + size += len(b) + } + for _, b := range list.Vals { + size += len(b) + } + } + for _, list := range txTask.WriteLists { + for _, b := range list.Keys { + size += len(b) + } + for _, b := range list.Vals { + size += len(b) + } + } + txTask.ResultsSize = int64(size) + } +} + +type ChainReader struct { + config *params.ChainConfig + tx kv.Tx + blockReader services.FullBlockReader +} + +func NewChainReader(config *params.ChainConfig, tx kv.Tx, blockReader services.FullBlockReader) ChainReader { + return ChainReader{config: config, tx: tx, blockReader: blockReader} +} + +func (cr ChainReader) Config() *params.ChainConfig { return cr.config } +func (cr ChainReader) CurrentHeader() *types.Header { panic("") } +func (cr ChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { + if cr.blockReader != nil { + h, _ := cr.blockReader.Header(context.Background(), cr.tx, hash, number) + return h + } + return rawdb.ReadHeader(cr.tx, hash, number) +} +func (cr ChainReader) GetHeaderByNumber(number uint64) *types.Header { + if cr.blockReader != nil { + h, _ := cr.blockReader.HeaderByNumber(context.Background(), 
cr.tx, number) + return h + } + return rawdb.ReadHeaderByNumber(cr.tx, number) + +} +func (cr ChainReader) GetHeaderByHash(hash common.Hash) *types.Header { + if cr.blockReader != nil { + number := rawdb.ReadHeaderNumber(cr.tx, hash) + if number == nil { + return nil + } + return cr.GetHeader(hash, *number) + } + h, _ := rawdb.ReadHeaderByHash(cr.tx, hash) + return h +} +func (cr ChainReader) GetTd(hash common.Hash, number uint64) *big.Int { + td, err := rawdb.ReadTd(cr.tx, hash, number) + if err != nil { + log.Error("ReadTd failed", "err", err) + return nil + } + return td +} + +type EpochReader struct { + tx kv.Tx +} + +func NewEpochReader(tx kv.Tx) EpochReader { return EpochReader{tx: tx} } + +func (cr EpochReader) GetEpoch(hash common.Hash, number uint64) ([]byte, error) { + return rawdb.ReadEpoch(cr.tx, number, hash) +} +func (cr EpochReader) PutEpoch(hash common.Hash, number uint64, proof []byte) error { + panic("") +} +func (cr EpochReader) GetPendingEpoch(hash common.Hash, number uint64) ([]byte, error) { + return rawdb.ReadPendingEpoch(cr.tx, number, hash) +} +func (cr EpochReader) PutPendingEpoch(hash common.Hash, number uint64, proof []byte) error { + panic("") +} +func (cr EpochReader) FindBeforeOrEqualNumber(number uint64) (blockNum uint64, blockHash common.Hash, transitionProof []byte, err error) { + return rawdb.FindEpochBeforeOrEqualNumber(cr.tx, number) +} + +func NewWorkersPool(lock sync.Locker, background bool, chainDb kv.RoDB, wg *sync.WaitGroup, rs *state.State22, blockReader services.FullBlockReader, chainConfig *params.ChainConfig, logger log.Logger, genesis *core.Genesis, engine consensus.Engine, workerCount int) (reconWorkers []*Worker22, resultCh chan *state.TxTask, clear func()) { + queueSize := workerCount * 64 + reconWorkers = make([]*Worker22, workerCount) + resultCh = make(chan *state.TxTask, queueSize) + for i := 0; i < workerCount; i++ { + reconWorkers[i] = NewWorker22(lock, background, chainDb, wg, rs, blockReader, chainConfig, 
logger, genesis, resultCh, engine) + } + clear = func() { + for _, w := range reconWorkers { + w.ResetTx(nil) + } + } + if workerCount > 1 { + wg.Add(workerCount) + for i := 0; i < workerCount; i++ { + go reconWorkers[i].Run() + } + } + return reconWorkers, resultCh, clear +} diff --git a/cmd/state/exec3/state_recon.go b/cmd/state/exec3/state_recon.go new file mode 100644 index 00000000000..d78a8a0cfa2 --- /dev/null +++ b/cmd/state/exec3/state_recon.go @@ -0,0 +1,379 @@ +package exec3 + +import ( + "context" + "encoding/binary" + "fmt" + "sync" + "sync/atomic" + + "github.com/RoaringBitmap/roaring/roaring64" + "github.com/ledgerwatch/erigon-lib/common/length" + "github.com/ledgerwatch/erigon-lib/etl" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/consensus/misc" + "github.com/ledgerwatch/erigon/core" + state2 "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/systemcontracts" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/types/accounts" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/log/v3" + atomic2 "go.uber.org/atomic" +) + +type FillWorker struct { + txNum uint64 + doneCount *atomic2.Uint64 + ac *state.Aggregator22Context + fromKey, toKey []byte + currentKey []byte + bitmap roaring64.Bitmap + total uint64 + progress uint64 +} + +func NewFillWorker(txNum uint64, doneCount *atomic2.Uint64, a *state.Aggregator22, fromKey, toKey []byte) *FillWorker { + fw := &FillWorker{ + txNum: txNum, + doneCount: doneCount, + ac: a.MakeContext(), + fromKey: fromKey, + toKey: toKey, + } + return fw +} + +func (fw *FillWorker) Total() uint64 { + return atomic.LoadUint64(&fw.total) +} +func (fw *FillWorker) Bitmap() *roaring64.Bitmap { return &fw.bitmap } 
+ +func (fw *FillWorker) Progress() uint64 { + return atomic.LoadUint64(&fw.progress) +} + +func (fw *FillWorker) FillAccounts(plainStateCollector *etl.Collector) { + defer func() { + fw.doneCount.Add(1) + }() + it := fw.ac.IterateAccountsHistory(fw.fromKey, fw.toKey, fw.txNum) + atomic.StoreUint64(&fw.total, it.Total()) + value := make([]byte, 1024) + for it.HasNext() { + key, val, progress := it.Next() + atomic.StoreUint64(&fw.progress, progress) + fw.currentKey = key + if len(val) > 0 { + var a accounts.Account + a.Reset() + pos := 0 + nonceBytes := int(val[pos]) + pos++ + if nonceBytes > 0 { + a.Nonce = bytesToUint64(val[pos : pos+nonceBytes]) + pos += nonceBytes + } + balanceBytes := int(val[pos]) + pos++ + if balanceBytes > 0 { + a.Balance.SetBytes(val[pos : pos+balanceBytes]) + pos += balanceBytes + } + codeHashBytes := int(val[pos]) + pos++ + if codeHashBytes > 0 { + copy(a.CodeHash[:], val[pos:pos+codeHashBytes]) + pos += codeHashBytes + } + incBytes := int(val[pos]) + pos++ + if incBytes > 0 { + a.Incarnation = bytesToUint64(val[pos : pos+incBytes]) + } + if a.Incarnation > 0 { + a.Incarnation = state2.FirstContractIncarnation + } + value = value[:a.EncodingLengthForStorage()] + a.EncodeForStorage(value) + if err := plainStateCollector.Collect(key, value); err != nil { + panic(err) + } + //fmt.Printf("Account [%x]=>{Balance: %d, Nonce: %d, Root: %x, CodeHash: %x}\n", key, &a.Balance, a.Nonce, a.Root, a.CodeHash) + } + } +} + +func (fw *FillWorker) FillStorage(plainStateCollector *etl.Collector) { + defer func() { + fw.doneCount.Add(1) + }() + it := fw.ac.IterateStorageHistory(fw.fromKey, fw.toKey, fw.txNum) + atomic.StoreUint64(&fw.total, it.Total()) + var compositeKey = make([]byte, length.Addr+length.Incarnation+length.Hash) + for it.HasNext() { + key, val, progress := it.Next() + atomic.StoreUint64(&fw.progress, progress) + fw.currentKey = key + if len(val) > 0 { + copy(compositeKey[:20], key[:20]) + binary.BigEndian.PutUint64(compositeKey[20:], 
state2.FirstContractIncarnation) + copy(compositeKey[20+8:], key[20:]) + + if err := plainStateCollector.Collect(compositeKey, val); err != nil { + panic(err) + } + //fmt.Printf("Storage [%x] => [%x]\n", compositeKey, val) + } + } +} + +func (fw *FillWorker) FillCode(codeCollector, plainContractCollector *etl.Collector) { + defer func() { + fw.doneCount.Add(1) + }() + it := fw.ac.IterateCodeHistory(fw.fromKey, fw.toKey, fw.txNum) + atomic.StoreUint64(&fw.total, it.Total()) + var compositeKey = make([]byte, length.Addr+length.Incarnation) + + for it.HasNext() { + key, val, progress := it.Next() + atomic.StoreUint64(&fw.progress, progress) + fw.currentKey = key + if len(val) > 0 { + copy(compositeKey, key) + binary.BigEndian.PutUint64(compositeKey[length.Addr:], state2.FirstContractIncarnation) + + codeHash, err := common.HashData(val) + if err != nil { + panic(err) + } + if err = codeCollector.Collect(codeHash[:], val); err != nil { + panic(err) + } + if err = plainContractCollector.Collect(compositeKey, codeHash[:]); err != nil { + panic(err) + } + //fmt.Printf("Code [%x] => %d\n", compositeKey, len(val)) + } + } +} + +func (fw *FillWorker) ResetProgress() { + fw.total = 0 + fw.progress = 0 +} + +func (fw *FillWorker) BitmapAccounts(accountCollectorX *etl.Collector) { + defer func() { + fw.doneCount.Add(1) + }() + it := fw.ac.IterateAccountsReconTxs(fw.fromKey, fw.toKey, fw.txNum) + atomic.StoreUint64(&fw.total, it.Total()) + var txKey [8]byte + for it.HasNext() { + key, txNum, progress := it.Next() + binary.BigEndian.PutUint64(txKey[:], txNum) + if err := accountCollectorX.Collect(key, txKey[:]); err != nil { + panic(err) + } + atomic.StoreUint64(&fw.progress, progress) + fw.bitmap.Add(txNum) + } +} + +func (fw *FillWorker) BitmapStorage(storageCollectorX *etl.Collector) { + defer func() { + fw.doneCount.Add(1) + }() + it := fw.ac.IterateStorageReconTxs(fw.fromKey, fw.toKey, fw.txNum) + atomic.StoreUint64(&fw.total, it.Total()) + var txKey [8]byte + for 
it.HasNext() { + key, txNum, progress := it.Next() + binary.BigEndian.PutUint64(txKey[:], txNum) + if err := storageCollectorX.Collect(key, txKey[:]); err != nil { + panic(err) + } + atomic.StoreUint64(&fw.progress, progress) + fw.bitmap.Add(txNum) + } +} + +func (fw *FillWorker) BitmapCode(codeCollectorX *etl.Collector) { + defer func() { + fw.doneCount.Add(1) + }() + it := fw.ac.IterateCodeReconTxs(fw.fromKey, fw.toKey, fw.txNum) + atomic.StoreUint64(&fw.total, it.Total()) + var txKey [8]byte + for it.HasNext() { + key, txNum, progress := it.Next() + binary.BigEndian.PutUint64(txKey[:], txNum) + if err := codeCollectorX.Collect(key, txKey[:]); err != nil { + panic(err) + } + atomic.StoreUint64(&fw.progress, progress) + fw.bitmap.Add(txNum) + } +} + +func bytesToUint64(buf []byte) (x uint64) { + for i, b := range buf { + x = x<<8 + uint64(b) + if i == 7 { + return + } + } + return +} + +type ReconWorker struct { + lock sync.Locker + wg *sync.WaitGroup + rs *state2.ReconState + blockReader services.FullBlockReader + stateWriter *state2.StateReconWriter + stateReader *state2.HistoryReaderNoState + getHeader func(hash common.Hash, number uint64) *types.Header + ctx context.Context + engine consensus.Engine + chainConfig *params.ChainConfig + logger log.Logger + genesis *core.Genesis + epoch EpochReader + chain ChainReader + isPoSA bool + posa consensus.PoSA +} + +func NewReconWorker(lock sync.Locker, wg *sync.WaitGroup, rs *state2.ReconState, + a *state.Aggregator22, blockReader services.FullBlockReader, + chainConfig *params.ChainConfig, logger log.Logger, genesis *core.Genesis, engine consensus.Engine, + chainTx kv.Tx, +) *ReconWorker { + ac := a.MakeContext() + rw := &ReconWorker{ + lock: lock, + wg: wg, + rs: rs, + blockReader: blockReader, + ctx: context.Background(), + stateWriter: state2.NewStateReconWriter(ac, rs), + stateReader: state2.NewHistoryReaderNoState(ac, rs), + chainConfig: chainConfig, + logger: logger, + genesis: genesis, + engine: engine, + } + 
rw.epoch = NewEpochReader(chainTx) + rw.chain = NewChainReader(chainConfig, chainTx, blockReader) + rw.posa, rw.isPoSA = engine.(consensus.PoSA) + return rw +} + +func (rw *ReconWorker) SetTx(tx kv.Tx) { + rw.stateReader.SetTx(tx) + rw.stateWriter.SetTx(tx) +} + +func (rw *ReconWorker) Run() { + defer rw.wg.Done() + rw.getHeader = func(hash common.Hash, number uint64) *types.Header { + h, err := rw.blockReader.Header(rw.ctx, nil, hash, number) + if err != nil { + panic(err) + } + return h + } + for txTask, ok := rw.rs.Schedule(); ok; txTask, ok = rw.rs.Schedule() { + rw.runTxTask(txTask) + } +} + +func (rw *ReconWorker) runTxTask(txTask *state2.TxTask) { + rw.lock.Lock() + defer rw.lock.Unlock() + rw.stateReader.SetTxNum(txTask.TxNum) + rw.stateReader.ResetError() + rw.stateWriter.SetTxNum(txTask.TxNum) + noop := state2.NewNoopWriter() + rules := txTask.Rules + ibs := state2.New(rw.stateReader) + daoForkTx := rw.chainConfig.DAOForkSupport && rw.chainConfig.DAOForkBlock != nil && rw.chainConfig.DAOForkBlock.Uint64() == txTask.BlockNum && txTask.TxIndex == -1 + var err error + if txTask.BlockNum == 0 && txTask.TxIndex == -1 { + //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) + // Genesis block + _, ibs, err = rw.genesis.ToBlock() + if err != nil { + panic(err) + } + // For Genesis, rules should be empty, so that empty accounts can be included + rules = ¶ms.Rules{} + } else if daoForkTx { + //fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txNum, blockNum) + misc.ApplyDAOHardFork(ibs) + ibs.SoftFinalise() + } else if txTask.Final { + if txTask.BlockNum > 0 { + //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txNum, blockNum) + // End of block transaction in a block + syscall := func(contract common.Address, data []byte) ([]byte, error) { + return core.SysCallContract(contract, data, *rw.chainConfig, ibs, txTask.Block.Header(), rw.engine) + } + if _, _, err := rw.engine.Finalize(rw.chainConfig, txTask.Block.Header(), 
ibs, txTask.Block.Transactions(), txTask.Block.Uncles(), nil /* receipts */, rw.epoch, rw.chain, syscall); err != nil { + panic(fmt.Errorf("finalize of block %d failed: %w", txTask.BlockNum, err)) + } + } + } else if txTask.TxIndex == -1 { + // Block initialisation + if rw.isPoSA { + systemcontracts.UpgradeBuildInSystemContract(rw.chainConfig, txTask.Block.Number(), ibs) + } + syscall := func(contract common.Address, data []byte) ([]byte, error) { + return core.SysCallContract(contract, data, *rw.chainConfig, ibs, txTask.Block.Header(), rw.engine) + } + rw.engine.Initialize(rw.chainConfig, rw.chain, rw.epoch, txTask.Block.Header(), txTask.Block.Transactions(), txTask.Block.Uncles(), syscall) + } else { + if rw.isPoSA { + if isSystemTx, err := rw.posa.IsSystemTransaction(txTask.Tx, txTask.Block.Header()); err != nil { + panic(err) + } else if isSystemTx { + return + } + } + txHash := txTask.Tx.Hash() + gp := new(core.GasPool).AddGas(txTask.Tx.GetGas()) + vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, txTask.BlockNum)} + getHashFn := core.GetHashFn(txTask.Block.Header(), rw.getHeader) + blockContext := core.NewEVMBlockContext(txTask.Block.Header(), getHashFn, rw.engine, nil /* author */) + ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) + msg := txTask.TxAsMessage + txContext := core.NewEVMTxContext(msg) + vmenv := vm.NewEVM(blockContext, txContext, ibs, rw.chainConfig, vmConfig) + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, evm=%p\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex, vmenv) + _, err = core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* gasBailout */) + if err != nil { + panic(fmt.Errorf("could not apply blockNum=%d, txIdx=%d [%x] failed: %w", txTask.BlockNum, txTask.TxIndex, txHash, err)) + } + if err = ibs.FinalizeTx(rules, noop); err != nil { + panic(err) + } + } + if dependency, ok := rw.stateReader.ReadError(); ok { + //fmt.Printf("rollback %d\n", txNum) + rw.rs.RollbackTx(txTask, 
dependency) + } else { + if err = ibs.CommitBlock(rules, rw.stateWriter); err != nil { + panic(err) + } + //fmt.Printf("commit %d\n", txNum) + rw.rs.CommitTxNum(txTask.TxNum) + } +} diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 506e8b4d4a7..d7e4d593ee9 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -31,6 +31,7 @@ import ( "text/template" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/txpool" @@ -163,6 +164,10 @@ var ( Name: "snapshots", Usage: `Default: use snapshots "true" for BSC, Mainnet and Goerli. use snapshots "false" in all other cases`, } + LightClientFlag = cli.BoolFlag{ + Name: "experimental.lightclient", + Usage: "enables experimental CL lightclient.", + } // Transaction pool settings TxPoolDisableFlag = cli.BoolFlag{ Name: "txpool.disable", @@ -322,15 +327,15 @@ var ( Usage: "HTTP-RPC server listening port", Value: nodecfg.DefaultHTTPPort, } - EngineAddr = cli.StringFlag{ - Name: "engine.addr", - Usage: "HTTP-RPC server listening interface for engineAPI", + AuthRpcAddr = cli.StringFlag{ + Name: "authrpc.addr", + Usage: "HTTP-RPC server listening interface for the Engine API", Value: nodecfg.DefaultHTTPHost, } - EnginePort = cli.UintFlag{ - Name: "engine.port", - Usage: "HTTP-RPC server listening port for the engineAPI", - Value: nodecfg.DefaultEngineHTTPPort, + AuthRpcPort = cli.UintFlag{ + Name: "authrpc.port", + Usage: "HTTP-RPC server listening port for the Engine API", + Value: nodecfg.DefaultAuthRpcPort, } JWTSecretPath = cli.StringFlag{ @@ -357,6 +362,11 @@ var ( Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). 
Accepts '*' wildcard.", Value: strings.Join(nodecfg.DefaultConfig.HTTPVirtualHosts, ","), } + AuthRpcVirtualHostsFlag = cli.StringFlag{ + Name: "authrpc.vhosts", + Usage: "Comma separated list of virtual hostnames from which to accept Engine API requests (server enforced). Accepts '*' wildcard.", + Value: strings.Join(nodecfg.DefaultConfig.HTTPVirtualHosts, ","), + } HTTPApiFlag = cli.StringFlag{ Name: "http.api", Usage: "API's offered over the HTTP-RPC interface", @@ -378,7 +388,7 @@ var ( DBReadConcurrencyFlag = cli.IntFlag{ Name: "db.read.concurrency", Usage: "Does limit amount of parallel db reads. Default: equal to GOMAXPROCS (or number of CPU)", - Value: runtime.GOMAXPROCS(-1), + Value: cmp.Max(10, runtime.GOMAXPROCS(-1)*8), } RpcAccessListFlag = cli.StringFlag{ Name: "rpc.accessList", @@ -395,20 +405,6 @@ var ( Usage: "Bug for bug compatibility with OE for trace_ routines", } - StarknetGrpcAddressFlag = cli.StringFlag{ - Name: "starknet.grpc.address", - Usage: "Starknet GRPC address", - Value: "127.0.0.1:6066", - } - - TevmFlag = cli.BoolFlag{ - Name: "experimental.tevm", - Usage: "Enables Transpiled EVM experiment", - } - MemoryOverlayFlag = cli.BoolTFlag{ - Name: "experimental.overlay", - Usage: "Enables In-Memory Overlay for PoS", - } TxpoolApiAddrFlag = cli.StringFlag{ Name: "txpool.api.addr", Usage: "txpool api network address, for example: 127.0.0.1:9090 (default: use value of --private.api.addr)", @@ -626,6 +622,10 @@ var ( Usage: "Metrics HTTP server listening port", Value: metrics.DefaultConfig.Port, } + HistoryV3Flag = cli.BoolFlag{ + Name: "experimental.history.v3", + Usage: "(also known as Erigon3) Not recommended yet: Can't change this flag after node creation. 
New DB and Snapshots format of history allows: parallel blocks execution, get state as of given transaction without executing whole block.", + } CliqueSnapshotCheckpointIntervalFlag = cli.UintFlag{ Name: "clique.checkpoint", @@ -721,6 +721,12 @@ var ( Name: "bor.withoutheimdall", Usage: "Run without Heimdall service (for testing purpose)", } + + ConfigFlag = cli.StringFlag{ + Name: "config", + Usage: "Sets erigon flags from YAML/TOML file", + Value: "", + } ) var MetricFlags = []cli.Flag{MetricsEnabledFlag, MetricsEnabledExpensiveFlag, MetricsHTTPFlag, MetricsPortFlag} @@ -840,7 +846,7 @@ func ParseNodesFromURLs(urls []string) ([]*enode.Node, error) { } // NewP2PConfig -// - doesn't setup bootnodes - they will set when genesisHash will know +// - doesn't setup bootnodes - they will set when genesisHash will know func NewP2PConfig( nodiscover bool, dirs datadir.Dirs, @@ -1075,8 +1081,6 @@ func DataDirForNetwork(datadir string, network string) string { return networkDataDirCheckingLegacy(datadir, "rinkeby") case networkname.GoerliChainName: return networkDataDirCheckingLegacy(datadir, "goerli") - case networkname.KilnDevnetChainName: - return networkDataDirCheckingLegacy(datadir, "kiln-devnet") case networkname.SokolChainName: return networkDataDirCheckingLegacy(datadir, "sokol") case networkname.FermionChainName: @@ -1171,7 +1175,7 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) { } } -//nolint +// nolint func setGPOCobra(f *pflag.FlagSet, cfg *gasprice.Config) { if v := f.Int(GpoBlocksFlag.Name, GpoBlocksFlag.Value, GpoBlocksFlag.Usage); v != nil { cfg.Blocks = *v @@ -1424,9 +1428,9 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) { // SetEthConfig applies eth-related command line flags to the config. 
func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.Config) { + cfg.CL = ctx.GlobalBool(LightClientFlag.Name) cfg.Sync.UseSnapshots = ctx.GlobalBoolT(SnapshotFlag.Name) cfg.Dirs = nodeConfig.Dirs - cfg.MemoryOverlay = ctx.GlobalBool(MemoryOverlayFlag.Name) cfg.Snapshot.KeepBlocks = ctx.GlobalBool(SnapKeepBlocksFlag.Name) cfg.Snapshot.Produce = !ctx.GlobalBool(SnapStopFlag.Name) cfg.Snapshot.NoDownloader = ctx.GlobalBool(NoDownloaderFlag.Name) @@ -1442,11 +1446,11 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C if err := uploadRate.UnmarshalText([]byte(uploadRateStr)); err != nil { panic(err) } - log.Info("torrent verbosity", "level", ctx.GlobalInt(TorrentVerbosityFlag.Name)) lvl, dbg, err := downloadercfg.Int2LogLevel(ctx.GlobalInt(TorrentVerbosityFlag.Name)) if err != nil { panic(err) } + log.Info("torrent verbosity", "level", lvl.LogString()) cfg.Downloader, err = downloadercfg.New(cfg.Dirs.Snap, lvl, dbg, nodeConfig.P2P.NAT, downloadRate, uploadRate, ctx.GlobalInt(TorrentPortFlag.Name), ctx.GlobalInt(TorrentConnsPerFileFlag.Name), ctx.GlobalInt(TorrentDownloadSlotsFlag.Name)) if err != nil { panic(err) @@ -1476,7 +1480,8 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C cfg.Ethstats = ctx.GlobalString(EthStatsURLFlag.Name) cfg.P2PEnabled = len(nodeConfig.P2P.SentryAddr) == 0 - cfg.EnabledIssuance = ctx.GlobalIsSet(EnabledIssuance.Name) + cfg.EnabledIssuance = ctx.GlobalBool(EnabledIssuance.Name) + cfg.HistoryV3 = ctx.GlobalBool(HistoryV3Flag.Name) if ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkID = ctx.GlobalUint64(NetworkIdFlag.Name) } @@ -1587,9 +1592,9 @@ func MakeConsolePreloads(ctx *cli.Context) []string { return nil } // Otherwise resolve absolute paths and return them - var preloads []string - - for _, file := range strings.Split(ctx.GlobalString(PreloadJSFlag.Name), ",") { + files := strings.Split(ctx.GlobalString(PreloadJSFlag.Name), ",") + preloads := 
make([]string, 0, len(files)) + for _, file := range files { preloads = append(preloads, strings.TrimSpace(file)) } return preloads diff --git a/cmd/verkle/main.go b/cmd/verkle/main.go new file mode 100644 index 00000000000..9fe021b8991 --- /dev/null +++ b/cmd/verkle/main.go @@ -0,0 +1,306 @@ +package main + +import ( + "context" + "encoding/binary" + "flag" + "os" + "time" + + "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "github.com/ledgerwatch/erigon/cmd/verkle/verkletrie" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/log/v3" +) + +type optionsCfg struct { + ctx context.Context + verkleDb string + stateDb string + workersCount uint + tmpdir string + disabledLookups bool +} + +func IncrementVerkleTree(cfg optionsCfg) error { + start := time.Now() + + db, err := mdbx.Open(cfg.stateDb, log.Root(), true) + if err != nil { + log.Error("Error while opening database", "err", err.Error()) + return err + } + defer db.Close() + + vDb, err := mdbx.Open(cfg.verkleDb, log.Root(), false) + if err != nil { + log.Error("Error while opening db transaction", "err", err.Error()) + return err + } + defer vDb.Close() + + vTx, err := vDb.BeginRw(cfg.ctx) + if err != nil { + return err + } + defer vTx.Rollback() + + tx, err := db.BeginRo(cfg.ctx) + if err != nil { + return err + } + defer tx.Rollback() + + from, err := stages.GetStageProgress(vTx, stages.VerkleTrie) + if err != nil { + return err + } + + to, err := stages.GetStageProgress(tx, stages.Execution) + if err != nil { + return err + } + verkleWriter := verkletrie.NewVerkleTreeWriter(vTx, cfg.tmpdir) + defer verkleWriter.Close() + if err := verkletrie.IncrementAccount(vTx, tx, uint64(cfg.workersCount), verkleWriter, from, to); err != nil { + return err + } + if _, err := verkletrie.IncrementStorage(vTx, tx, uint64(cfg.workersCount), verkleWriter, from, to); err != nil { + return 
err + } + if err := stages.SaveStageProgress(vTx, stages.VerkleTrie, to); err != nil { + return err + } + + log.Info("Finished", "elapesed", time.Since(start)) + return vTx.Commit() +} + +func RegeneratePedersenHashstate(cfg optionsCfg) error { + db, err := mdbx.Open(cfg.stateDb, log.Root(), true) + if err != nil { + log.Error("Error while opening database", "err", err.Error()) + return err + } + defer db.Close() + + vDb, err := mdbx.Open(cfg.stateDb, log.Root(), false) + if err != nil { + log.Error("Error while opening db transaction", "err", err.Error()) + return err + } + defer vDb.Close() + + vTx, err := vDb.BeginRw(cfg.ctx) + if err != nil { + return err + } + defer vTx.Rollback() + + tx, err := db.BeginRo(cfg.ctx) + if err != nil { + return err + } + defer tx.Rollback() + + verleWriter := verkletrie.NewVerkleTreeWriter(vTx, cfg.tmpdir) + + if err := verkletrie.RegeneratePedersenAccounts(vTx, tx, uint64(cfg.workersCount), verleWriter); err != nil { + return err + } + if err := verkletrie.RegeneratePedersenCode(vTx, tx, uint64(cfg.workersCount), verleWriter); err != nil { + return err + } + + if err := verkletrie.RegeneratePedersenStorage(vTx, tx, uint64(cfg.workersCount), verleWriter); err != nil { + return err + } + return vTx.Commit() +} + +func GenerateVerkleTree(cfg optionsCfg) error { + start := time.Now() + db, err := mdbx.Open(cfg.stateDb, log.Root(), true) + if err != nil { + log.Error("Error while opening database", "err", err.Error()) + return err + } + defer db.Close() + + vDb, err := mdbx.Open(cfg.verkleDb, log.Root(), false) + if err != nil { + log.Error("Error while opening db transaction", "err", err.Error()) + return err + } + defer vDb.Close() + + vTx, err := vDb.BeginRw(cfg.ctx) + if err != nil { + return err + } + defer vTx.Rollback() + + tx, err := db.BeginRo(cfg.ctx) + if err != nil { + return err + } + defer tx.Rollback() + + verkleWriter := verkletrie.NewVerkleTreeWriter(vTx, cfg.tmpdir) + + if err := 
verkletrie.RegeneratePedersenAccounts(vTx, tx, uint64(cfg.workersCount), verkleWriter); err != nil { + return err + } + if err := verkletrie.RegeneratePedersenCode(vTx, tx, uint64(cfg.workersCount), verkleWriter); err != nil { + return err + } + + if err := verkletrie.RegeneratePedersenStorage(vTx, tx, uint64(cfg.workersCount), verkleWriter); err != nil { + return err + } + + // Verkle Tree to be built + log.Info("Started Verkle Tree creation") + + var root common.Hash + if root, err = verkleWriter.CommitVerkleTreeFromScratch(); err != nil { + return err + } + + log.Info("Verkle Tree Generation completed", "elapsed", time.Since(start), "root", common.Bytes2Hex(root[:])) + + var progress uint64 + if progress, err = stages.GetStageProgress(tx, stages.Execution); err != nil { + return err + } + if err := stages.SaveStageProgress(vTx, stages.VerkleTrie, progress); err != nil { + return err + } + return vTx.Commit() +} + +func analyseOut(cfg optionsCfg) error { + db, err := mdbx.Open(cfg.verkleDb, log.Root(), false) + if err != nil { + return err + } + defer db.Close() + + tx, err := db.BeginRw(cfg.ctx) + if err != nil { + return err + } + defer tx.Rollback() + + buckets, err := tx.ListBuckets() + if err != nil { + return err + } + for _, bucket := range buckets { + size, err := tx.BucketSize(bucket) + if err != nil { + return err + } + log.Info("Bucket Analysis", "name", bucket, "size", datasize.ByteSize(size).HumanReadable()) + } + return nil +} + +func dump(cfg optionsCfg) error { + db, err := mdbx.Open(cfg.verkleDb, log.Root(), false) + if err != nil { + return err + } + defer db.Close() + + tx, err := db.BeginRw(cfg.ctx) + if err != nil { + return err + } + defer tx.Rollback() + logInterval := time.NewTicker(30 * time.Second) + file, err := os.Create("dump.txt") + if err != nil { + return err + } + defer file.Close() + verkleCursor, err := tx.Cursor(kv.VerkleTrie) + if err != nil { + return err + } + for k, v, err := verkleCursor.First(); k != nil; k, v, err = 
verkleCursor.Next() { + if err != nil { + return err + } + // k is the root so it will always be 32 bytes + if _, err := file.Write(k); err != nil { + return err + } + // Write length of RLP encoded note + lenNode := make([]byte, 8) + binary.BigEndian.PutUint64(lenNode, uint64(len(v))) + if _, err := file.Write(lenNode); err != nil { + return err + } + // Write Rlp encoded node + if _, err := file.Write(v); err != nil { + return err + } + select { + case <-logInterval.C: + log.Info("Dumping verkle tree to plain text", "key", common.Bytes2Hex(k)) + default: + } + } + return nil +} + +func main() { + ctx := context.Background() + mainDb := flag.String("state-chaindata", "chaindata", "path to the chaindata database file") + verkleDb := flag.String("verkle-chaindata", "out", "path to the output chaindata database file") + workersCount := flag.Uint("workers", 5, "amount of goroutines") + tmpdir := flag.String("tmpdir", "/tmp/etl-temp", "amount of goroutines") + action := flag.String("action", "", "action to execute (hashstate, bucketsizes, verkle)") + disableLookups := flag.Bool("disable-lookups", false, "disable lookups generation (more compact database)") + + flag.Parse() + log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(3), log.StderrHandler)) + + opt := optionsCfg{ + ctx: ctx, + stateDb: *mainDb, + verkleDb: *verkleDb, + workersCount: *workersCount, + tmpdir: *tmpdir, + disabledLookups: *disableLookups, + } + switch *action { + case "hashstate": + if err := RegeneratePedersenHashstate(opt); err != nil { + log.Error("Error", "err", err.Error()) + } + case "bucketsizes": + if err := analyseOut(opt); err != nil { + log.Error("Error", "err", err.Error()) + } + case "verkle": + if err := GenerateVerkleTree(opt); err != nil { + log.Error("Error", "err", err.Error()) + } + case "incremental": + if err := IncrementVerkleTree(opt); err != nil { + log.Error("Error", "err", err.Error()) + } + case "dump": + log.Info("Dumping in dump.txt") + if err := dump(opt); err != nil { 
+ log.Error("Error", "err", err.Error()) + } + default: + log.Warn("No valid --action specified, aborting") + } +} diff --git a/cmd/verkle/verkletrie/incrementAccount.go b/cmd/verkle/verkletrie/incrementAccount.go new file mode 100644 index 00000000000..7d8325e655e --- /dev/null +++ b/cmd/verkle/verkletrie/incrementAccount.go @@ -0,0 +1,137 @@ +package verkletrie + +import ( + "context" + "encoding/binary" + "sync" + "time" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/changeset" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/common/debug" + "github.com/ledgerwatch/erigon/core/types/accounts" + "github.com/ledgerwatch/log/v3" +) + +func IncrementAccount(vTx kv.RwTx, tx kv.Tx, workers uint64, verkleWriter *VerkleTreeWriter, from, to uint64) error { + logInterval := time.NewTicker(30 * time.Second) + logPrefix := "IncrementVerkleAccount" + + jobs := make(chan *regenerateIncrementalPedersenAccountsJob, batchSize) + out := make(chan *regenerateIncrementalPedersenAccountsOut, batchSize) + wg := new(sync.WaitGroup) + wg.Add(int(workers)) + ctx, cancelWorkers := context.WithCancel(context.Background()) + for i := 0; i < int(workers); i++ { + go func(threadNo int) { + defer debug.LogPanic() + defer wg.Done() + incrementalAccountWorker(ctx, logPrefix, jobs, out) + }(i) + } + defer cancelWorkers() + + accountCursor, err := tx.CursorDupSort(kv.AccountChangeSet) + if err != nil { + return err + } + defer accountCursor.Close() + + // Start Goroutine for collection + go func() { + defer debug.LogPanic() + defer cancelWorkers() + for o := range out { + if o.absentInState { + if err := verkleWriter.DeleteAccount(o.versionHash, o.isContract); err != nil { + panic(err) + } + continue + } + if err := verkleWriter.UpdateAccount(o.versionHash, o.codeSize, o.isContract, o.account); err != nil { + panic(err) + } + if err := verkleWriter.WriteContractCodeChunks(o.codeKeys, 
o.codeChunks); err != nil { + panic(err) + } + } + }() + marker := NewVerkleMarker() + defer marker.Rollback() + + for k, v, err := accountCursor.Seek(dbutils.EncodeBlockNumber(from)); k != nil; k, v, err = accountCursor.Next() { + if err != nil { + return err + } + blockNumber, addressBytes, _, err := changeset.DecodeAccounts(k, v) + if err != nil { + return err + } + + if blockNumber > to { + break + } + address := common.BytesToAddress(addressBytes) + + marked, err := marker.IsMarked(addressBytes) + if err != nil { + return err + } + + if marked { + continue + } + + encodedAccount, err := tx.GetOne(kv.PlainState, addressBytes) + if err != nil { + return err + } + + incarnationBytes, err := tx.GetOne(kv.IncarnationMap, addressBytes) + if err != nil { + return err + } + isContract := len(incarnationBytes) > 0 && binary.BigEndian.Uint64(incarnationBytes) != 0 + // Start + if len(encodedAccount) == 0 { + jobs <- ®enerateIncrementalPedersenAccountsJob{ + address: address, + isContract: isContract, + absentInState: true, + } + } else { + var acc accounts.Account + if err := acc.DecodeForStorage(encodedAccount); err != nil { + return err + } + + // We need to update code. 
+ code, err := tx.GetOne(kv.Code, acc.CodeHash[:]) + if err != nil { + return err + } + + jobs <- ®enerateIncrementalPedersenAccountsJob{ + address: address, + account: acc, + code: code, + absentInState: false, + isContract: isContract, + } + } + if err := marker.MarkAsDone(addressBytes); err != nil { + return err + } + select { + case <-logInterval.C: + log.Info("Creating Verkle Trie Incrementally", "phase", "account", "blockNum", blockNumber) + default: + } + } + close(jobs) + wg.Wait() + close(out) + return nil +} diff --git a/cmd/verkle/verkletrie/incrementStorage.go b/cmd/verkle/verkletrie/incrementStorage.go new file mode 100644 index 00000000000..16aaba53ea1 --- /dev/null +++ b/cmd/verkle/verkletrie/incrementStorage.go @@ -0,0 +1,138 @@ +package verkletrie + +import ( + "context" + "sync" + "time" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/changeset" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/common/debug" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/log/v3" +) + +func IncrementStorage(vTx kv.RwTx, tx kv.Tx, workers uint64, verkleWriter *VerkleTreeWriter, from, to uint64) (common.Hash, error) { + logInterval := time.NewTicker(30 * time.Second) + logPrefix := "IncrementVerkleStorage" + + jobs := make(chan *regeneratePedersenStorageJob, batchSize) + out := make(chan *regeneratePedersenStorageJob, batchSize) + wg := new(sync.WaitGroup) + wg.Add(int(workers)) + ctx, cancelWorkers := context.WithCancel(context.Background()) + for i := 0; i < int(workers); i++ { + go func(threadNo int) { + defer debug.LogPanic() + defer wg.Done() + pedersenStorageWorker(ctx, logPrefix, jobs, out) + }(i) + } + defer cancelWorkers() + + storageCursor, err := tx.CursorDupSort(kv.StorageChangeSet) + if err != nil { + return common.Hash{}, err + } + defer storageCursor.Close() + // Start Goroutine for 
collection + go func() { + defer debug.LogPanic() + defer cancelWorkers() + for o := range out { + if err := verkleWriter.Insert(o.storageVerkleKey[:], o.storageValue); err != nil { + panic(err) + } + } + }() + marker := NewVerkleMarker() + defer marker.Rollback() + + for k, v, err := storageCursor.Seek(dbutils.EncodeBlockNumber(from)); k != nil; k, v, err = storageCursor.Next() { + if err != nil { + return common.Hash{}, err + } + blockNumber, changesetKey, _, err := changeset.DecodeStorage(k, v) + if err != nil { + return common.Hash{}, err + } + + if blockNumber > to { + break + } + + marked, err := marker.IsMarked(changesetKey) + if err != nil { + return common.Hash{}, err + } + + if marked { + continue + } + + address := common.BytesToAddress(changesetKey[:20]) + + /*var acc accounts.Account + _, err := rawdb.ReadAccount(tx, address, &acc) + if err != nil { + return err + } + + storageIncarnation := binary.BigEndian.Uint64(changesetKey[20:28]) + // Storage and code deletion is handled due to self-destruct is handled in accounts + if !has { + if err := marker.MarkAsDone(changesetKey); err != nil { + return err + } + continue + } + + if acc.Incarnation != storageIncarnation { + continue + }*/ + + storageValue, err := tx.GetOne(kv.PlainState, changesetKey) + if err != nil { + return common.Hash{}, err + } + storageKey := new(uint256.Int).SetBytes(changesetKey[28:]) + var storageValueFormatted []byte + + if len(storageValue) > 0 { + storageValueFormatted = make([]byte, 32) + int256ToVerkleFormat(new(uint256.Int).SetBytes(storageValue), storageValueFormatted) + } + + jobs <- ®eneratePedersenStorageJob{ + address: address, + storageKey: storageKey, + storageValue: storageValueFormatted, + } + if err := marker.MarkAsDone(changesetKey); err != nil { + return common.Hash{}, err + } + select { + case <-logInterval.C: + log.Info("Creating Verkle Trie Incrementally", "phase", "storage", "blockNum", blockNumber) + default: + } + } + close(jobs) + wg.Wait() + close(out) + 
// Get root + root, err := rawdb.ReadVerkleRoot(tx, from) + if err != nil { + return common.Hash{}, err + } + newRoot, err := verkleWriter.CommitVerkleTree(root) + if err != nil { + return common.Hash{}, err + } + log.Info("Computed verkle root", "root", common.Bytes2Hex(newRoot[:])) + + return newRoot, rawdb.WriteVerkleRoot(vTx, to, newRoot) +} diff --git a/cmd/verkle/verkletrie/pedersen_hashstate.go b/cmd/verkle/verkletrie/pedersen_hashstate.go new file mode 100644 index 00000000000..3fd77c10a35 --- /dev/null +++ b/cmd/verkle/verkletrie/pedersen_hashstate.go @@ -0,0 +1,251 @@ +package verkletrie + +import ( + "bytes" + "context" + "encoding/binary" + "sync" + "time" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/debug" + "github.com/ledgerwatch/erigon/core/types/accounts" + "github.com/ledgerwatch/log/v3" +) + +func RegeneratePedersenAccounts(outTx kv.RwTx, readTx kv.Tx, workers uint64, verkleWriter *VerkleTreeWriter) error { + logPrefix := "PedersenHashedAccounts" + start := time.Now() + log.Info("Started Generation of Pedersen Hashed Accounts") + + plainStateCursor, err := readTx.Cursor(kv.PlainState) + if err != nil { + return err + } + defer plainStateCursor.Close() + logEvery := time.NewTicker(30 * time.Second) + defer logEvery.Stop() + + jobs := make(chan *regeneratePedersenAccountsJob, batchSize) + out := make(chan *regeneratePedersenAccountsOut, batchSize) + wg := new(sync.WaitGroup) + wg.Add(int(workers)) + ctx, cancelWorkers := context.WithCancel(context.Background()) + for i := 0; i < int(workers); i++ { + go func(threadNo int) { + defer debug.LogPanic() + defer wg.Done() + pedersenAccountWorker(ctx, logPrefix, jobs, out) + }(i) + } + defer cancelWorkers() + // Start Goroutine for collection + go func() { + defer debug.LogPanic() + defer cancelWorkers() + for o := range out { + if err := verkleWriter.UpdateAccount(o.versionHash[:], o.codeSize, 
true, o.account); err != nil { + panic(err) + } + } + }() + for k, v, err := plainStateCursor.First(); k != nil; k, v, err = plainStateCursor.Next() { + if err != nil { + return err + } + if len(k) == 20 { + var acc accounts.Account + if err := acc.DecodeForStorage(v); err != nil { + return err + } + codeSize := uint64(0) + if !acc.IsEmptyCodeHash() { + code, err := readTx.GetOne(kv.Code, acc.CodeHash[:]) + if err != nil { + return err + } + codeSize = uint64(len(code)) + } + jobs <- ®eneratePedersenAccountsJob{ + address: common.BytesToAddress(k), + account: acc, + codeSize: codeSize, + } + select { + case <-logEvery.C: + log.Info("[Pedersen Account Hashing] Current progress in Collection Phase", "address", "0x"+common.Bytes2Hex(k)) + default: + } + } + } + + close(jobs) + wg.Wait() + close(out) + + log.Info("Finished generation of Pedersen Hashed Accounts", "elapsed", time.Since(start)) + + return nil +} + +func RegeneratePedersenStorage(outTx kv.RwTx, readTx kv.Tx, workers uint64, verkleWriter *VerkleTreeWriter) error { + logPrefix := "PedersenHashedStorage" + start := time.Now() + log.Info("Started Generation of Pedersen Hashed Storage") + + plainStateCursor, err := readTx.Cursor(kv.PlainState) + if err != nil { + return err + } + defer plainStateCursor.Close() + + logInterval := time.NewTicker(30 * time.Second) + defer logInterval.Stop() + + jobs := make(chan *regeneratePedersenStorageJob, batchSize) + out := make(chan *regeneratePedersenStorageJob, batchSize) + wg := new(sync.WaitGroup) + wg.Add(int(workers)) + ctx, cancelWorkers := context.WithCancel(context.Background()) + for i := 0; i < int(workers); i++ { + go func(threadNo int) { + defer debug.LogPanic() + defer wg.Done() + pedersenStorageWorker(ctx, logPrefix, jobs, out) + }(i) + } + defer cancelWorkers() + // Start Goroutine for collection + go func() { + defer debug.LogPanic() + defer cancelWorkers() + for o := range out { + if err := verkleWriter.Insert(o.storageVerkleKey[:], o.storageValue); err != 
nil { + panic(err) + } + } + }() + + var address common.Address + var incarnation uint64 + for k, v, err := plainStateCursor.First(); k != nil; k, v, err = plainStateCursor.Next() { + if err != nil { + return err + } + if len(k) == 60 { + if !bytes.Equal(address[:], k[:20]) || binary.BigEndian.Uint64(k[20:28]) != incarnation { + continue + } + storageValue := new(uint256.Int).SetBytes(v).Bytes32() + jobs <- ®eneratePedersenStorageJob{ + storageKey: new(uint256.Int).SetBytes(k[28:]), + storageValue: storageValue[:], + address: address, + } + select { + case <-logInterval.C: + log.Info("[Pedersen Storage Hashing] Current progress in Collection Phase", "address", "0x"+common.Bytes2Hex(k[:20])) + default: + } + } else if len(k) == 20 { + acc := accounts.NewAccount() + if err := acc.DecodeForStorage(v); err != nil { + return err + } + incarnation = acc.Incarnation + address = common.BytesToAddress(k) + } + } + + close(jobs) + wg.Wait() + close(out) + + log.Info("Finished generation of Pedersen Hashed Storage", "elapsed", time.Since(start)) + + return nil +} + +func RegeneratePedersenCode(outTx kv.RwTx, readTx kv.Tx, workers uint64, verkleWriter *VerkleTreeWriter) error { + logPrefix := "PedersenHashedCode" + start := time.Now() + log.Info("Started Generation of Pedersen Hashed Code") + + plainStateCursor, err := readTx.Cursor(kv.PlainState) + if err != nil { + return err + } + defer plainStateCursor.Close() + + logInterval := time.NewTicker(30 * time.Second) + defer logInterval.Stop() + + jobs := make(chan *regeneratePedersenCodeJob, batchSize) + out := make(chan *regeneratePedersenCodeOut, batchSize) + wg := new(sync.WaitGroup) + wg.Add(int(workers)) + ctx, cancelWorkers := context.WithCancel(context.Background()) + for i := 0; i < int(workers); i++ { + go func(threadNo int) { + defer debug.LogPanic() + defer wg.Done() + pedersenCodeWorker(ctx, logPrefix, jobs, out) + }(i) + } + defer cancelWorkers() + // Start Goroutine for collection + go func() { + defer 
debug.LogPanic() + defer cancelWorkers() + for o := range out { + // Write code chunks + if o.codeSize == 0 { + continue + } + if err := verkleWriter.WriteContractCodeChunks(o.chunksKeys, o.chunks); err != nil { + panic(err) + } + } + }() + + for k, v, err := plainStateCursor.First(); k != nil; k, v, err = plainStateCursor.Next() { + if err != nil { + return err + } + if len(k) != 20 { + continue + } + + acc := accounts.NewAccount() + acc.DecodeForStorage(v) + + if acc.IsEmptyCodeHash() { + continue + } + + code, err := readTx.GetOne(kv.Code, acc.CodeHash[:]) + if err != nil { + return err + } + + jobs <- ®eneratePedersenCodeJob{ + address: common.BytesToAddress(k), + code: common.CopyBytes(code), + } + select { + case <-logInterval.C: + log.Info("[Pedersen Code Hashing] Current progress in Collection Phase", "address", "0x"+common.Bytes2Hex(k)) + default: + } + } + + close(jobs) + wg.Wait() + close(out) + + log.Info("Finished generation of Pedersen Hashed Code", "elapsed", time.Since(start)) + + return nil +} diff --git a/cmd/verkle/verkletrie/verkle_marker.go b/cmd/verkle/verkletrie/verkle_marker.go new file mode 100644 index 00000000000..86c6d4afc6d --- /dev/null +++ b/cmd/verkle/verkletrie/verkle_marker.go @@ -0,0 +1,44 @@ +package verkletrie + +import ( + "context" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/mdbx" +) + +type VerkleMarker struct { + db kv.RwDB + tx kv.RwTx +} + +//nolint:gocritic +func NewVerkleMarker() *VerkleMarker { + markedSlotsDb, err := mdbx.NewTemporaryMdbx() + if err != nil { + panic(err) + } + + tx, err := markedSlotsDb.BeginRw(context.TODO()) + if err != nil { + panic(err) + } + + return &VerkleMarker{ + db: markedSlotsDb, + tx: tx, + } +} + +func (v *VerkleMarker) MarkAsDone(key []byte) error { + return v.tx.Put(kv.Headers, key, []byte{0}) +} + +func (v *VerkleMarker) IsMarked(key []byte) (bool, error) { + return v.tx.Has(kv.Headers, key) +} + +func (v *VerkleMarker) Rollback() { + 
v.tx.Rollback() + v.db.Close() +} diff --git a/cmd/verkle/verkletrie/verkle_tree_writer.go b/cmd/verkle/verkletrie/verkle_tree_writer.go new file mode 100644 index 00000000000..91cc6e9609a --- /dev/null +++ b/cmd/verkle/verkletrie/verkle_tree_writer.go @@ -0,0 +1,280 @@ +package verkletrie + +import ( + "context" + "encoding/binary" + "time" + + "github.com/anacrolix/sync" + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/etl" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types/accounts" + "github.com/ledgerwatch/erigon/turbo/trie/vtree" + "github.com/ledgerwatch/log/v3" +) + +func identityFuncForVerkleTree(k []byte, value []byte, _ etl.CurrentTableReader, next etl.LoadNextFunc) error { + return next(k, k, value) +} + +func int256ToVerkleFormat(x *uint256.Int, buffer []byte) { + bbytes := x.ToBig().Bytes() + if len(bbytes) > 0 { + for i, b := range bbytes { + buffer[len(bbytes)-i-1] = b + } + } +} + +func flushVerkleNode(db kv.RwTx, node verkle.VerkleNode, logInterval *time.Ticker, key []byte) error { + var err error + totalInserted := 0 + node.(*verkle.InternalNode).Flush(func(node verkle.VerkleNode) { + if err != nil { + return + } + + err = rawdb.WriteVerkleNode(db, node) + if err != nil { + return + } + totalInserted++ + select { + case <-logInterval.C: + log.Info("Flushing Verkle nodes", "inserted", totalInserted, "key", common.Bytes2Hex(key)) + default: + } + }) + return err +} + +func collectVerkleNode(collector *etl.Collector, node verkle.VerkleNode, logInterval *time.Ticker, key []byte) error { + var err error + totalInserted := 0 + node.(*verkle.InternalNode).Flush(func(node verkle.VerkleNode) { + if err != nil { + return + } + var encodedNode []byte + + rootHash := node.Commitment().Bytes() + encodedNode, err = node.Serialize() + if err != nil { + return + } + err = 
collector.Collect(rootHash[:], encodedNode) + totalInserted++ + select { + case <-logInterval.C: + log.Info("Flushing Verkle nodes", "inserted", totalInserted, "key", common.Bytes2Hex(key)) + default: + } + }) + return err +} + +type VerkleTreeWriter struct { + db kv.RwTx + collector *etl.Collector + mu sync.Mutex + tmpdir string +} + +func NewVerkleTreeWriter(db kv.RwTx, tmpdir string) *VerkleTreeWriter { + return &VerkleTreeWriter{ + db: db, + collector: etl.NewCollector("verkleTreeWriterLogPrefix", tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize*8)), + tmpdir: tmpdir, + } +} + +func (v *VerkleTreeWriter) UpdateAccount(versionKey []byte, codeSize uint64, isContract bool, acc accounts.Account) error { + v.mu.Lock() + defer v.mu.Unlock() + var codeHashKey, nonceKey, balanceKey, codeSizeKey, nonce, balance, cs [32]byte + copy(codeHashKey[:], versionKey[:31]) + copy(nonceKey[:], versionKey[:31]) + copy(balanceKey[:], versionKey[:31]) + copy(codeSizeKey[:], versionKey[:31]) + codeHashKey[31] = vtree.CodeKeccakLeafKey + nonceKey[31] = vtree.NonceLeafKey + balanceKey[31] = vtree.BalanceLeafKey + codeSizeKey[31] = vtree.CodeSizeLeafKey + // Process values + int256ToVerkleFormat(&acc.Balance, balance[:]) + binary.LittleEndian.PutUint64(nonce[:], acc.Nonce) + + // Insert in the tree + if err := v.collector.Collect(versionKey, []byte{0}); err != nil { + return err + } + + if err := v.collector.Collect(nonceKey[:], nonce[:]); err != nil { + return err + } + if err := v.collector.Collect(balanceKey[:], balance[:]); err != nil { + return err + } + if isContract { + binary.LittleEndian.PutUint64(cs[:], codeSize) + if err := v.collector.Collect(codeHashKey[:], acc.CodeHash[:]); err != nil { + return err + } + if err := v.collector.Collect(codeSizeKey[:], cs[:]); err != nil { + return err + } + } + return nil +} + +func (v *VerkleTreeWriter) DeleteAccount(versionKey []byte, isContract bool) error { + v.mu.Lock() + defer v.mu.Unlock() + var codeHashKey, nonceKey, balanceKey, 
codeSizeKey [32]byte + copy(codeHashKey[:], versionKey[:31]) + copy(nonceKey[:], versionKey[:31]) + copy(balanceKey[:], versionKey[:31]) + copy(codeSizeKey[:], versionKey[:31]) + codeHashKey[31] = vtree.CodeKeccakLeafKey + nonceKey[31] = vtree.NonceLeafKey + balanceKey[31] = vtree.BalanceLeafKey + codeSizeKey[31] = vtree.CodeSizeLeafKey + // Insert in the tree + if err := v.collector.Collect(versionKey, []byte{0}); err != nil { + return err + } + + if err := v.collector.Collect(nonceKey[:], []byte{0}); err != nil { + return err + } + if err := v.collector.Collect(balanceKey[:], []byte{0}); err != nil { + return err + } + if isContract { + if err := v.collector.Collect(codeHashKey[:], []byte{0}); err != nil { + return err + } + if err := v.collector.Collect(codeSizeKey[:], []byte{0}); err != nil { + return err + } + } + return nil +} + +func (v *VerkleTreeWriter) Insert(key, value []byte) error { + v.mu.Lock() + defer v.mu.Unlock() + return v.collector.Collect(key, value) +} + +func (v *VerkleTreeWriter) WriteContractCodeChunks(codeKeys [][]byte, chunks [][]byte) error { + v.mu.Lock() + defer v.mu.Unlock() + + for i, codeKey := range codeKeys { + if err := v.collector.Collect(codeKey, chunks[i]); err != nil { + return err + } + } + return nil +} + +func (v *VerkleTreeWriter) CommitVerkleTreeFromScratch() (common.Hash, error) { + if err := v.db.ClearBucket(kv.VerkleTrie); err != nil { + return common.Hash{}, err + } + + verkleCollector := etl.NewCollector(kv.VerkleTrie, v.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer verkleCollector.Close() + + root := verkle.New() + + logInterval := time.NewTicker(30 * time.Second) + if err := v.collector.Load(v.db, kv.VerkleTrie, func(k []byte, v []byte, _ etl.CurrentTableReader, next etl.LoadNextFunc) error { + if len(v) == 0 { + return next(k, nil, nil) + } + if err := root.InsertOrdered(common.CopyBytes(k), common.CopyBytes(v), func(node verkle.VerkleNode) { + rootHash := node.Commitment().Bytes() + encodedNode, 
err := node.Serialize() + if err != nil { + panic(err) + } + if err := verkleCollector.Collect(rootHash[:], encodedNode); err != nil { + panic(err) + } + select { + case <-logInterval.C: + log.Info("[Verkle] Assembling Verkle Tree", "key", common.Bytes2Hex(k)) + default: + } + }); err != nil { + return err + } + return next(k, nil, nil) + }, etl.TransformArgs{Quit: context.Background().Done()}); err != nil { + return common.Hash{}, err + } + + // Flush the rest all at once + if err := collectVerkleNode(v.collector, root, logInterval, nil); err != nil { + return common.Hash{}, err + } + + log.Info("Started Verkle Tree Flushing") + return root.Commitment().Bytes(), verkleCollector.Load(v.db, kv.VerkleTrie, etl.IdentityLoadFunc, etl.TransformArgs{Quit: context.Background().Done(), + LogDetailsLoad: func(k, v []byte) (additionalLogArguments []interface{}) { + return []interface{}{"key", common.Bytes2Hex(k)} + }}) +} + +func (v *VerkleTreeWriter) CommitVerkleTree(root common.Hash) (common.Hash, error) { + resolverFunc := func(root []byte) ([]byte, error) { + return v.db.GetOne(kv.VerkleTrie, root) + } + + var rootNode verkle.VerkleNode + var err error + if root != (common.Hash{}) { + rootNode, err = rawdb.ReadVerkleNode(v.db, root) + if err != nil { + return common.Hash{}, err + } + } else { + return v.CommitVerkleTreeFromScratch() // TODO(Giulio2002): ETL is buggy, go fix it >:(. 
+ } + + verkleCollector := etl.NewCollector(kv.VerkleTrie, v.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer verkleCollector.Close() + + insertionBeforeFlushing := 2_000_000 // 2M node to flush at a time + insertions := 0 + logInterval := time.NewTicker(30 * time.Second) + if err := v.collector.Load(v.db, kv.VerkleTrie, func(key []byte, value []byte, _ etl.CurrentTableReader, next etl.LoadNextFunc) error { + if len(value) > 0 { + if err := rootNode.Insert(common.CopyBytes(key), common.CopyBytes(value), resolverFunc); err != nil { + return err + } + insertions++ + } + if insertions > insertionBeforeFlushing { + if err := flushVerkleNode(v.db, rootNode, logInterval, key); err != nil { + return err + } + insertions = 0 + } + return next(key, nil, nil) + }, etl.TransformArgs{Quit: context.Background().Done()}); err != nil { + return common.Hash{}, err + } + commitment := rootNode.Commitment().Bytes() + return common.BytesToHash(commitment[:]), flushVerkleNode(v.db, rootNode, logInterval, nil) +} + +func (v *VerkleTreeWriter) Close() { + v.collector.Close() +} diff --git a/cmd/verkle/verkletrie/workers.go b/cmd/verkle/verkletrie/workers.go new file mode 100644 index 00000000000..fbe40f0d582 --- /dev/null +++ b/cmd/verkle/verkletrie/workers.go @@ -0,0 +1,234 @@ +package verkletrie + +import ( + "context" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/types/accounts" + "github.com/ledgerwatch/erigon/turbo/trie/vtree" +) + +type regeneratePedersenAccountsJob struct { + address common.Address + account accounts.Account + codeSize uint64 +} + +type regeneratePedersenAccountsOut struct { + versionHash common.Hash + address common.Address + account accounts.Account + codeSize uint64 +} + +type regeneratePedersenStorageJob struct { + storageVerkleKey common.Hash + storageKey *uint256.Int + storageValue []byte + address common.Address +} + +type regeneratePedersenCodeJob struct { + address 
common.Address + code []byte +} + +type regeneratePedersenCodeOut struct { + chunks [][]byte + address common.Address + chunksKeys [][]byte + codeSize int +} + +type regenerateIncrementalPedersenAccountsJob struct { + // Update + address common.Address + account accounts.Account + code []byte // New code + isContract bool + absentInState bool +} + +type regenerateIncrementalPedersenAccountsOut struct { + address common.Address + versionHash []byte + account accounts.Account + codeSize uint64 + codeChunks [][]byte + codeKeys [][]byte + isContract bool + absentInState bool + badKeys [][]byte +} + +const batchSize = 10000 + +func pedersenAccountWorker(ctx context.Context, logPrefix string, in chan *regeneratePedersenAccountsJob, out chan *regeneratePedersenAccountsOut) { + var job *regeneratePedersenAccountsJob + var ok bool + for { + select { + case job, ok = <-in: + if !ok { + return + } + if job == nil { + return + } + case <-ctx.Done(): + return + } + + // prevent sending to close channel + out <- ®eneratePedersenAccountsOut{ + versionHash: common.BytesToHash(vtree.GetTreeKeyVersion(job.address[:])), + account: job.account, + address: job.address, + codeSize: job.codeSize, + } + } +} + +func pedersenStorageWorker(ctx context.Context, logPrefix string, in, out chan *regeneratePedersenStorageJob) { + var job *regeneratePedersenStorageJob + var ok bool + for { + select { + case job, ok = <-in: + if !ok { + return + } + if job == nil { + return + } + case <-ctx.Done(): + return + } + out <- ®eneratePedersenStorageJob{ + storageVerkleKey: common.BytesToHash(vtree.GetTreeKeyStorageSlot(job.address[:], job.storageKey)), + storageKey: job.storageKey, + address: job.address, + storageValue: job.storageValue, + } + } +} + +func pedersenCodeWorker(ctx context.Context, logPrefix string, in chan *regeneratePedersenCodeJob, out chan *regeneratePedersenCodeOut) { + var job *regeneratePedersenCodeJob + var ok bool + for { + select { + case job, ok = <-in: + if !ok { + return + } 
+ if job == nil { + return + } + case <-ctx.Done(): + return + } + + var chunks [][]byte + var chunkKeys [][]byte + if job.code == nil || len(job.code) == 0 { + out <- ®eneratePedersenCodeOut{ + chunks: chunks, + chunksKeys: chunkKeys, + codeSize: 0, + address: job.address, + } + } + // Chunkify contract code and build keys for each chunks and insert them in the tree + chunkedCode := vtree.ChunkifyCode(job.code) + offset := byte(0) + offsetOverflow := false + currentKey := vtree.GetTreeKeyCodeChunk(job.address[:], uint256.NewInt(0)) + // Write code chunks + for i := 0; i < len(chunkedCode); i += 32 { + chunks = append(chunks, common.CopyBytes(chunkedCode[i:i+32])) + if currentKey[31]+offset < currentKey[31] || offsetOverflow { + currentKey = vtree.GetTreeKeyCodeChunk(job.address[:], uint256.NewInt(uint64(i)/32)) + chunkKeys = append(chunkKeys, common.CopyBytes(currentKey)) + offset = 1 + offsetOverflow = false + } else { + codeKey := common.CopyBytes(currentKey) + codeKey[31] += offset + chunkKeys = append(chunkKeys, common.CopyBytes(codeKey)) + offset += 1 + // If offset overflows, handle it. 
+ offsetOverflow = offset == 0 + } + } + out <- ®eneratePedersenCodeOut{ + chunks: chunks, + chunksKeys: chunkKeys, + codeSize: len(job.code), + address: job.address, + } + } +} + +func incrementalAccountWorker(ctx context.Context, logPrefix string, in chan *regenerateIncrementalPedersenAccountsJob, out chan *regenerateIncrementalPedersenAccountsOut) { + var job *regenerateIncrementalPedersenAccountsJob + var ok bool + for { + select { + case job, ok = <-in: + if !ok { + return + } + if job == nil { + return + } + case <-ctx.Done(): + return + } + versionKey := common.BytesToHash(vtree.GetTreeKeyVersion(job.address[:])) + if job.absentInState { + out <- ®enerateIncrementalPedersenAccountsOut{ + versionHash: versionKey[:], + isContract: job.isContract, + absentInState: job.absentInState, + } + continue + } + + var chunks [][]byte + var chunkKeys [][]byte + // Chunkify contract code and build keys for each chunks and insert them in the tree + chunkedCode := vtree.ChunkifyCode(job.code) + offset := byte(0) + offsetOverflow := false + currentKey := vtree.GetTreeKeyCodeChunk(job.address[:], uint256.NewInt(0)) + // Write code chunks + for i := 0; i < len(chunkedCode); i += 32 { + chunks = append(chunks, common.CopyBytes(chunkedCode[i:i+32])) + codeKey := common.CopyBytes(currentKey) + if currentKey[31]+offset < currentKey[31] || offsetOverflow { + currentKey = vtree.GetTreeKeyCodeChunk(job.address[:], uint256.NewInt(uint64(i)/32)) + chunkKeys = append(chunkKeys, codeKey) + offset = 1 + offsetOverflow = false + } else { + codeKey[31] += offset + chunkKeys = append(chunkKeys, codeKey) + offset += 1 + // If offset overflows, handle it. 
+ offsetOverflow = offset == 0 + } + } + out <- ®enerateIncrementalPedersenAccountsOut{ + versionHash: versionKey[:], + account: job.account, + codeSize: uint64(len(job.code)), + codeChunks: chunks, + codeKeys: chunkKeys, + absentInState: job.absentInState, + isContract: job.isContract, + address: job.address, + } + } +} diff --git a/common/bitutil/compress_test.go b/common/bitutil/compress_test.go index 5ebb8c3fad3..d797498ca3a 100644 --- a/common/bitutil/compress_test.go +++ b/common/bitutil/compress_test.go @@ -21,6 +21,8 @@ import ( "math/rand" "testing" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/common/hexutil" ) @@ -176,6 +178,9 @@ func benchmarkEncoding(b *testing.B, bytes int, fill float64) { b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { - bitsetDecodeBytes(bitsetEncodeBytes(data), len(data)) + _, decodeErr := bitsetDecodeBytes(bitsetEncodeBytes(data), len(data)) + if decodeErr != nil { + log.Warn("Failed to decode bitset bytes", "err", decodeErr) + } } } diff --git a/common/changeset/storage_changeset.go b/common/changeset/storage_changeset.go index 1cd78aa461f..f491783b12a 100644 --- a/common/changeset/storage_changeset.go +++ b/common/changeset/storage_changeset.go @@ -101,7 +101,7 @@ func RewindData(db kv.Tx, timestampSrc, timestampDst uint64, changes *etl.Collec } func walkAndCollect(collectorFunc func([]byte, []byte) error, db kv.Tx, bucket string, timestampDst, timestampSrc uint64, quit <-chan struct{}) error { - return ForRange(db, bucket, timestampDst, timestampSrc+1, func(_ uint64, k, v []byte) error { + return ForRange(db, bucket, timestampDst, timestampSrc+1, func(bl uint64, k, v []byte) error { if err := libcommon.Stopped(quit); err != nil { return err } diff --git a/common/compiler/solidity.go b/common/compiler/solidity.go index 0e50cd18a86..a8966bec375 100644 --- a/common/compiler/solidity.go +++ b/common/compiler/solidity.go @@ -25,6 +25,8 @@ import ( "os/exec" "strconv" "strings" + + 
"github.com/ledgerwatch/log/v3" ) // Solidity contains information about the solidity compiler. @@ -165,9 +167,16 @@ func ParseCombinedJSON(combinedJSON []byte, source string, languageVersion strin if err := json.Unmarshal([]byte(info.Abi), &abi); err != nil { return nil, fmt.Errorf("solc: error reading abi definition (%w)", err) } + var userdoc, devdoc interface{} - json.Unmarshal([]byte(info.Userdoc), &userdoc) - json.Unmarshal([]byte(info.Devdoc), &devdoc) + marshalErr := json.Unmarshal([]byte(info.Userdoc), &userdoc) + if marshalErr != nil { + log.Warn("Failed to unmarshal info.Devdoc", "", marshalErr) + } + marshalErr = json.Unmarshal([]byte(info.Devdoc), &devdoc) + if marshalErr != nil { + log.Warn("Failed to unmarshal info.Devdoc", "", marshalErr) + } contracts[name] = &Contract{ Code: "0x" + info.Bin, diff --git a/common/dbutils/history_index.go b/common/dbutils/history_index.go index 4853a1efbc6..7794a11bfde 100644 --- a/common/dbutils/history_index.go +++ b/common/dbutils/history_index.go @@ -14,7 +14,6 @@ func AccountIndexChunkKey(key []byte, blockNumber uint64) []byte { return blockNumBytes } -// func StorageIndexChunkKey(key []byte, blockNumber uint64) []byte { //remove incarnation and add block number blockNumBytes := make([]byte, common.AddressLength+common.HashLength+8) diff --git a/common/dbutils/suffix_type.go b/common/dbutils/suffix_type.go index 8443dc661f9..634e61e6de9 100644 --- a/common/dbutils/suffix_type.go +++ b/common/dbutils/suffix_type.go @@ -5,7 +5,7 @@ import "encoding/binary" type Suffix []byte func ToSuffix(b []byte) Suffix { - return Suffix(b) + return b } func (s Suffix) Add(key []byte) Suffix { @@ -20,7 +20,7 @@ func (s Suffix) Add(key []byte) Suffix { binary.BigEndian.PutUint32(dv, 1+s.KeyCount()) // Increment the counter of keys dv[l] = byte(len(key)) copy(dv[l+1:], key) - return Suffix(dv) + return dv } func (s Suffix) MultiAdd(keys [][]byte) Suffix { var l int @@ -43,7 +43,7 @@ func (s Suffix) MultiAdd(keys [][]byte) Suffix { 
copy(dv[i:], key) i += len(key) } - return Suffix(dv) + return dv } func (s Suffix) KeyCount() uint32 { diff --git a/common/debug/experiments.go b/common/debug/experiments.go index d9ce8558875..cf091ae659e 100644 --- a/common/debug/experiments.go +++ b/common/debug/experiments.go @@ -70,6 +70,8 @@ func SlowCommit() time.Duration { var ( stopBeforeStage string stopBeforeStageFlag sync.Once + stopAfterStage string + stopAfterStageFlag sync.Once ) func StopBeforeStage() string { @@ -82,3 +84,17 @@ func StopBeforeStage() string { stopBeforeStageFlag.Do(f) return stopBeforeStage } + +// TODO(allada) We should possibly consider removing `STOP_BEFORE_STAGE`, as `STOP_AFTER_STAGE` can +// perform all same the functionality, but due to reverse compatibility reasons we are going to +// leave it. +func StopAfterStage() string { + f := func() { + v, _ := os.LookupEnv("STOP_AFTER_STAGE") // see names in eth/stagedsync/stages/stages.go + if v != "" { + stopAfterStage = v + } + } + stopAfterStageFlag.Do(f) + return stopAfterStage +} diff --git a/common/debugprint/receipts.go b/common/debugprint/receipts.go index f21beda4747..ade3e65d1f0 100644 --- a/common/debugprint/receipts.go +++ b/common/debugprint/receipts.go @@ -6,7 +6,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" ) -//nolint +// nolint func Transactions(ts1, ts2 types.Transactions) { fmt.Printf("==== Transactions ====\n") fmt.Printf("len(Transactions): %d, %d\n", len(ts1), len(ts2)) @@ -26,7 +26,7 @@ func Transactions(ts1, ts2 types.Transactions) { } } -//nolint +// nolint func Receipts(rs1, rs2 types.Receipts) { fmt.Printf("==== Receipts ====\n") fmt.Printf("len(Receipts): %d, %d\n", len(rs1), len(rs2)) @@ -75,7 +75,7 @@ func Receipts(rs1, rs2 types.Receipts) { } } -//nolint +// nolint func Headers(h1, h2 *types.Header) { fmt.Printf("==== Header ====\n") fmt.Printf("root: %x, %x\n", h1.Root, h2.Root) diff --git a/common/hexutil/hexutil.go b/common/hexutil/hexutil.go index ad59d2ff840..f74ea23db5d 100644 --- 
a/common/hexutil/hexutil.go +++ b/common/hexutil/hexutil.go @@ -18,7 +18,7 @@ Package hexutil implements hex encoding with 0x prefix. This encoding is used by the Ethereum RPC API to transport binary data in JSON payloads. -Encoding Rules +# Encoding Rules All hex data must have prefix "0x". diff --git a/common/math/big.go b/common/math/big.go index 29a2a8eb033..e95f7fa1cde 100644 --- a/common/math/big.go +++ b/common/math/big.go @@ -245,10 +245,10 @@ func U256Bytes(n *big.Int) []byte { // S256 interprets x as a two's complement number. // x must not exceed 256 bits (the result is undefined if it does) and is not modified. // -// S256(0) = 0 -// S256(1) = 1 -// S256(2**255) = -2**255 -// S256(2**256-1) = -1 +// S256(0) = 0 +// S256(1) = 1 +// S256(2**255) = -2**255 +// S256(2**256-1) = -1 func S256(x *big.Int) *big.Int { if x.Cmp(tt255) < 0 { return x diff --git a/common/math/integer.go b/common/math/integer.go index 2f6712951a6..00d7b798b5e 100644 --- a/common/math/integer.go +++ b/common/math/integer.go @@ -17,7 +17,10 @@ package math import ( + "crypto/rand" "fmt" + "math" + "math/big" "math/bits" "strconv" ) @@ -104,3 +107,11 @@ func AbsoluteDifference(x, y uint64) uint64 { } return y - x } + +func RandInt64() (int64, error) { + n, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + return 0, err + } + return n.Int64(), nil +} diff --git a/common/math/integer_test.go b/common/math/integer_test.go index ab3e0f4d878..3179aac130d 100644 --- a/common/math/integer_test.go +++ b/common/math/integer_test.go @@ -120,6 +120,6 @@ func TestMustParseUint64Panic(t *testing.T) { func TestAbsoluteDifference(t *testing.T) { x1 := uint64(99) x2 := uint64(45) - assert.Equal(t, AbsoluteDifference(x1, x2), uint64(x1-x2)) - assert.Equal(t, AbsoluteDifference(x2, x1), uint64(x1-x2)) + assert.Equal(t, AbsoluteDifference(x1, x2), x1-x2) + assert.Equal(t, AbsoluteDifference(x2, x1), x1-x2) } diff --git a/common/mclock/mclock.go b/common/mclock/mclock.go index 
c05738cf2bf..915081ff282 100644 --- a/common/mclock/mclock.go +++ b/common/mclock/mclock.go @@ -19,7 +19,6 @@ package mclock import ( "time" - _ "unsafe" // for go:linkname ) diff --git a/common/prque/lazyqueue.go b/common/prque/lazyqueue.go index 07e0bb7aa08..0c8903851bb 100644 --- a/common/prque/lazyqueue.go +++ b/common/prque/lazyqueue.go @@ -26,9 +26,10 @@ import ( // LazyQueue is a priority queue data structure where priorities can change over // time and are only evaluated on demand. // Two callbacks are required: -// - priority evaluates the actual priority of an item -// - maxPriority gives an upper estimate for the priority in any moment between -// now and the given absolute time +// - priority evaluates the actual priority of an item +// - maxPriority gives an upper estimate for the priority in any moment between +// now and the given absolute time +// // If the upper estimate is exceeded then Update should be called for that item. // A global Refresh function should also be called periodically. 
type LazyQueue struct { diff --git a/common/types_test.go b/common/types_test.go index 2f85b76a5af..39f4a31016f 100644 --- a/common/types_test.go +++ b/common/types_test.go @@ -28,8 +28,8 @@ import ( ) func TestBytesConversion(t *testing.T) { - bytes := []byte{5} - hash := BytesToHash(bytes) + byteSlice := []byte{5} + hash := BytesToHash(byteSlice) var exp Hash exp[31] = 5 diff --git a/consensus/aura/aura.go b/consensus/aura/aura.go index 67e08197318..9278b277e09 100644 --- a/consensus/aura/aura.go +++ b/consensus/aura/aura.go @@ -140,7 +140,7 @@ type PermissionedStep struct { type ReceivedStepHashes map[uint64]map[common.Address]common.Hash //BTreeMap<(u64, Address), H256> -//nolint +// nolint func (r ReceivedStepHashes) get(step uint64, author common.Address) (common.Hash, bool) { res, ok := r[step] if !ok { @@ -150,7 +150,7 @@ func (r ReceivedStepHashes) get(step uint64, author common.Address) (common.Hash return result, ok } -//nolint +// nolint func (r ReceivedStepHashes) insert(step uint64, author common.Address, blockHash common.Hash) { res, ok := r[step] if !ok { @@ -160,7 +160,7 @@ func (r ReceivedStepHashes) insert(step uint64, author common.Address, blockHash res[author] = blockHash } -//nolint +// nolint func (r ReceivedStepHashes) dropAncient(step uint64) { for i := range r { if i < step { @@ -169,7 +169,7 @@ func (r ReceivedStepHashes) dropAncient(step uint64) { } } -//nolint +// nolint type EpochManager struct { epochTransitionHash common.Hash // H256, epochTransitionNumber uint64 // BlockNumber @@ -188,7 +188,7 @@ func (e *EpochManager) noteNewEpoch() { e.force = true } // zoomValidators - Zooms to the epoch after the header with the given hash. Returns true if succeeded, false otherwise. 
// It's analog of zoom_to_after function in OE, but doesn't require external locking -//nolint +// nolint func (e *EpochManager) zoomToAfter(chain consensus.ChainHeaderReader, er consensus.EpochReader, validators ValidatorSet, hash common.Hash, call consensus.SystemCall) (*RollingFinality, uint64, bool) { var lastWasParent bool if e.finalityChecker.lastPushed != nil { @@ -246,12 +246,12 @@ func (e *EpochManager) zoomToAfter(chain consensus.ChainHeaderReader, er consens return e.finalityChecker, e.epochTransitionNumber, true } -/// Get the transition to the epoch the given parent hash is part of -/// or transitions to. -/// This will give the epoch that any children of this parent belong to. -/// -/// The block corresponding the the parent hash must be stored already. -//nolint +// / Get the transition to the epoch the given parent hash is part of +// / or transitions to. +// / This will give the epoch that any children of this parent belong to. +// / +// / The block corresponding the the parent hash must be stored already. 
+// nolint func epochTransitionFor2(chain consensus.ChainHeaderReader, e consensus.EpochReader, parentHash common.Hash) (transition EpochTransition, ok bool) { //TODO: probably this version of func doesn't support non-canonical epoch transitions h := chain.GetHeaderByHash(parentHash) @@ -268,7 +268,7 @@ func epochTransitionFor2(chain consensus.ChainHeaderReader, e consensus.EpochRea return EpochTransition{BlockNumber: num, BlockHash: hash, ProofRlp: transitionProof}, true } -//nolint +// nolint func epochTransitionFor(chain consensus.ChainHeaderReader, e consensus.EpochReader, parentHash common.Hash) (transition EpochTransition, ok bool) { // slow path: loop back block by block for { @@ -321,7 +321,7 @@ func epochTransitionFor(chain consensus.ChainHeaderReader, e consensus.EpochRead } // AuRa -//nolint +// nolint type AuRa struct { db kv.RwDB // Database to store and retrieve snapshot checkpoints exitCh chan struct{} @@ -417,7 +417,7 @@ func NewAuRa(config *params.AuRaConfig, db kv.RwDB, ourSigningAddress common.Add if auraParams.StartStep != nil { initialStep = *auraParams.StartStep } - var durations []StepDurationInfo + durations := make([]StepDurationInfo, 0, 1+len(auraParams.StepDurations)) durInfo := StepDurationInfo{ TransitionStep: 0, TransitionTimestamp: 0, @@ -522,7 +522,7 @@ func (c *AuRa) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Hea return nil } -//nolint +// nolint func (c *AuRa) hasReceivedStepHashes(step uint64, author common.Address, newHash common.Hash) bool { /* self @@ -534,7 +534,7 @@ func (c *AuRa) hasReceivedStepHashes(step uint64, author common.Address, newHash return false } -//nolint +// nolint func (c *AuRa) insertReceivedStepHashes(step uint64, author common.Address, newHash common.Hash) { /* self.received_step_hashes @@ -543,7 +543,7 @@ func (c *AuRa) insertReceivedStepHashes(step uint64, author common.Address, newH */ } -//nolint +// nolint func (c *AuRa) verifyFamily(chain consensus.ChainHeaderReader, e 
consensus.EpochReader, header *types.Header, call consensus.Call, syscall consensus.SystemCall) error { // TODO: I call it from Initialize - because looks like no much reason to have separated "verifyFamily" call @@ -830,7 +830,7 @@ func (c *AuRa) Initialize(config *params.ChainConfig, chain consensus.ChainHeade } -//word `signal epoch` == word `pending epoch` +// word `signal epoch` == word `pending epoch` func (c *AuRa) Finalize(config *params.ChainConfig, header *types.Header, state *state.IntraBlockState, txs types.Transactions, uncles []*types.Header, receipts types.Receipts, e consensus.EpochReader, chain consensus.ChainHeaderReader, syscall consensus.SystemCall, @@ -1200,7 +1200,7 @@ func (c *AuRa) epochSet(chain consensus.ChainHeaderReader, e consensus.EpochRead return finalityChecker.signers, epochTransitionNumber, nil } -//nolint +// nolint func headerStep(current *types.Header) (val uint64, err error) { if len(current.Seal) < 1 { panic("was either checked with verify_block_basic or is genesis; has 2 fields; qed (Make sure the spec file has a correct genesis seal)") @@ -1235,7 +1235,8 @@ func (c *AuRa) CalcDifficulty(chain consensus.ChainHeaderReader, time, parentTim } // calculateScore - analog of PoW difficulty: -// sqrt(U256::max_value()) + parent_step - current_step + current_empty_steps +// +// sqrt(U256::max_value()) + parent_step - current_step + current_empty_steps func calculateScore(parentStep, currentStep, currentEmptySteps uint64) *uint256.Int { maxU128 := uint256.NewInt(0).SetAllOne() maxU128 = maxU128.Rsh(maxU128, 128) @@ -1268,7 +1269,7 @@ func (c *AuRa) APIs(chain consensus.ChainHeaderReader) []rpc.API { } } -//nolint +// nolint func (c *AuRa) emptySteps(fromStep, toStep uint64, parentHash common.Hash) []EmptyStep { from := EmptyStep{step: fromStep + 1, parentHash: parentHash} to := EmptyStep{step: toStep} @@ -1359,9 +1360,17 @@ func callBlockRewardAbi(contractAddr common.Address, syscall consensus.SystemCal if err != nil { panic(err) } - 
_ = res[0] - _ = res[1] - return nil, nil + beneficiariesRes := res[0].([]common.Address) + rewardsBig := res[1].([]*big.Int) + rewardsU256 := make([]*uint256.Int, len(rewardsBig)) + for i := 0; i < len(rewardsBig); i++ { + var overflow bool + rewardsU256[i], overflow = uint256.FromBig(rewardsBig[i]) + if overflow { + panic("Overflow in callBlockRewardAbi") + } + } + return beneficiariesRes, rewardsU256 } func blockRewardAbi() abi.ABI { @@ -1376,7 +1385,7 @@ func blockRewardAbi() abi.ABI { // the `parent_hash` in order to save space. The included signature is of the original empty step // message, which can be reconstructed by using the parent hash of the block in which this sealed // empty message is inc luded. -//nolint +// nolint type SealedEmptyStep struct { signature []byte // H520 step uint64 @@ -1423,12 +1432,12 @@ func headerEmptyStepsRaw(header *types.Header) []byte { // // An empty step message is created _instead of_ a block if there are no pending transactions. // It cannot itself be a parent, and `parent_hash` always points to the most recent block. E.g.: -// * Validator A creates block `bA`. -// * Validator B has no pending transactions, so it signs an empty step message `mB` -// instead whose hash points to block `bA`. -// * Validator C also has no pending transactions, so it also signs an empty step message `mC` -// instead whose hash points to block `bA`. -// * Validator D creates block `bD`. The parent is block `bA`, and the header includes `mB` and `mC`. +// - Validator A creates block `bA`. +// - Validator B has no pending transactions, so it signs an empty step message `mB` +// instead whose hash points to block `bA`. +// - Validator C also has no pending transactions, so it also signs an empty step message `mC` +// instead whose hash points to block `bA`. +// - Validator D creates block `bD`. The parent is block `bA`, and the header includes `mB` and `mC`. 
type EmptyStep struct { // The signature of the other two fields, by the message's author. signature []byte // H520 @@ -1480,7 +1489,7 @@ func (s *EmptyStep) verify(validators ValidatorSet) (bool, error) { //nolint return true, nil } -//nolint +// nolint func (s *EmptyStep) author() (common.Address, error) { sRlp, err := EmptyStepRlp(s.step, s.parentHash) if err != nil { @@ -1538,7 +1547,7 @@ func EmptyStepRlp(step uint64, parentHash common.Hash) ([]byte, error) { return rlp.EncodeToBytes(A{s: step, h: parentHash}) } -//nolint +// nolint type unAssembledHeader struct { hash common.Hash number uint64 @@ -1568,7 +1577,7 @@ func (u unAssembledHeaders) Front() *unAssembledHeader { // RollingFinality checker for authority round consensus. // Stores a chain of unfinalized hashes that can be pushed onto. -//nolint +// nolint type RollingFinality struct { headers unAssembledHeaders //nolint signers *SimpleList diff --git a/consensus/aura/aura_test.go b/consensus/aura/aura_test.go index a04eda65225..ab9c6e0bdbe 100644 --- a/consensus/aura/aura_test.go +++ b/consensus/aura/aura_test.go @@ -9,86 +9,90 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus/aura" + "github.com/ledgerwatch/erigon/consensus/aura/consensusconfig" "github.com/ledgerwatch/erigon/consensus/aura/test" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/turbo/stages" + "github.com/ledgerwatch/erigon/turbo/trie" "github.com/stretchr/testify/require" ) /* - #[test] - fn block_reward_contract() { - let spec = Spec::new_test_round_block_reward_contract(); - let tap = Arc::new(AccountProvider::transient_provider()); - - let addr1 = tap.insert_account(keccak("1").into(), &"1".into()).unwrap(); - - let engine = &*spec.engine; - let genesis_header = spec.genesis_header(); - 
let db1 = spec - .ensure_db_good(get_temp_state_db(), &Default::default()) - .unwrap(); - let db2 = spec - .ensure_db_good(get_temp_state_db(), &Default::default()) - .unwrap(); - - let last_hashes = Arc::new(vec![genesis_header.hash()]); - - let client = generate_dummy_client_with_spec(Spec::new_test_round_block_reward_contract); - engine.register_client(Arc::downgrade(&client) as _); - - // step 2 - let b1 = OpenBlock::new( - engine, - Default::default(), - false, - db1, - &genesis_header, - last_hashes.clone(), - addr1, - (3141562.into(), 31415620.into()), - vec![], - false, - None, - ) - .unwrap(); - let b1 = b1.close_and_lock().unwrap(); - - // since the block is empty it isn't sealed and we generate empty steps - engine.set_signer(Some(Box::new((tap.clone(), addr1, "1".into())))); - assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); - engine.step(); - - // step 3 - // the signer of the accumulated empty step message should be rewarded - let b2 = OpenBlock::new( - engine, - Default::default(), - false, - db2, - &genesis_header, - last_hashes.clone(), - addr1, - (3141562.into(), 31415620.into()), - vec![], - false, - None, - ) - .unwrap(); - let addr1_balance = b2.state.balance(&addr1).unwrap(); - - // after closing the block `addr1` should be reward twice, one for the included empty step - // message and another for block creation - let b2 = b2.close_and_lock().unwrap(); - - // the contract rewards (1000 + kind) for each benefactor/reward kind - assert_eq!( - b2.state.balance(&addr1).unwrap(), - addr1_balance + (1000 + 0) + (1000 + 2), - ) - } +#[test] + + fn block_reward_contract() { + let spec = Spec::new_test_round_block_reward_contract(); + let tap = Arc::new(AccountProvider::transient_provider()); + + let addr1 = tap.insert_account(keccak("1").into(), &"1".into()).unwrap(); + + let engine = &*spec.engine; + let genesis_header = spec.genesis_header(); + let db1 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); 
+ let db2 = spec + .ensure_db_good(get_temp_state_db(), &Default::default()) + .unwrap(); + + let last_hashes = Arc::new(vec![genesis_header.hash()]); + + let client = generate_dummy_client_with_spec(Spec::new_test_round_block_reward_contract); + engine.register_client(Arc::downgrade(&client) as _); + + // step 2 + let b1 = OpenBlock::new( + engine, + Default::default(), + false, + db1, + &genesis_header, + last_hashes.clone(), + addr1, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let b1 = b1.close_and_lock().unwrap(); + + // since the block is empty it isn't sealed and we generate empty steps + engine.set_signer(Some(Box::new((tap.clone(), addr1, "1".into())))); + assert_eq!(engine.generate_seal(&b1, &genesis_header), Seal::None); + engine.step(); + + // step 3 + // the signer of the accumulated empty step message should be rewarded + let b2 = OpenBlock::new( + engine, + Default::default(), + false, + db2, + &genesis_header, + last_hashes.clone(), + addr1, + (3141562.into(), 31415620.into()), + vec![], + false, + None, + ) + .unwrap(); + let addr1_balance = b2.state.balance(&addr1).unwrap(); + + // after closing the block `addr1` should be reward twice, one for the included empty step + // message and another for block creation + let b2 = b2.close_and_lock().unwrap(); + + // the contract rewards (1000 + kind) for each benefactor/reward kind + assert_eq!( + b2.state.balance(&addr1).unwrap(), + addr1_balance + (1000 + 0) + (1000 + 2), + ) + } */ func TestRewardContract(t *testing.T) { t.Skip("not ready yet") @@ -187,3 +191,46 @@ func TestRewardContract(t *testing.T) { ) */ } + +// Check that the first block of Gnosis Chain, which doesn't have any transactions, +// does not change the state root. 
+func TestEmptyBlock(t *testing.T) { + types.SetHeaderSealFlag(true) + defer types.SetHeaderSealFlag(false) + + require := require.New(t) + genesis := core.DefaultGnosisGenesisBlock() + genesisBlock, _, err := genesis.ToBlock() + require.NoError(err) + + chainConfig := genesis.Config + auraDB := memdb.NewTestDB(t) + engine, err := aura.NewAuRa(chainConfig.Aura, auraDB, chainConfig.Aura.Etherbase, consensusconfig.GetConfigByChain(chainConfig.ChainName)) + require.NoError(err) + m := stages.MockWithGenesisEngine(t, genesis, engine, false) + + time := uint64(1539016985) + header := core.MakeEmptyHeader(genesisBlock.Header(), chainConfig, time, nil) + header.UncleHash = types.EmptyUncleHash + header.TxHash = trie.EmptyRoot + header.ReceiptHash = trie.EmptyRoot + header.Coinbase = common.HexToAddress("0xcace5b3c29211740e595850e80478416ee77ca21") + header.Difficulty = engine.CalcDifficulty(nil, time, + 0, + genesisBlock.Difficulty(), + genesisBlock.NumberU64(), + genesisBlock.Hash(), + genesisBlock.UncleHash(), + genesisBlock.Seal(), + ) + + block := types.NewBlockWithHeader(header) + + headers, blocks, receipts := make([]*types.Header, 1), make(types.Blocks, 1), make([]types.Receipts, 1) + headers[0] = header + blocks[0] = block + + chain := &core.ChainPack{Headers: headers, Blocks: blocks, Receipts: receipts, TopBlock: block} + err = m.InsertChain(chain) + require.NoError(err) +} diff --git a/consensus/aura/aurainterfaces/interface.go b/consensus/aura/aurainterfaces/interface.go index 367e3465180..0f2af178a33 100644 --- a/consensus/aura/aurainterfaces/interface.go +++ b/consensus/aura/aurainterfaces/interface.go @@ -45,7 +45,7 @@ type SealRegular SealKind // Engine does not generate seal for this block right now. type None SealKind -/// The type of sealing the engine is currently able to perform. +// / The type of sealing the engine is currently able to perform. 
type SealingState uint8 const ( diff --git a/consensus/aura/config.go b/consensus/aura/config.go index 9015821c69c..1e9f522ab7d 100644 --- a/consensus/aura/config.go +++ b/consensus/aura/config.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// Package clique implements the proof-of-authority consensus engine. +// Package aura implements the proof-of-authority consensus engine. package aura import ( @@ -42,7 +42,7 @@ type ValidatorSetJson struct { List []common.Address `json:"list"` // Address of a contract that indicates the list of authorities. SafeContract *common.Address `json:"safeContract"` - // Address of a contract that indicates the list of authorities and enables reporting of theor misbehaviour using transactions. + // Address of a contract that indicates the list of authorities and enables reporting of their misbehaviour using transactions. Contract *common.Address `json:"contract"` // A map of starting blocks for each validator set. Multi map[uint64]*ValidatorSetJson `json:"multi"` @@ -57,8 +57,8 @@ func newValidatorSetFromJson(j *ValidatorSetJson, posdaoTransition *uint64) Vali } if j.Contract != nil { return &ValidatorContract{ - contractAddress: *j.SafeContract, - validators: ValidatorSafeContract{contractAddress: *j.SafeContract, posdaoTransition: posdaoTransition}, + contractAddress: *j.Contract, + validators: ValidatorSafeContract{contractAddress: *j.Contract, posdaoTransition: posdaoTransition}, posdaoTransition: posdaoTransition, } } @@ -73,7 +73,7 @@ func newValidatorSetFromJson(j *ValidatorSetJson, posdaoTransition *uint64) Vali return nil } -//TODO: StepDuration and BlockReward - now are uint64, but it can be an object in non-sokol consensus +// TODO: StepDuration and BlockReward - now are uint64, but it can be an object in non-sokol consensus type JsonSpec struct { StepDuration *uint64 `json:"stepDuration"` // Block duration, in seconds. 
Validators *ValidatorSetJson `json:"validators"` // Valid authorities diff --git a/consensus/aura/consensusconfig/poagnosis.json b/consensus/aura/consensusconfig/poagnosis.json index e5474089ee5..faa3103f5a3 100644 --- a/consensus/aura/consensusconfig/poagnosis.json +++ b/consensus/aura/consensusconfig/poagnosis.json @@ -6,20 +6,25 @@ "validators": { "multi": { "0": { - "safeContract": "0xcace5b3c29211740e595850e80478416ee77ca21" + "list": [ + "0xcace5b3c29211740e595850e80478416ee77ca21" + ] }, "1300": { "safeContract": "0x22e1229a2c5b95a60983b5577f745a603284f535" }, "9186425": { - "safeContract": "0xB87BE9f7196F2AE084Ca1DE6af5264292976e013" + "contract": "0xB87BE9f7196F2AE084Ca1DE6af5264292976e013" } } }, "blockRewardContractAddress": "0x867305d19606aadba405ce534e303d0e225f9556", "blockRewardContractTransition": 1310, + "blockRewardContractTransitions": { + "9186425": "0x481c034c6d9441db23ea48de68bcae812c5d39ba" + }, "randomnessContractAddress": { "9186425": "0x5870b0527DeDB1cFBD9534343Feda1a41Ce47766" - } + }, + "posdaoTransition": 9186425 } - diff --git a/consensus/aura/validators.go b/consensus/aura/validators.go index bab7e6759c6..c80bc5c1e69 100644 --- a/consensus/aura/validators.go +++ b/consensus/aura/validators.go @@ -22,7 +22,7 @@ import ( "go.uber.org/atomic" ) -//nolint +// nolint type CallResults struct { data []byte proof [][]byte @@ -192,7 +192,7 @@ func count(s ValidatorSet, h common.Hash, call consensus.Call) (uint64, error) { return s.countWithCaller(h, call) } -//nolint +// nolint type MultiItem struct { num uint64 hash common.Hash @@ -274,7 +274,7 @@ func (s *Multi) onCloseBlock(header *types.Header, address common.Address) error } // TODO: do we need add `proof` argument? 
-//nolint +// nolint func (s *Multi) epochSet(firstInEpoch bool, num uint64, proof []byte, call consensus.SystemCall) (SimpleList, common.Hash, error) { setBlock, set := s.correctSetByNumber(num) firstInEpoch = setBlock == num @@ -333,20 +333,20 @@ func NewSimpleList(validators []common.Address) *SimpleList { return &SimpleList{validators: validators} } -//nolint +// nolint type ReportQueueItem struct { addr common.Address blockNum uint64 data []byte } -//nolint +// nolint type ReportQueue struct { mu sync.RWMutex list *list.List } -//nolint +// nolint func (q *ReportQueue) push(addr common.Address, blockNum uint64, data []byte) { q.mu.Lock() defer q.mu.Unlock() @@ -354,7 +354,7 @@ func (q *ReportQueue) push(addr common.Address, blockNum uint64, data []byte) { } // Filters reports of validators that have already been reported or are banned. -//nolint +// nolint func (q *ReportQueue) filter(abi aurainterfaces.ValidatorSetABI, client client, ourAddr, contractAddr common.Address) error { q.mu.Lock() defer q.mu.Unlock() @@ -384,7 +384,7 @@ func (q *ReportQueue) filter(abi aurainterfaces.ValidatorSetABI, client client, } // Removes reports from the queue if it contains more than `MAX_QUEUED_REPORTS` entries. -//nolint +// nolint func (q *ReportQueue) truncate() { // The maximum number of reports to keep queued. const MaxQueuedReports = 10 @@ -405,7 +405,7 @@ func (q *ReportQueue) truncate() { } // The validator contract should have the following interface: -//nolint +// nolint type ValidatorSafeContract struct { contractAddress common.Address validators *lru.Cache // RwLock>, @@ -439,7 +439,7 @@ func NewValidatorSafeContract(contractAddress common.Address, posdaoTransition * // but with the same parameters. // // Returns a list of contract calls to be pushed onto the new block. 
-//func generateEngineTransactions(_firstInEpoch bool, _header *types.Header, _call SystemCall) -> Result, EthcoreError> +// func generateEngineTransactions(_firstInEpoch bool, _header *types.Header, _call SystemCall) -> Result, EthcoreError> func (s *ValidatorSafeContract) epochSet(firstInEpoch bool, num uint64, setProof []byte, call consensus.SystemCall) (SimpleList, common.Hash, error) { if firstInEpoch { var proof FirstValidatorSetProof @@ -512,7 +512,7 @@ func (s *ValidatorSafeContract) epochSet(firstInEpoch bool, num uint64, setProof } // check a first proof: fetch the validator set at the given block. -//nolint +// nolint func checkFirstValidatorSetProof(contract_address common.Address, oldHeader *types.Header, dbItems [][]byte) ([]common.Address, error) { /* fn check_first_proof( @@ -579,7 +579,7 @@ func checkFirstValidatorSetProof(contract_address common.Address, oldHeader *typ // inter-contract proofs are a header and receipts. // checking will involve ensuring that the receipts match the header and // extracting the validator set from the receipts. 
-//nolint +// nolint func (s *ValidatorSafeContract) defaultCaller(blockHash common.Hash) (Call, error) { return func(addr common.Address, data []byte) (CallResults, error) { return s.client.CallAtBlockHash(blockHash, addr, data) @@ -650,18 +650,18 @@ func (s *ValidatorSafeContract) genesisEpochData(header *types.Header, call cons } func (s *ValidatorSafeContract) onEpochBegin(firstInEpoch bool, header *types.Header, caller consensus.SystemCall) error { - data := common.FromHex("75286211") + data := common.FromHex("75286211") // s.abi.Pack("finalizeChange") _, err := caller(s.contractAddress, data) if err != nil { return err } /* - let data = validator_set::functions::finalize_change::encode_input(); - caller(self.contract_address, data) - .map(|_| ()) - .map_err(::engines::EngineError::FailedSystemCall) - .map_err(Into::into) + let data = validator_set::functions::finalize_change::encode_input(); + caller(self.contract_address, data) + .map(|_| ()) + .map_err(::engines::EngineError::FailedSystemCall) + .map_err(Into::into) */ return nil } diff --git a/consensus/bor/api.go b/consensus/bor/api.go index 1801c385c3a..ff33958e057 100644 --- a/consensus/bor/api.go +++ b/consensus/bor/api.go @@ -132,7 +132,7 @@ func (api *API) GetRootHash(start uint64, end uint64) (string, error) { if root, known := api.rootHashCache.Get(key); known { return root.(string), nil } - length := uint64(end - start + 1) + length := end - start + 1 if length > MaxCheckpointLength { return "", &MaxCheckpointLengthExceededError{start, end} } @@ -147,7 +147,7 @@ func (api *API) GetRootHash(start uint64, end uint64) (string, error) { wg.Add(1) concurrent <- true go func(number uint64) { - blockHeaders[number-start] = api.chain.GetHeaderByNumber(uint64(number)) + blockHeaders[number-start] = api.chain.GetHeaderByNumber(number) <-concurrent wg.Done() }(i) diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index 18e28a3f6b6..229dc18e0d4 100644 --- a/consensus/bor/bor.go +++ 
b/consensus/bor/bor.go @@ -554,11 +554,14 @@ func (c *Bor) snapshot(chain consensus.ChainHeaderReader, number uint64, hash co return nil, err } c.recents.Add(snap.Hash, snap) - // We've generated a new checkpoint snapshot, save to disk - if err = snap.store(c.DB); err != nil { - return nil, err + + if snap.Number%checkpointInterval == 0 { + // We've generated a new checkpoint snapshot, save to disk + if err = snap.store(c.DB); err != nil { + return nil, err + } + log.Trace("Stored snapshot to disk", "number", snap.Number, "hash", snap.Hash) } - log.Trace("Stored snapshot to disk", "number", snap.Number, "hash", snap.Hash) } if cont { snap = nil @@ -1081,7 +1084,7 @@ func (c *Bor) getSpanForBlock(blockNum uint64) (*HeimdallSpan, error) { } else { for span.StartBlock > blockNum { // Span wit low enough block number is not loaded - var spanID uint64 = span.ID - 1 + var spanID = span.ID - 1 var heimdallSpan HeimdallSpan log.Info("Span with low enough block number is not loaded", "fetching span", spanID) response, err := c.HeimdallClient.FetchWithRetry(c.execCtx, fmt.Sprintf("bor/span/%d", spanID), "") @@ -1137,7 +1140,7 @@ func (c *Bor) fetchAndCommitSpan( } // get validators bytes - var validators []MinimalVal + validators := make([]MinimalVal, 0, len(heimdallSpan.ValidatorSet.Validators)) for _, val := range heimdallSpan.ValidatorSet.Validators { validators = append(validators, val.MinimalVal()) } @@ -1147,7 +1150,7 @@ func (c *Bor) fetchAndCommitSpan( } // get producers bytes - var producers []MinimalVal + producers := make([]MinimalVal, 0, len(heimdallSpan.SelectedProducers)) for _, val := range heimdallSpan.SelectedProducers { producers = append(producers, val.MinimalVal()) } @@ -1330,7 +1333,7 @@ func getUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*Validator) v := oldValidatorSet oldVals := v.Validators - var changes []*Validator + changes := make([]*Validator, 0, len(oldVals)) for _, ov := range oldVals { if f, ok := validatorContains(newVals, 
ov); ok { ov.VotingPower = f.VotingPower diff --git a/consensus/bor/merkle.go b/consensus/bor/merkle.go index 68244f8d080..ba00bc53e8f 100644 --- a/consensus/bor/merkle.go +++ b/consensus/bor/merkle.go @@ -36,8 +36,8 @@ func ConvertTo32(input []byte) (output [32]byte, err error) { return } -func Convert(input []([32]byte)) [][]byte { - var output [][]byte +func Convert(input [][32]byte) [][]byte { + output := make([][]byte, 0, len(input)) for _, in := range input { newInput := make([]byte, len(in[:])) copy(newInput, in[:]) diff --git a/consensus/bor/rest.go b/consensus/bor/rest.go index 0a9954f84f4..5b133aba0af 100644 --- a/consensus/bor/rest.go +++ b/consensus/bor/rest.go @@ -39,7 +39,7 @@ func NewHeimdallClient(urlString string) (*HeimdallClient, error) { h := &HeimdallClient{ urlString: urlString, client: http.Client{ - Timeout: time.Duration(5 * time.Second), + Timeout: 5 * time.Second, }, } return h, nil diff --git a/consensus/bor/validator.go b/consensus/bor/validator.go index 11e3dfb1e7e..edfa2f4e593 100644 --- a/consensus/bor/validator.go +++ b/consensus/bor/validator.go @@ -110,7 +110,7 @@ func (v *Validator) MinimalVal() MinimalVal { // ParseValidators returns validator set bytes func ParseValidators(validatorsBytes []byte) ([]*Validator, error) { if len(validatorsBytes)%40 != 0 { - return nil, errors.New("Invalid validators bytes") + return nil, errors.New("invalid validators bytes") } result := make([]*Validator, len(validatorsBytes)/40) diff --git a/consensus/bor/validator_set.go b/consensus/bor/validator_set.go index c1b7f181034..93c341f7d94 100644 --- a/consensus/bor/validator_set.go +++ b/consensus/bor/validator_set.go @@ -577,14 +577,15 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes // UpdateWithChangeSet attempts to update the validator set with 'changes'. 
// It performs the following steps: -// - validates the changes making sure there are no duplicates and splits them in updates and deletes -// - verifies that applying the changes will not result in errors -// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities -// across old and newly added validators are fair -// - computes the priorities of new validators against the final set -// - applies the updates against the validator set -// - applies the removals against the validator set -// - performs scaling and centering of priority values +// - validates the changes making sure there are no duplicates and splits them in updates and deletes +// - verifies that applying the changes will not result in errors +// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities +// across old and newly added validators are fair +// - computes the priorities of new validators against the final set +// - applies the updates against the validator set +// - applies the removals against the validator set +// - performs scaling and centering of priority values +// // If an error is detected during verification steps, it is returned and the validator set // is not changed. 
func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error { diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index cd61882a551..1ed5bfa0248 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -310,7 +310,7 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header } // If there's pending proposals, cast a vote on them if len(addresses) > 0 { - header.Coinbase = addresses[rand.Intn(len(addresses))] + header.Coinbase = addresses[rand.Intn(len(addresses))] // nolint: gosec if c.proposals[header.Coinbase] { copy(header.Nonce[:], NonceAuthVote) } else { @@ -438,7 +438,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res if header.Difficulty.Cmp(diffNoTurn) == 0 { // It's not our turn explicitly to sign, delay it a bit wiggle := time.Duration(len(snap.Signers)/2+1) * wiggleTime - delay += time.Duration(rand.Int63n(int64(wiggle))) + delay += time.Duration(rand.Int63n(int64(wiggle))) // nolint: gosec log.Trace("Out-of-turn signing requested", "wiggle", common.PrettyDuration(wiggle)) } diff --git a/consensus/db/db.go b/consensus/db/db.go index d5b6f45e8f2..638adce2e27 100644 --- a/consensus/db/db.go +++ b/consensus/db/db.go @@ -6,8 +6,11 @@ import ( "github.com/ledgerwatch/log/v3" ) -func OpenDatabase(path string, logger log.Logger, inmem bool) kv.RwDB { +func OpenDatabase(path string, logger log.Logger, inmem bool, readonly bool) kv.RwDB { opts := mdbx.NewMDBX(logger).Label(kv.ConsensusDB) + if readonly { + opts = opts.Readonly() + } if inmem { opts = opts.InMem() } else { diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go index e8f342b9198..51ab8d58e77 100644 --- a/consensus/ethash/algorithm.go +++ b/consensus/ethash/algorithm.go @@ -114,8 +114,14 @@ func makeHasher(h hash.Hash) hasher { outputLen := rh.Size() return func(dest []byte, data []byte) { rh.Reset() - rh.Write(data) - rh.Read(dest[:outputLen]) + _, writeErr := 
rh.Write(data) + if writeErr != nil { + log.Warn("Failed to write data", "err", writeErr) + } + _, readErr := rh.Read(dest[:outputLen]) + if readErr != nil { + log.Warn("Failed to read data", "err", readErr) + } } } @@ -132,9 +138,15 @@ func seedHash(block uint64) []byte { for i := 0; i < int(block/epochLength); i++ { h.Sha.Reset() //nolint:errcheck - h.Sha.Write(seed) + _, writeErr := h.Sha.Write(seed) + if writeErr != nil { + log.Warn("Failed to write data", "err", writeErr) + } //nolint:errcheck - h.Sha.Read(seed) + _, readErr := h.Sha.Read(seed) + if readErr != nil { + log.Warn("Failed to read data", "err", readErr) + } } common.ReturnHasherToPool(h) diff --git a/consensus/ethash/api.go b/consensus/ethash/api.go index 64eba4bab02..16d2747d97d 100644 --- a/consensus/ethash/api.go +++ b/consensus/ethash/api.go @@ -34,10 +34,11 @@ type API struct { // GetWork returns a work package for external miner. // // The work package consists of 3 strings: -// result[0] - 32 bytes hex encoded current block header pow-hash -// result[1] - 32 bytes hex encoded seed hash used for DAG -// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty -// result[3] - hex encoded block number +// +// result[0] - 32 bytes hex encoded current block header pow-hash +// result[1] - 32 bytes hex encoded seed hash used for DAG +// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty +// result[3] - hex encoded block number func (api *API) GetWork() ([4]string, error) { if api.ethash.remote == nil { return [4]string{}, errors.New("not supported") diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index 042ca99f0b4..3c2666ad659 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -35,6 +35,7 @@ import ( "github.com/hashicorp/golang-lru/simplelru" "github.com/ledgerwatch/erigon/common/debug" + cmath "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus" 
"github.com/ledgerwatch/erigon/metrics" "github.com/ledgerwatch/erigon/rpc" @@ -135,8 +136,12 @@ func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(bu if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { return nil, nil, nil, err } + suffix, err := cmath.RandInt64() + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get random integer: %v", err) + } // Create a huge temporary empty file to fill with data - temp := path + "." + strconv.Itoa(rand.Int()) + temp := path + "." + strconv.Itoa(int(suffix)) dump, err := os.Create(temp) if err != nil { diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go index ce143340ba3..4c655b03f33 100644 --- a/consensus/ethash/sealer.go +++ b/consensus/ethash/sealer.go @@ -60,7 +60,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block ethash.lock.Unlock() return err } - ethash.rand = rand.New(rand.NewSource(seed.Int64())) + ethash.rand = rand.New(rand.NewSource(seed.Int64())) // nolint } ethash.lock.Unlock() // Push new work to remote sealer @@ -220,10 +220,11 @@ func (s *remoteSealer) loop() { // makeWork creates a work package for external miner. 
// // The work package consists of 3 strings: -// result[0], 32 bytes hex encoded current block header pow-hash -// result[1], 32 bytes hex encoded seed hash used for DAG -// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty -// result[3], hex encoded block number +// +// result[0], 32 bytes hex encoded current block header pow-hash +// result[1], 32 bytes hex encoded seed hash used for DAG +// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty +// result[3], hex encoded block number func (s *remoteSealer) makeWork(block *types.Block) { hash := s.ethash.SealHash(block.Header()) s.currentWork[0] = hash.Hex() diff --git a/consensus/misc/dao.go b/consensus/misc/dao.go index a13dbbd4d9c..4fb068d8483 100644 --- a/consensus/misc/dao.go +++ b/consensus/misc/dao.go @@ -42,10 +42,11 @@ var ( // ensure it conforms to DAO hard-fork rules. // // DAO hard-fork extension to the header validity: -// a) if the node is no-fork, do not accept blocks in the [fork, fork+10) range -// with the fork specific extra-data set -// b) if the node is pro-fork, require blocks in the specific range to have the -// unique extra-data set. +// +// a) if the node is no-fork, do not accept blocks in the [fork, fork+10) range +// with the fork specific extra-data set +// b) if the node is pro-fork, require blocks in the specific range to have the +// unique extra-data set. func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error { // Short circuit validation if the node doesn't care about the DAO fork if config.DAOForkBlock == nil { diff --git a/consensus/misc/eip1559_test.go b/consensus/misc/eip1559_test.go index e1c7c96d530..1d64ff833cf 100644 --- a/consensus/misc/eip1559_test.go +++ b/consensus/misc/eip1559_test.go @@ -29,37 +29,41 @@ import ( // do not use e.g. SetInt() on the numbers. 
For testing only func copyConfig(original *params.ChainConfig) *params.ChainConfig { return ¶ms.ChainConfig{ - ChainName: original.ChainName, - ChainID: original.ChainID, - Consensus: original.Consensus, - HomesteadBlock: original.HomesteadBlock, - DAOForkBlock: original.DAOForkBlock, - DAOForkSupport: original.DAOForkSupport, - TangerineWhistleBlock: original.TangerineWhistleBlock, - TangerineWhistleHash: original.TangerineWhistleHash, - SpuriousDragonBlock: original.SpuriousDragonBlock, - ByzantiumBlock: original.ByzantiumBlock, - ConstantinopleBlock: original.ConstantinopleBlock, - PetersburgBlock: original.PetersburgBlock, - IstanbulBlock: original.IstanbulBlock, - MuirGlacierBlock: original.MuirGlacierBlock, - BerlinBlock: original.BerlinBlock, - LondonBlock: original.LondonBlock, - ArrowGlacierBlock: original.ArrowGlacierBlock, - GrayGlacierBlock: original.GrayGlacierBlock, - RamanujanBlock: original.RamanujanBlock, - NielsBlock: original.NielsBlock, - MirrorSyncBlock: original.MirrorSyncBlock, - BrunoBlock: original.BrunoBlock, - TerminalTotalDifficulty: original.TerminalTotalDifficulty, - TerminalBlockNumber: original.TerminalBlockNumber, - TerminalBlockHash: original.TerminalBlockHash, - MergeNetsplitBlock: original.MergeNetsplitBlock, - Ethash: original.Ethash, - Clique: original.Clique, - Aura: original.Aura, - Parlia: original.Parlia, - Bor: original.Bor, + ChainName: original.ChainName, + ChainID: original.ChainID, + Consensus: original.Consensus, + HomesteadBlock: original.HomesteadBlock, + DAOForkBlock: original.DAOForkBlock, + DAOForkSupport: original.DAOForkSupport, + TangerineWhistleBlock: original.TangerineWhistleBlock, + TangerineWhistleHash: original.TangerineWhistleHash, + SpuriousDragonBlock: original.SpuriousDragonBlock, + ByzantiumBlock: original.ByzantiumBlock, + ConstantinopleBlock: original.ConstantinopleBlock, + PetersburgBlock: original.PetersburgBlock, + IstanbulBlock: original.IstanbulBlock, + MuirGlacierBlock: 
original.MuirGlacierBlock, + BerlinBlock: original.BerlinBlock, + LondonBlock: original.LondonBlock, + ArrowGlacierBlock: original.ArrowGlacierBlock, + GrayGlacierBlock: original.GrayGlacierBlock, + TerminalTotalDifficulty: original.TerminalTotalDifficulty, + TerminalTotalDifficultyPassed: original.TerminalTotalDifficultyPassed, + MergeNetsplitBlock: original.MergeNetsplitBlock, + ShanghaiBlock: original.ShanghaiBlock, + CancunBlock: original.CancunBlock, + RamanujanBlock: original.RamanujanBlock, + NielsBlock: original.NielsBlock, + MirrorSyncBlock: original.MirrorSyncBlock, + BrunoBlock: original.BrunoBlock, + EulerBlock: original.EulerBlock, + GibbsBlock: original.GibbsBlock, + PosdaoBlock: original.PosdaoBlock, + Ethash: original.Ethash, + Clique: original.Clique, + Aura: original.Aura, + Parlia: original.Parlia, + Bor: original.Bor, } } diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index 731fd618121..211a14ab071 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -2,17 +2,19 @@ package parlia import ( "bytes" + "context" "encoding/hex" "errors" "fmt" "io" "math/big" - "os" "sort" "strings" "sync" "time" + "github.com/ledgerwatch/erigon/core/rawdb" + lru "github.com/hashicorp/golang-lru" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" @@ -219,6 +221,7 @@ type Parlia struct { config *params.ParliaConfig // Consensus engine configuration parameters for parlia consensus genesisHash common.Hash db kv.RwDB // Database to store and retrieve snapshot checkpoints + chainDb kv.RwDB recentSnaps *lru.ARCCache // Snapshots for recent block to speed up signatures *lru.ARCCache // Signatures of recent blocks to speed up mining @@ -228,7 +231,9 @@ type Parlia struct { val common.Address // Ethereum address of the signing key signFn SignFn // Signer function to authorize hashes with - lock sync.RWMutex // Protects the signer fields + signerLock sync.RWMutex // Protects the signer fields + + snapLock 
sync.RWMutex // Protects snapshots creation validatorSetABI abi.ABI slashABI abi.ABI @@ -244,6 +249,7 @@ func New( chainConfig *params.ChainConfig, db kv.RwDB, snapshots *snapshotsync.RoSnapshots, + chainDb kv.RwDB, ) *Parlia { // get parlia config parliaConfig := chainConfig.Parlia @@ -274,6 +280,7 @@ func New( chainConfig: chainConfig, config: parliaConfig, db: db, + chainDb: chainDb, recentSnaps: recentSnaps, signatures: signatures, validatorSetABI: vABI, @@ -491,8 +498,17 @@ func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash var ( headers []*types.Header snap *Snapshot + doLog bool ) + if s, ok := p.recentSnaps.Get(hash); ok { + snap = s.(*Snapshot) + } else { + p.snapLock.Lock() + defer p.snapLock.Unlock() + doLog = true + } + for snap == nil { // If an in-memory snapshot was found, use that if s, ok := p.recentSnaps.Get(hash); ok { @@ -538,7 +554,10 @@ func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash } parents = parents[:len(parents)-1] } else { - // No explicit parents (or no more left), reach out to the database + if doLog && number%100_000 == 0 { + // No explicit parents (or no more left), reach out to the database + log.Info("[parlia] snapshots build, gather headers", "block", number) + } header = chain.GetHeader(hash, number) if header == nil { return nil, consensus.ErrUnknownAncestor @@ -557,14 +576,14 @@ func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash for i := 0; i < len(headers)/2; i++ { headers[i], headers[len(headers)-1-i] = headers[len(headers)-1-i], headers[i] } - snap, err := snap.apply(headers, chain, parents, p.chainConfig.ChainID) + snap, err := snap.apply(headers, chain, parents, p.chainConfig.ChainID, doLog) if err != nil { return nil, err } p.recentSnaps.Add(snap.Hash, snap) // If we've generated a new checkpoint snapshot, save to disk - if snap.Number%checkpointInterval == 0 && len(headers) > 0 { + if verify && snap.Number%checkpointInterval == 0 
&& len(headers) > 0 { if err = snap.store(p.db); err != nil { return nil, err } @@ -719,8 +738,8 @@ func (p *Parlia) finalize(header *types.Header, state *state.IntraBlockState, tx if number == 1 { var err error if txs, systemTxs, receipts, err = p.initContract(state, header, txs, receipts, systemTxs, &header.GasUsed, mining); err != nil { - log.Error("[parlia] init contract failed: %+v", err) - os.Exit(1) + log.Error("[parlia] init contract failed", "err", err) + return nil, nil, fmt.Errorf("init contract failed: %v", err) } } if header.Difficulty.Cmp(diffInTurn) != 0 { @@ -742,12 +761,14 @@ func (p *Parlia) finalize(header *types.Header, state *state.IntraBlockState, tx } else { txs = append(txs, tx) receipts = append(receipts, receipt) + log.Debug("slash successful", "txns", txs.Len(), "receipts", len(receipts), "gasUsed", header.GasUsed) } } } if txs, systemTxs, receipts, err = p.distributeIncoming(header.Coinbase, state, header, txs, receipts, systemTxs, &header.GasUsed, mining); err != nil { return nil, nil, err } + log.Debug("distribute successful", "txns", txs.Len(), "receipts", len(receipts), "gasUsed", header.GasUsed) if len(systemTxs) > 0 { return nil, nil, fmt.Errorf("the length of systemTxs is still %d", len(systemTxs)) } @@ -775,8 +796,8 @@ func (p *Parlia) FinalizeAndAssemble(_ *params.ChainConfig, header *types.Header // Authorize injects a private key into the consensus engine to mint new blocks // with. 
func (p *Parlia) Authorize(val common.Address, signFn SignFn) { - p.lock.Lock() - defer p.lock.Unlock() + p.signerLock.Lock() + defer p.signerLock.Unlock() p.val = val p.signFn = signFn @@ -801,9 +822,9 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res return nil } // Don't hold the val fields for the entire sealing procedure - p.lock.RLock() + p.signerLock.RLock() val, signFn := p.val, p.signFn - p.lock.RUnlock() + p.signerLock.RUnlock() snap, err := p.snapshot(chain, number-1, header.ParentHash, nil, false /* verify */) if err != nil { @@ -829,7 +850,7 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res // Sweet, the protocol permits us to sign the block, wait for our time delay := p.delayForRamanujanFork(snap, header) - log.Info("Sealing block with", "number", number, "delay", delay, "headerDifficulty", header.Difficulty, "val", val.Hex()) + log.Info("Sealing block with", "number", number, "delay", delay, "headerDifficulty", header.Difficulty, "val", val.Hex(), "headerHash", header.Hash().Hex(), "gasUsed", header.GasUsed, "block txn number", block.Transactions().Len(), "State Root", header.Root) // Sign all the things! 
sig, err := signFn(val, crypto.Keccak256(parliaRLP(header, p.chainConfig.ChainID)), p.chainConfig.ChainID) @@ -846,7 +867,7 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res return case <-time.After(delay): } - if p.shouldWaitForCurrentBlockProcess(chain, header, snap) { + if p.shouldWaitForCurrentBlockProcess(p.chainDb, header, snap) { log.Info("[parlia] Waiting for received in turn block to process") select { case <-stop: @@ -933,12 +954,20 @@ func (p *Parlia) IsSystemContract(to *common.Address) bool { return isToSystemContract(*to) } -func (p *Parlia) shouldWaitForCurrentBlockProcess(chain consensus.ChainHeaderReader, header *types.Header, snap *Snapshot) bool { +func (p *Parlia) shouldWaitForCurrentBlockProcess(chainDb kv.RwDB, header *types.Header, snap *Snapshot) bool { if header.Difficulty.Cmp(diffInTurn) == 0 { return false } - highestVerifiedHeader := chain.CurrentHeader() + roTx, err := chainDb.BeginRo(context.Background()) + if err != nil { + return false + } + defer roTx.Rollback() + hash := rawdb.ReadHeadHeaderHash(roTx) + number := rawdb.ReadHeaderNumber(roTx, hash) + + highestVerifiedHeader := rawdb.ReadHeader(roTx, hash, *number) if highestVerifiedHeader == nil { return false } @@ -1218,7 +1247,7 @@ func (p *Parlia) systemCall(from, contract common.Address, data []byte, ibs *sta ) vmConfig := vm.Config{NoReceipts: true} // Create a new context to be used in the EVM environment - blockContext := core.NewEVMBlockContext(header, core.GetHashFn(header, nil), p, &from, nil) + blockContext := core.NewEVMBlockContext(header, core.GetHashFn(header, nil), p, &from) evm := vm.NewEVM(blockContext, core.NewEVMTxContext(msg), ibs, chainConfig, vmConfig) ret, leftOverGas, err := evm.Call( vm.AccountRef(msg.From()), diff --git a/consensus/parlia/ramanujanfork.go b/consensus/parlia/ramanujanfork.go index e908c8f5f0e..1756d795896 100644 --- a/consensus/parlia/ramanujanfork.go +++ b/consensus/parlia/ramanujanfork.go @@ -21,7 +21,7 
@@ func (p *Parlia) delayForRamanujanFork(snap *Snapshot, header *types.Header) tim if header.Difficulty.Cmp(diffNoTurn) == 0 { // It's not our turn explicitly to sign, delay it a bit wiggle := time.Duration(len(snap.Validators)/2+1) * wiggleTimeBeforeFork - delay += fixedBackOffTimeBeforeFork + time.Duration(rand.Int63n(int64(wiggle))) + delay += fixedBackOffTimeBeforeFork + time.Duration(rand.Int63n(int64(wiggle))) // nolint } return delay } diff --git a/consensus/parlia/snapshot.go b/consensus/parlia/snapshot.go index 2d9198d1dbb..cd7f3381496 100644 --- a/consensus/parlia/snapshot.go +++ b/consensus/parlia/snapshot.go @@ -27,6 +27,7 @@ import ( "sort" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/log/v3" lru "github.com/hashicorp/golang-lru" @@ -149,7 +150,7 @@ func (s *Snapshot) copy() *Snapshot { return cpy } -//nolint +// nolint func (s *Snapshot) isMajorityFork(forkHash string) bool { ally := 0 for _, h := range s.RecentForkHashes { @@ -160,7 +161,7 @@ func (s *Snapshot) isMajorityFork(forkHash string) bool { return ally > len(s.RecentForkHashes)/2 } -func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderReader, parents []*types.Header, chainId *big.Int) (*Snapshot, error) { +func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderReader, parents []*types.Header, chainId *big.Int, doLog bool) (*Snapshot, error) { // Allow passing in no headers for cleaner code if len(headers) == 0 { return s, nil @@ -185,6 +186,9 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea for _, header := range headers { number := header.Number.Uint64() + if doLog && number%100_000 == 0 { + log.Info("[parlia] snapshots build, recover from headers", "block", number) + } // Delete the oldest validator from the recent list to allow it signing again if limit := uint64(len(snap.Validators)/2 + 1); number >= limit { delete(snap.Recents, number-limit) diff --git a/consensus/parlia/utils.go 
b/consensus/parlia/utils.go index 27f663db04f..e0d35c45f18 100644 --- a/consensus/parlia/utils.go +++ b/consensus/parlia/utils.go @@ -16,7 +16,7 @@ func backOffTime(snap *Snapshot, val common.Address) uint64 { return 0 } s := rand.NewSource(int64(snap.Number)) - r := rand.New(s) + r := rand.New(s) // nolint: gosec n := len(snap.Validators) backOffSteps := make([]uint64, 0, n) for idx := uint64(0); idx < uint64(n); idx++ { diff --git a/consensus/serenity/serenity.go b/consensus/serenity/serenity.go index c1192ea6375..a05cd7f8cef 100644 --- a/consensus/serenity/serenity.go +++ b/consensus/serenity/serenity.go @@ -81,7 +81,8 @@ func (s *Serenity) VerifyHeader(chain consensus.ChainHeaderReader, header *types return err } if !reached { - return s.eth1Engine.VerifyHeader(chain, header, seal) + // Not verifying seals if the TTD is passed + return s.eth1Engine.VerifyHeader(chain, header, !chain.Config().TerminalTotalDifficultyPassed) } // Short circuit if the parent is not known parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) @@ -206,6 +207,7 @@ func (s *Serenity) GenerateSeal(chain consensus.ChainHeaderReader, currnt, paren } func (s *Serenity) Initialize(config *params.ChainConfig, chain consensus.ChainHeaderReader, e consensus.EpochReader, header *types.Header, txs []types.Transaction, uncles []*types.Header, syscall consensus.SystemCall) { + s.eth1Engine.Initialize(config, chain, e, header, txs, uncles, syscall) } func (s *Serenity) APIs(chain consensus.ChainHeaderReader) []rpc.API { diff --git a/core/allocs/erigonmine.json b/core/allocs/erigonmine.json deleted file mode 100644 index 96e08ea54d3..00000000000 --- a/core/allocs/erigonmine.json +++ /dev/null @@ -1,771 +0,0 @@ -{ - "0x0000000000000000000000000000000000000000": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000001": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000003": { - 
"balance": "0x1" - }, - "0x0000000000000000000000000000000000000004": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000005": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000006": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000007": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000008": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000009": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000000a": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000000b": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000000c": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000000d": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000000e": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000000f": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000010": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000011": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000012": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000013": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000014": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000015": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000016": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000017": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000018": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000019": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000001a": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000001b": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000001c": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000001d": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000001e": { - 
"balance": "0x0" - }, - "0x000000000000000000000000000000000000001f": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000020": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000021": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000022": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000023": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000024": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000025": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000026": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000027": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000028": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000029": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000002a": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000002b": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000002c": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000002d": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000002e": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000002f": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000030": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000031": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000032": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000033": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000034": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000035": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000036": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000037": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000038": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000039": { - 
"balance": "0x0" - }, - "0x000000000000000000000000000000000000003a": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000003b": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000003c": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000003d": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000003e": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000003f": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000040": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000041": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000042": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000043": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000044": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000045": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000046": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000047": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000048": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000049": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000004a": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000004b": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000004c": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000004d": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000004e": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000004f": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000050": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000051": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000052": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000053": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000054": { - 
"balance": "0x0" - }, - "0x0000000000000000000000000000000000000055": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000056": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000057": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000058": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000059": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000005a": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000005b": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000005c": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000005d": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000005e": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000005f": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000060": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000061": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000062": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000063": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000064": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000065": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000066": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000067": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000068": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000069": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000006a": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000006b": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000006c": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000006d": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000006e": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000006f": { - 
"balance": "0x0" - }, - "0x0000000000000000000000000000000000000070": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000071": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000072": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000073": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000074": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000075": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000076": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000077": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000078": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000079": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000007a": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000007b": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000007c": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000007d": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000007e": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000007f": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000080": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000081": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000082": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000083": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000084": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000085": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000086": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000087": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000088": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000089": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000008a": { - 
"balance": "0x0" - }, - "0x000000000000000000000000000000000000008b": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000008c": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000008d": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000008e": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000008f": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000090": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000091": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000092": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000093": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000094": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000095": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000096": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000097": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000098": { - "balance": "0x0" - }, - "0x0000000000000000000000000000000000000099": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000009a": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000009b": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000009c": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000009d": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000009e": { - "balance": "0x0" - }, - "0x000000000000000000000000000000000000009f": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000a0": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000a1": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000a2": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000a3": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000a4": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000a5": { - 
"balance": "0x0" - }, - "0x00000000000000000000000000000000000000a6": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000a7": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000a8": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000a9": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000aa": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ab": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ac": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ad": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ae": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000af": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000b0": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000b1": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000b2": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000b3": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000b4": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000b5": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000b6": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000b7": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000b8": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000b9": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ba": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000bb": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000bc": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000bd": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000be": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000bf": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000c0": { - 
"balance": "0x0" - }, - "0x00000000000000000000000000000000000000c1": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000c2": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000c3": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000c4": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000c5": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000c6": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000c7": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000c8": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000c9": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ca": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000cb": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000cc": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000cd": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ce": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000cf": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000d0": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000d1": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000d2": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000d3": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000d4": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000d5": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000d6": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000d7": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000d8": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000d9": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000da": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000db": { - 
"balance": "0x0" - }, - "0x00000000000000000000000000000000000000dc": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000dd": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000de": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000df": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000e0": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000e1": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000e2": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000e3": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000e4": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000e5": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000e6": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000e7": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000e8": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000e9": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ea": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000eb": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ec": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ed": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ee": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ef": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000f0": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000f1": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000f2": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000f3": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000f4": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000f5": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000f6": { - 
"balance": "0x0" - }, - "0x00000000000000000000000000000000000000f7": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000f8": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000f9": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000fa": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000fb": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000fc": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000fd": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000fe": { - "balance": "0x0" - }, - "0x00000000000000000000000000000000000000ff": { - "balance": "0x0" - } - } - \ No newline at end of file diff --git a/core/allocs/kiln-devnet.json b/core/allocs/kiln-devnet.json deleted file mode 100644 index f2935e638c4..00000000000 --- a/core/allocs/kiln-devnet.json +++ /dev/null @@ -1,840 +0,0 @@ -{ - "0x0000000000000000000000000000000000000000": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000001": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000003": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000004": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000005": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000006": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000007": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000008": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000009": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000e": { - "balance": "1" - }, 
- "0x000000000000000000000000000000000000000f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000010": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000011": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000012": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000013": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000014": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000015": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000016": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000017": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000018": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000019": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000020": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000021": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000022": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000023": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000024": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000025": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000026": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000027": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000028": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000029": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002a": { - 
"balance": "1" - }, - "0x000000000000000000000000000000000000002b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000030": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000031": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000032": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000033": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000034": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000035": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000036": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000037": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000038": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000039": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000040": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000041": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000042": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000043": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000044": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000045": { - "balance": "1" - }, - 
"0x0000000000000000000000000000000000000046": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000047": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000048": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000049": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000050": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000051": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000052": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000053": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000054": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000055": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000056": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000057": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000058": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000059": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000060": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000061": { - 
"balance": "1" - }, - "0x0000000000000000000000000000000000000062": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000063": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000064": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000065": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000066": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000067": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000068": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000069": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000070": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000071": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000072": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000073": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000074": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000075": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000076": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000077": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000078": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000079": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007c": { - "balance": "1" - }, - 
"0x000000000000000000000000000000000000007d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000080": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000081": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000082": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000083": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000084": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000085": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000086": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000087": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000088": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000089": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000090": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000091": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000092": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000093": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000094": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000095": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000096": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000097": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000098": { - 
"balance": "1" - }, - "0x0000000000000000000000000000000000000099": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009f": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000aa": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ab": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ac": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ad": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ae": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000af": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b3": { - "balance": "1" - }, - 
"0x00000000000000000000000000000000000000b4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ba": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000be": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bf": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ca": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ce": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cf": { - 
"balance": "1" - }, - "0x00000000000000000000000000000000000000d0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000da": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000db": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000dc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000dd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000de": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000df": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ea": { - "balance": "1" - }, - 
"0x00000000000000000000000000000000000000eb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ec": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ed": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ee": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ef": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fa": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fe": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ff": { - "balance": "1" - }, - "0x4242424242424242424242424242424242424242": { - "balance": "0", - "code": 
"0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600
4018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527ffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527ffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac9550505050505
0565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b60408051600880825281830190925260609160208201818036833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744
461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", - "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", - "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", - "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", - "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", - "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", - "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", - "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", - "0x000000000000000000000000000000000000000000000000000000000000002a": 
"0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", - "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", - "0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", - "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", - "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", - "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", - "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", - "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", - "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", - "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", - "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", - "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", - "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", - "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", - "0x0000000000000000000000000000000000000000000000000000000000000038": 
"0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", - "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", - "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", - "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", - "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", - "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", - "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", - "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", - "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" - } - }, - "0xf97e180c050e5Ab072211Ad2C213Eb5AEE4DF134": { - "balance": "10000000000000000000000000" - }, - "0x2cA5F489CC1Fd1CEC24747B64E8dE0F4A6A850E1": { - "balance": "10000000000000000000000000" - }, - "0x7203bd333a874D9d329050ecE393820fCD501eaA": { - "balance": "10000000000000000000000000" - }, - "0xA51918aA40D78Ff8be939bf0E8404252875c6aDF": { - "balance": "10000000000000000000000000" - }, - "0xAA81078e6b2121dd7A846690DFdD6b10d7658d8B": { - "balance": "10000000000000000000000000" - }, - "0xFA2d31D8f21c1D1633E9BEB641dF77D21D63ccDd": { - "balance": "10000000000000000000000000" - }, - "0xf751C9c6d60614226fE57D2cAD6e10C856a2ddA3": { - "balance": "10000000000000000000000000" - }, - "0x9cD16887f6A808AEaa65D3c840f059EeA4ca1319": { - "balance": "10000000000000000000000000" - }, - 
"0x2E07043584F11BFF0AC39c927665DF6c6ebaffFB": { - "balance": "10000000000000000000000000" - }, - "0x60e771E5eCA8E26690920de669520Da210D64A9B": { - "balance": "10000000000000000000000000" - }, - "0xFC4db92C2Cf77CE02fBfd7Da0346d2CbFA66aD59": { - "balance": "10000000000000000000000000" - } -} \ No newline at end of file diff --git a/core/allocs/kovan.json b/core/allocs/kovan.json deleted file mode 100644 index 1821842a563..00000000000 --- a/core/allocs/kovan.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "0x0000000000000000000000000000000000000001": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000003": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000004": { - "balance": "0x1" - }, - "0x00521965e7bd230323c423d96c657db5b79d099f": { - "balance": "1606938044258990275541962092341162602522202993782792835301376" - } -} \ No newline at end of file diff --git a/core/allocs/yolov3.json b/core/allocs/yolov3.json deleted file mode 100644 index 51da18425ab..00000000000 --- a/core/allocs/yolov3.json +++ /dev/null @@ -1,797 +0,0 @@ -{ - "0x0000000000000000000000000000000000000000": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000001": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000003": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000004": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000005": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000006": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000007": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000008": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000009": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000000a": { - "balance": "0x1" - }, - 
"0x000000000000000000000000000000000000000b": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000000c": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000000d": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000000e": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000000f": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000010": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000011": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000012": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000013": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000014": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000015": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000016": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000017": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000018": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000019": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000001a": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000001b": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000001c": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000001d": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000001e": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000001f": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000020": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000021": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000022": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000023": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000024": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000025": { - "balance": "0x1" - }, - 
"0x0000000000000000000000000000000000000026": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000027": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000028": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000029": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000002a": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000002b": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000002c": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000002d": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000002e": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000002f": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000030": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000031": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000032": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000033": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000034": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000035": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000036": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000037": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000038": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000039": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000003a": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000003b": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000003c": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000003d": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000003e": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000003f": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000040": { - "balance": "0x1" - }, - 
"0x0000000000000000000000000000000000000041": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000042": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000043": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000044": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000045": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000046": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000047": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000048": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000049": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000004a": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000004b": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000004c": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000004d": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000004e": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000004f": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000050": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000051": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000052": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000053": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000054": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000055": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000056": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000057": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000058": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000059": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000005a": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000005b": { - "balance": "0x1" - }, - 
"0x000000000000000000000000000000000000005c": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000005d": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000005e": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000005f": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000060": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000061": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000062": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000063": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000064": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000065": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000066": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000067": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000068": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000069": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000006a": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000006b": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000006c": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000006d": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000006e": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000006f": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000070": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000071": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000072": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000073": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000074": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000075": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000076": { - "balance": "0x1" - }, - 
"0x0000000000000000000000000000000000000077": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000078": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000079": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000007a": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000007b": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000007c": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000007d": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000007e": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000007f": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000080": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000081": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000082": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000083": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000084": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000085": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000086": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000087": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000088": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000089": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000008a": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000008b": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000008c": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000008d": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000008e": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000008f": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000090": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000091": { - "balance": "0x1" - }, - 
"0x0000000000000000000000000000000000000092": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000093": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000094": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000095": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000096": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000097": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000098": { - "balance": "0x1" - }, - "0x0000000000000000000000000000000000000099": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000009a": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000009b": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000009c": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000009d": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000009e": { - "balance": "0x1" - }, - "0x000000000000000000000000000000000000009f": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000a0": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000a1": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000a2": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000a3": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000a4": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000a5": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000a6": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000a7": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000a8": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000a9": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000aa": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000ab": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000ac": { - "balance": "0x1" - }, - 
"0x00000000000000000000000000000000000000ad": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000ae": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000af": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000b0": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000b1": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000b2": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000b3": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000b4": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000b5": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000b6": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000b7": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000b8": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000b9": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000ba": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000bb": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000bc": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000bd": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000be": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000bf": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000c0": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000c1": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000c2": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000c3": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000c4": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000c5": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000c6": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000c7": { - "balance": "0x1" - }, - 
"0x00000000000000000000000000000000000000c8": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000c9": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000ca": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000cb": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000cc": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000cd": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000ce": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000cf": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000d0": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000d1": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000d2": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000d3": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000d4": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000d5": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000d6": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000d7": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000d8": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000d9": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000da": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000db": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000dc": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000dd": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000de": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000df": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000e0": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000e1": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000e2": { - "balance": "0x1" - }, - 
"0x00000000000000000000000000000000000000e3": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000e4": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000e5": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000e6": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000e7": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000e8": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000e9": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000ea": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000eb": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000ec": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000ed": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000ee": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000ef": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000f0": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000f1": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000f2": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000f3": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000f4": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000f5": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000f6": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000f7": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000f8": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000f9": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000fa": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000fb": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000fc": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000fd": { - "balance": "0x1" - }, - 
"0x00000000000000000000000000000000000000fe": { - "balance": "0x1" - }, - "0x00000000000000000000000000000000000000ff": { - "balance": "0x1" - }, - "0x0e89e2aedb1cfcdb9424d41a1f218f4132738172": { - "balance": "0x200000000000000000000000000000000000000000000000000000000000000" - }, - "0x1041afbcb359d5a8dc58c15b2ff51354ff8a217d": { - "balance": "0x200000000000000000000000000000000000000000000000000000000000000" - }, - "0x60adc0f89a41af237ce73554ede170d733ec14e0": { - "balance": "0x200000000000000000000000000000000000000000000000000000000000000" - }, - "0x799d329e5f583419167cd722962485926e338f4a": { - "balance": "0x200000000000000000000000000000000000000000000000000000000000000" - }, - "0x7cf5b79bfe291a67ab02b393e456ccc4c266f753": { - "balance": "0x200000000000000000000000000000000000000000000000000000000000000" - }, - "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { - "balance": "0x200000000000000000000000000000000000000000000000000000000000000" - }, - "0x8ba1f109551bd432803012645ac136ddd64dba72": { - "balance": "0x200000000000000000000000000000000000000000000000000000000000000" - }, - "0xb02a2eda1b317fbd16760128836b0ac59b560e9d": { - "balance": "0x200000000000000000000000000000000000000000000000000000000000000" - }, - "0xdf0a88b2b68c673713a8ec826003676f272e3573": { - "balance": "0x200000000000000000000000000000000000000000000000000000000000000" - } -} diff --git a/core/asm/asm.go b/core/asm/asm.go index 722f68d2382..28295cfd176 100644 --- a/core/asm/asm.go +++ b/core/asm/asm.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// Provides support for dealing with EVM assembly instructions (e.g., disassembling them). +// Package asm provides support for dealing with EVM assembly instructions (e.g., disassembling them). package asm import ( @@ -34,14 +34,14 @@ type instructionIterator struct { started bool } -// Create a new instruction iterator. 
+// NewInstructionIterator creates a new instruction iterator. func NewInstructionIterator(code []byte) *instructionIterator { it := new(instructionIterator) it.code = code return it } -// Returns true if there is a next instruction and moves on. +// Next returns true if there is a next instruction and moves on. func (it *instructionIterator) Next() bool { if it.error != nil || uint64(len(it.code)) <= it.pc { // We previously reached an error or the end. @@ -79,27 +79,27 @@ func (it *instructionIterator) Next() bool { return true } -// Returns any error that may have been encountered. +// Error returns any error that may have been encountered. func (it *instructionIterator) Error() error { return it.error } -// Returns the PC of the current instruction. +// PC returns the PC of the current instruction. func (it *instructionIterator) PC() uint64 { return it.pc } -// Returns the opcode of the current instruction. +// Op returns the opcode of the current instruction. func (it *instructionIterator) Op() vm.OpCode { return it.op } -// Returns the argument of the current instruction. +// Arg returns the argument of the current instruction. func (it *instructionIterator) Arg() []byte { return it.arg } -// Pretty-print all disassembled EVM instructions to stdout. +// PrintDisassembled pretty-prints all disassembled EVM instructions to stdout. func PrintDisassembled(code string) error { script, err := hex.DecodeString(code) if err != nil { @@ -117,7 +117,7 @@ func PrintDisassembled(code string) error { return it.Error() } -// Return all disassembled EVM instructions in human-readable format. +// Disassemble returns all disassembled EVM instructions in human-readable format. 
func Disassemble(script []byte) ([]string, error) { instrs := make([]string, 0) diff --git a/core/asm/asm_test.go b/core/asm/asm_test.go index 92b26b67a5c..3a20d2db747 100644 --- a/core/asm/asm_test.go +++ b/core/asm/asm_test.go @@ -17,9 +17,8 @@ package asm import ( - "testing" - "encoding/hex" + "testing" ) // Tests disassembling the instructions for valid evm code diff --git a/core/asm/compiler.go b/core/asm/compiler.go index bae48bef6cf..8c04dbeb3f2 100644 --- a/core/asm/compiler.go +++ b/core/asm/compiler.go @@ -243,12 +243,12 @@ func (c *Compiler) pushBin(v interface{}) { // isPush returns whether the string op is either any of // push(N). func isPush(op string) bool { - return strings.ToUpper(op) == "PUSH" + return strings.EqualFold(op, "PUSH") } // isJump returns whether the string op is jump(i) func isJump(op string) bool { - return strings.ToUpper(op) == "JUMPI" || strings.ToUpper(op) == "JUMP" + return strings.EqualFold(op, "JUMPI") || strings.EqualFold(op, "JUMP") } // toBinary converts text to a vm.OpCode diff --git a/core/asm/lex_test.go b/core/asm/lex_test.go index 6b8bd3d740b..c3a97aa5087 100644 --- a/core/asm/lex_test.go +++ b/core/asm/lex_test.go @@ -24,7 +24,7 @@ import ( func lexAll(src string) []token { ch := Lex([]byte(src), false) - var tokens []token + var tokens []token // nolint:prealloc for i := range ch { tokens = append(tokens, i) } diff --git a/core/block_builder_parameters.go b/core/block_builder_parameters.go index ee64c1578ce..7beed7987b4 100644 --- a/core/block_builder_parameters.go +++ b/core/block_builder_parameters.go @@ -9,4 +9,5 @@ type BlockBuilderParameters struct { Timestamp uint64 PrevRandao common.Hash SuggestedFeeRecipient common.Address + PayloadId uint64 } diff --git a/core/blockchain.go b/core/blockchain.go index 6b845347df9..23734fee948 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -34,7 +34,6 @@ import ( "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core/state" 
"github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/params" ) @@ -57,16 +56,16 @@ type RejectedTx struct { type RejectedTxs []*RejectedTx type EphemeralExecResult struct { - StateRoot common.Hash `json:"stateRoot"` - TxRoot common.Hash `json:"txRoot"` - ReceiptRoot common.Hash `json:"receiptsRoot"` - LogsHash common.Hash `json:"logsHash"` - Bloom types.Bloom `json:"logsBloom" gencodec:"required"` - Receipts types.Receipts `json:"receipts"` - Rejected RejectedTxs `json:"rejected,omitempty"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - GasUsed math.HexOrDecimal64 `json:"gasUsed"` - ReceiptForStorage *types.ReceiptForStorage `json:"-"` + StateRoot common.Hash `json:"stateRoot"` + TxRoot common.Hash `json:"txRoot"` + ReceiptRoot common.Hash `json:"receiptsRoot"` + LogsHash common.Hash `json:"logsHash"` + Bloom types.Bloom `json:"logsBloom" gencodec:"required"` + Receipts types.Receipts `json:"receipts"` + Rejected RejectedTxs `json:"rejected,omitempty"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` + StateSyncReceipt *types.Receipt `json:"-"` } func ExecuteBlockEphemerallyForBSC( @@ -79,7 +78,6 @@ func ExecuteBlockEphemerallyForBSC( stateWriter state.WriterWithChangeSets, epochReader consensus.EpochReader, chainReader consensus.ChainHeaderReader, - contractHasTEVM func(codeHash common.Hash) (bool, error), statelessExec bool, // for usage of this API via cli tools wherein some of the validations need to be relaxed. 
getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error), ) (*EphemeralExecResult, error) { @@ -129,7 +127,7 @@ func ExecuteBlockEphemerallyForBSC( writeTrace = true } - receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) + receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig) if writeTrace { if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { ftracer.Flush(tx) @@ -184,11 +182,11 @@ func ExecuteBlockEphemerallyForBSC( if chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts { if !statelessExec && receiptSha != block.ReceiptHash() { - return nil, fmt.Errorf("mismatched receipt headers for block %d (%s != %s)", block.NumberU64(), newBlock.ReceiptHash().Hex(), block.Header().ReceiptHash.Hex()) + return nil, fmt.Errorf("mismatched receipt headers for block %d (%s != %s)", block.NumberU64(), receiptSha.Hex(), block.ReceiptHash().Hex()) } } if !statelessExec && newBlock.GasUsed() != header.GasUsed { - return nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) + return nil, fmt.Errorf("gas used by execution: %d, in header: %d, in new Block: %v", *usedGas, header.GasUsed, newBlock.GasUsed()) } var bloom types.Bloom @@ -230,7 +228,6 @@ func ExecuteBlockEphemerally( stateWriter state.WriterWithChangeSets, epochReader consensus.EpochReader, chainReader consensus.ChainHeaderReader, - contractHasTEVM func(codeHash common.Hash) (bool, error), statelessExec bool, // for usage of this API via cli tools wherein some of the validations need to be relaxed. 
getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error), ) (*EphemeralExecResult, error) { @@ -273,7 +270,119 @@ func ExecuteBlockEphemerally( writeTrace = true } - receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) + receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig) + if writeTrace { + if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { + ftracer.Flush(tx) + } + + vmConfig.Tracer = nil + } + if err != nil { + if !statelessExec { + return nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err) + } + rejectedTxs = append(rejectedTxs, &RejectedTx{i, err.Error()}) + } else { + includedTxs = append(includedTxs, tx) + if !vmConfig.NoReceipts { + receipts = append(receipts, receipt) + } + } + } + + receiptSha := types.DeriveSha(receipts) + if !statelessExec && chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts && receiptSha != block.ReceiptHash() { + return nil, fmt.Errorf("mismatched receipt headers for block %d (%s != %s)", block.NumberU64(), receiptSha.Hex(), block.ReceiptHash().Hex()) + } + + if !statelessExec && *usedGas != header.GasUsed { + return nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) + } + + var bloom types.Bloom + if !vmConfig.NoReceipts { + bloom = types.CreateBloom(receipts) + if !statelessExec && bloom != header.Bloom { + return nil, fmt.Errorf("bloom computed by execution: %x, in header: %x", bloom, header.Bloom) + } + } + if !vmConfig.ReadOnly { + txs := block.Transactions() + if _, _, _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, epochReader, chainReader, false); err != nil { + return nil, err + } + } + blockLogs := ibs.Logs() + execRs := &EphemeralExecResult{ + TxRoot: 
types.DeriveSha(includedTxs), + ReceiptRoot: receiptSha, + Bloom: bloom, + LogsHash: rlpHash(blockLogs), + Receipts: receipts, + Difficulty: (*math.HexOrDecimal256)(header.Difficulty), + GasUsed: math.HexOrDecimal64(*usedGas), + Rejected: rejectedTxs, + } + + return execRs, nil +} + +// ExecuteBlockEphemerallyBor runs a block from provided stateReader and +// writes the result to the provided stateWriter +func ExecuteBlockEphemerallyBor( + chainConfig *params.ChainConfig, + vmConfig *vm.Config, + blockHashFunc func(n uint64) common.Hash, + engine consensus.Engine, + block *types.Block, + stateReader state.StateReader, + stateWriter state.WriterWithChangeSets, + epochReader consensus.EpochReader, + chainReader consensus.ChainHeaderReader, + statelessExec bool, // for usage of this API via cli tools wherein some of the validations need to be relaxed. + getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error), +) (*EphemeralExecResult, error) { + + defer blockExecutionTimer.UpdateDuration(time.Now()) + block.Uncles() + ibs := state.New(stateReader) + header := block.Header() + + usedGas := new(uint64) + gp := new(GasPool) + gp.AddGas(block.GasLimit()) + + var ( + rejectedTxs []*RejectedTx + includedTxs types.Transactions + receipts types.Receipts + ) + + if !vmConfig.ReadOnly { + if err := InitializeBlockExecution(engine, chainReader, epochReader, block.Header(), block.Transactions(), block.Uncles(), chainConfig, ibs); err != nil { + return nil, err + } + } + + if chainConfig.DAOForkSupport && chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 { + misc.ApplyDAOHardFork(ibs) + } + noop := state.NewNoopWriter() + //fmt.Printf("====txs processing start: %d====\n", block.NumberU64()) + for i, tx := range block.Transactions() { + ibs.Prepare(tx.Hash(), block.Hash(), i) + writeTrace := false + if vmConfig.Debug && vmConfig.Tracer == nil { + tracer, err := getTracer(i, tx.Hash()) + if err != nil { + return nil, fmt.Errorf("could 
not obtain tracer: %w", err) + } + vmConfig.Tracer = tracer + writeTrace = true + } + + receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig) if writeTrace { if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { ftracer.Flush(tx) @@ -296,7 +405,7 @@ func ExecuteBlockEphemerally( receiptSha := types.DeriveSha(receipts) if !statelessExec && chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts && receiptSha != block.ReceiptHash() { - return nil, fmt.Errorf("mismatched receipt headers for block %d", block.NumberU64()) + return nil, fmt.Errorf("mismatched receipt headers for block %d (%s != %s)", block.NumberU64(), receiptSha.Hex(), block.ReceiptHash().Hex()) } if !statelessExec && *usedGas != header.GasUsed { @@ -312,7 +421,7 @@ func ExecuteBlockEphemerally( } if !vmConfig.ReadOnly { txs := block.Transactions() - if _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, epochReader, chainReader, false); err != nil { + if _, _, _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, epochReader, chainReader, false); err != nil { return nil, err } } @@ -323,33 +432,29 @@ func ExecuteBlockEphemerally( } blockLogs := ibs.Logs() - var stateSyncReceipt *types.ReceiptForStorage + stateSyncReceipt := &types.Receipt{} if chainConfig.Consensus == params.BorConsensus && len(blockLogs) > 0 { - var stateSyncLogs []*types.Log slices.SortStableFunc(blockLogs, func(i, j *types.Log) bool { return i.Index < j.Index }) if len(blockLogs) > len(logs) { - stateSyncLogs = blockLogs[len(logs):] // get state-sync logs from `state.Logs()` + stateSyncReceipt.Logs = blockLogs[len(logs):] // get state-sync logs from `state.Logs()` - types.DeriveFieldsForBorLogs(stateSyncLogs, block.Hash(), block.NumberU64(), uint(len(receipts)), uint(len(logs))) - - 
stateSyncReceipt = &types.ReceiptForStorage{ - Status: types.ReceiptStatusSuccessful, // make receipt status successful - Logs: stateSyncLogs, - } + // fill the state sync with the correct information + types.DeriveFieldsForBorReceipt(stateSyncReceipt, block.Hash(), block.NumberU64(), receipts) + stateSyncReceipt.Status = types.ReceiptStatusSuccessful } } execRs := &EphemeralExecResult{ - TxRoot: types.DeriveSha(includedTxs), - ReceiptRoot: receiptSha, - Bloom: bloom, - LogsHash: rlpHash(blockLogs), - Receipts: receipts, - Difficulty: (*math.HexOrDecimal256)(header.Difficulty), - GasUsed: math.HexOrDecimal64(*usedGas), - Rejected: rejectedTxs, - ReceiptForStorage: stateSyncReceipt, + TxRoot: types.DeriveSha(includedTxs), + ReceiptRoot: receiptSha, + Bloom: bloom, + LogsHash: rlpHash(blockLogs), + Receipts: receipts, + Difficulty: (*math.HexOrDecimal256)(header.Difficulty), + GasUsed: math.HexOrDecimal64(*usedGas), + Rejected: rejectedTxs, + StateSyncReceipt: stateSyncReceipt, } return execRs, nil @@ -363,8 +468,6 @@ func rlpHash(x interface{}) (h common.Hash) { } func SysCallContract(contract common.Address, data []byte, chainConfig params.ChainConfig, ibs *state.IntraBlockState, header *types.Header, engine consensus.Engine) (result []byte, err error) { - gp := new(GasPool).AddGas(50_000_000) - if chainConfig.DAOForkSupport && chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(header.Number) == 0 { misc.ApplyDAOHardFork(ibs) } @@ -373,7 +476,7 @@ func SysCallContract(contract common.Address, data []byte, chainConfig params.Ch state.SystemAddress, &contract, 0, u256.Num0, - 50_000_000, u256.Num0, + math.MaxUint64, u256.Num0, nil, nil, data, nil, false, ) @@ -389,27 +492,21 @@ func SysCallContract(contract common.Address, data []byte, chainConfig params.Ch author = &state.SystemAddress txContext = NewEVMTxContext(msg) } - blockContext := NewEVMBlockContext(header, GetHashFn(header, nil), engine, author, nil) + blockContext := NewEVMBlockContext(header, 
GetHashFn(header, nil), engine, author) evm := vm.NewEVM(blockContext, txContext, ibs, &chainConfig, vmConfig) - if isBor { - ret, _, err := evm.Call( - vm.AccountRef(msg.From()), - *msg.To(), - msg.Data(), - msg.Gas(), - msg.Value(), - false, - ) - if err != nil { - return nil, nil - } - return ret, nil - } - res, err := ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) - if err != nil { - return nil, err + + ret, _, err := evm.Call( + vm.AccountRef(msg.From()), + *msg.To(), + msg.Data(), + msg.Gas(), + msg.Value(), + false, + ) + if isBor && err != nil { + return nil, nil } - return res.ReturnData, nil + return ret, err } // from the null sender, with 50M gas. @@ -433,7 +530,7 @@ func CallContract(contract common.Address, data []byte, chainConfig params.Chain return nil, fmt.Errorf("SysCallContract: %w ", err) } vmConfig := vm.Config{NoReceipts: true} - _, result, err = ApplyTransaction(&chainConfig, GetHashFn(header, nil), engine, &state.SystemAddress, gp, ibs, noop, header, tx, &gasUsed, vmConfig, nil) + _, result, err = ApplyTransaction(&chainConfig, GetHashFn(header, nil), engine, &state.SystemAddress, gp, ibs, noop, header, tx, &gasUsed, vmConfig) if err != nil { return result, fmt.Errorf("SysCallContract: %w ", err) } @@ -451,51 +548,30 @@ func CallContractTx(contract common.Address, data []byte, ibs *state.IntraBlockS func FinalizeBlockExecution(engine consensus.Engine, stateReader state.StateReader, header *types.Header, txs types.Transactions, uncles []*types.Header, stateWriter state.WriterWithChangeSets, cc *params.ChainConfig, ibs *state.IntraBlockState, receipts types.Receipts, e consensus.EpochReader, headerReader consensus.ChainHeaderReader, isMining bool, -) (newBlock *types.Block, err error) { +) (newBlock *types.Block, newTxs types.Transactions, newReceipt types.Receipts, err error) { syscall := func(contract common.Address, data []byte) ([]byte, error) { return SysCallContract(contract, data, *cc, ibs, header, engine) } if 
isMining { - newBlock, _, _, err = engine.FinalizeAndAssemble(cc, header, ibs, txs, uncles, receipts, e, headerReader, syscall, nil) + newBlock, newTxs, newReceipt, err = engine.FinalizeAndAssemble(cc, header, ibs, txs, uncles, receipts, e, headerReader, syscall, nil) } else { _, _, err = engine.Finalize(cc, header, ibs, txs, uncles, receipts, e, headerReader, syscall) } if err != nil { - return nil, err - } - - var originalSystemAcc *accounts.Account - if cc.ChainID.Uint64() == 77 { // hack for Sokol - don't understand why eip158 is enabled, but OE still save SystemAddress with nonce=0 - n := ibs.GetNonce(state.SystemAddress) //hack - because syscall must use ApplyMessage instead of ApplyTx (and don't create tx at all). But CallContract must create tx. - if n > 0 { - var err error - originalSystemAcc, err = stateReader.ReadAccountData(state.SystemAddress) - if err != nil { - return nil, err - } - } + return nil, nil, nil, err } if err := ibs.CommitBlock(cc.Rules(header.Number.Uint64()), stateWriter); err != nil { - return nil, fmt.Errorf("committing block %d failed: %w", header.Number.Uint64(), err) - } - - if originalSystemAcc != nil { // hack for Sokol - don't understand why eip158 is enabled, but OE still save SystemAddress with nonce=0 - acc := accounts.NewAccount() - acc.Nonce = 0 - if err := stateWriter.UpdateAccountData(state.SystemAddress, originalSystemAcc, &acc); err != nil { - return nil, err - } + return nil, nil, nil, fmt.Errorf("committing block %d failed: %w", header.Number.Uint64(), err) } if err := stateWriter.WriteChangeSets(); err != nil { - return nil, fmt.Errorf("writing changesets for block %d failed: %w", header.Number.Uint64(), err) + return nil, nil, nil, fmt.Errorf("writing changesets for block %d failed: %w", header.Number.Uint64(), err) } - return newBlock, nil + return newBlock, newTxs, newReceipt, nil } func InitializeBlockExecution(engine consensus.Engine, chain consensus.ChainHeaderReader, epochReader consensus.EpochReader, header 
*types.Header, txs types.Transactions, uncles []*types.Header, cc *params.ChainConfig, ibs *state.IntraBlockState) error { - // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) engine.Initialize(cc, chain, epochReader, header, txs, uncles, func(contract common.Address, data []byte) ([]byte, error) { return SysCallContract(contract, data, *cc, ibs, header, engine) }) diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go deleted file mode 100644 index 9d3d66e4d47..00000000000 --- a/core/blockchain_insert.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "github.com/ledgerwatch/erigon/common/mclock" -) - -// InsertStats tracks and reports on block insertion. -type InsertStats struct { - queued, lastIndex, ignored int - UsedGas uint64 - Processed int - StartTime mclock.AbsTime -} diff --git a/core/bloombits/doc.go b/core/bloombits/doc.go deleted file mode 100644 index 3d159e74f77..00000000000 --- a/core/bloombits/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package bloombits implements bloom filtering on batches of data. -package bloombits diff --git a/core/bloombits/generator.go b/core/bloombits/generator.go deleted file mode 100644 index a7182f8c9ed..00000000000 --- a/core/bloombits/generator.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package bloombits - -import ( - "errors" - - "github.com/ledgerwatch/erigon/core/types" -) - -var ( - // errSectionOutOfBounds is returned if the user tried to add more bloom filters - // to the batch than available space, or if tries to retrieve above the capacity. - errSectionOutOfBounds = errors.New("section out of bounds") - - // errBloomBitOutOfBounds is returned if the user tried to retrieve specified - // bit bloom above the capacity. - errBloomBitOutOfBounds = errors.New("bloom bit out of bounds") -) - -// Generator takes a number of bloom filters and generates the rotated bloom bits -// to be used for batched filtering. -type Generator struct { - blooms [types.BloomBitLength][]byte // Rotated blooms for per-bit matching - sections uint // Number of sections to batch together - nextSec uint // Next section to set when adding a bloom -} - -// NewGenerator creates a rotated bloom generator that can iteratively fill a -// batched bloom filter's bits. -func NewGenerator(sections uint) (*Generator, error) { - if sections%8 != 0 { - return nil, errors.New("section count not multiple of 8") - } - b := &Generator{sections: sections} - for i := 0; i < types.BloomBitLength; i++ { - b.blooms[i] = make([]byte, sections/8) - } - return b, nil -} - -// AddBloom takes a single bloom filter and sets the corresponding bit column -// in memory accordingly. 
-func (b *Generator) AddBloom(index uint, bloom types.Bloom) error { - // Make sure we're not adding more bloom filters than our capacity - if b.nextSec >= b.sections { - return errSectionOutOfBounds - } - if b.nextSec != index { - return errors.New("bloom filter with unexpected index") - } - // Rotate the bloom and insert into our collection - byteIndex := b.nextSec / 8 - bitIndex := byte(7 - b.nextSec%8) - for byt := 0; byt < types.BloomByteLength; byt++ { - bloomByte := bloom[types.BloomByteLength-1-byt] - if bloomByte == 0 { - continue - } - base := 8 * byt - b.blooms[base+7][byteIndex] |= ((bloomByte >> 7) & 1) << bitIndex - b.blooms[base+6][byteIndex] |= ((bloomByte >> 6) & 1) << bitIndex - b.blooms[base+5][byteIndex] |= ((bloomByte >> 5) & 1) << bitIndex - b.blooms[base+4][byteIndex] |= ((bloomByte >> 4) & 1) << bitIndex - b.blooms[base+3][byteIndex] |= ((bloomByte >> 3) & 1) << bitIndex - b.blooms[base+2][byteIndex] |= ((bloomByte >> 2) & 1) << bitIndex - b.blooms[base+1][byteIndex] |= ((bloomByte >> 1) & 1) << bitIndex - b.blooms[base][byteIndex] |= (bloomByte & 1) << bitIndex - } - b.nextSec++ - return nil -} - -// Bitset returns the bit vector belonging to the given bit index after all -// blooms have been added. -func (b *Generator) Bitset(idx uint) ([]byte, error) { - if b.nextSec != b.sections { - return nil, errors.New("bloom not fully generated yet") - } - if idx >= types.BloomBitLength { - return nil, errBloomBitOutOfBounds - } - return b.blooms[idx], nil -} diff --git a/core/bloombits/generator_test.go b/core/bloombits/generator_test.go deleted file mode 100644 index f4032b4baba..00000000000 --- a/core/bloombits/generator_test.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package bloombits - -import ( - "bytes" - "math/rand" - "testing" - - "github.com/ledgerwatch/erigon/core/types" -) - -// Tests that batched bloom bits are correctly rotated from the input bloom -// filters. -func TestGenerator(t *testing.T) { - // Generate the input and the rotated output - var input, output [types.BloomBitLength][types.BloomByteLength]byte - - for i := 0; i < types.BloomBitLength; i++ { - for j := 0; j < types.BloomBitLength; j++ { - bit := byte(rand.Int() % 2) - - input[i][j/8] |= bit << byte(7-j%8) - output[types.BloomBitLength-1-j][i/8] |= bit << byte(7-i%8) - } - } - // Crunch the input through the generator and verify the result - gen, err := NewGenerator(types.BloomBitLength) - if err != nil { - t.Fatalf("failed to create bloombit generator: %v", err) - } - for i, bloom := range input { - if err := gen.AddBloom(uint(i), bloom); err != nil { - t.Fatalf("bloom %d: failed to add: %v", i, err) - } - } - for i, want := range output { - have, err := gen.Bitset(uint(i)) - if err != nil { - t.Fatalf("output %d: failed to retrieve bits: %v", i, err) - } - if !bytes.Equal(have, want[:]) { - t.Errorf("output %d: bit vector mismatch have %x, want %x", i, have, want) - } - } -} - -func BenchmarkGenerator(b *testing.B) { - var input 
[types.BloomBitLength][types.BloomByteLength]byte - b.Run("empty", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - // Crunch the input through the generator and verify the result - gen, err := NewGenerator(types.BloomBitLength) - if err != nil { - b.Fatalf("failed to create bloombit generator: %v", err) - } - for j, bloom := range input { - if err := gen.AddBloom(uint(j), bloom); err != nil { - b.Fatalf("bloom %d: failed to add: %v", i, err) - } - } - } - }) - for i := 0; i < types.BloomBitLength; i++ { - rand.Read(input[i][:]) - } - b.Run("random", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - // Crunch the input through the generator and verify the result - gen, err := NewGenerator(types.BloomBitLength) - if err != nil { - b.Fatalf("failed to create bloombit generator: %v", err) - } - for j, bloom := range input { - if err := gen.AddBloom(uint(j), bloom); err != nil { - b.Fatalf("bloom %d: failed to add: %v", i, err) - } - } - } - }) -} diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go deleted file mode 100644 index f04d5583052..00000000000 --- a/core/bloombits/matcher.go +++ /dev/null @@ -1,644 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package bloombits - -import ( - "bytes" - "context" - "errors" - "math" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/ledgerwatch/erigon/common/bitutil" - "github.com/ledgerwatch/erigon/common/debug" - "github.com/ledgerwatch/erigon/crypto" -) - -// bloomIndexes represents the bit indexes inside the bloom filter that belong -// to some key. -type bloomIndexes [3]uint - -// calcBloomIndexes returns the bloom filter bit indexes belonging to the given key. -func calcBloomIndexes(b []byte) bloomIndexes { - b = crypto.Keccak256(b) - - var idxs bloomIndexes - for i := 0; i < len(idxs); i++ { - idxs[i] = (uint(b[2*i])<<8)&2047 + uint(b[2*i+1]) - } - return idxs -} - -// partialMatches with a non-nil vector represents a section in which some sub- -// matchers have already found potential matches. Subsequent sub-matchers will -// binary AND their matches with this vector. If vector is nil, it represents a -// section to be processed by the first sub-matcher. -type partialMatches struct { - section uint64 - bitset []byte -} - -// Retrieval represents a request for retrieval task assignments for a given -// bit with the given number of fetch elements, or a response for such a request. -// It can also have the actual results set to be used as a delivery data struct. -// -// The contest and error fields are used by the light client to terminate matching -// early if an error is encountered on some path of the pipeline. -type Retrieval struct { - Bit uint - Sections []uint64 - Bitsets [][]byte - - Context context.Context - Error error -} - -// Matcher is a pipelined system of schedulers and logic matchers which perform -// binary AND/OR operations on the bit-streams, creating a stream of potential -// blocks to inspect for data content. 
-type Matcher struct { - sectionSize uint64 // Size of the data batches to filter on - - filters [][]bloomIndexes // Filter the system is matching for - schedulers map[uint]*scheduler // Retrieval schedulers for loading bloom bits - - retrievers chan chan uint // Retriever processes waiting for bit allocations - counters chan chan uint // Retriever processes waiting for task count reports - retrievals chan chan *Retrieval // Retriever processes waiting for task allocations - deliveries chan *Retrieval // Retriever processes waiting for task response deliveries - - running uint32 // Atomic flag whether a session is live or not -} - -// NewMatcher creates a new pipeline for retrieving bloom bit streams and doing -// address and topic filtering on them. Setting a filter component to `nil` is -// allowed and will result in that filter rule being skipped (OR 0x11...1). -func NewMatcher(sectionSize uint64, filters [][][]byte) *Matcher { - // Create the matcher instance - m := &Matcher{ - sectionSize: sectionSize, - schedulers: make(map[uint]*scheduler), - retrievers: make(chan chan uint), - counters: make(chan chan uint), - retrievals: make(chan chan *Retrieval), - deliveries: make(chan *Retrieval), - } - // Calculate the bloom bit indexes for the groups we're interested in - m.filters = nil - - for _, filter := range filters { - // Gather the bit indexes of the filter rule, special casing the nil filter - if len(filter) == 0 { - continue - } - bloomBits := make([]bloomIndexes, len(filter)) - for i, clause := range filter { - if clause == nil { - bloomBits = nil - break - } - bloomBits[i] = calcBloomIndexes(clause) - } - // Accumulate the filter rules if no nil rule was within - if bloomBits != nil { - m.filters = append(m.filters, bloomBits) - } - } - // For every bit, create a scheduler to load/download the bit vectors - for _, bloomIndexLists := range m.filters { - for _, bloomIndexList := range bloomIndexLists { - for _, bloomIndex := range bloomIndexList { - 
m.addScheduler(bloomIndex) - } - } - } - return m -} - -// addScheduler adds a bit stream retrieval scheduler for the given bit index if -// it has not existed before. If the bit is already selected for filtering, the -// existing scheduler can be used. -func (m *Matcher) addScheduler(idx uint) { - if _, ok := m.schedulers[idx]; ok { - return - } - m.schedulers[idx] = newScheduler(idx) -} - -// Start starts the matching process and returns a stream of bloom matches in -// a given range of blocks. If there are no more matches in the range, the result -// channel is closed. -func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uint64) (*MatcherSession, error) { - // Make sure we're not creating concurrent sessions - if atomic.SwapUint32(&m.running, 1) == 1 { - return nil, errors.New("matcher already running") - } - defer atomic.StoreUint32(&m.running, 0) - - // Initiate a new matching round - session := &MatcherSession{ - matcher: m, - quit: make(chan struct{}), - ctx: ctx, - } - for _, scheduler := range m.schedulers { - scheduler.reset() - } - sink := m.run(begin, end, cap(results), session) - - // Read the output from the result sink and deliver to the user - session.pend.Add(1) - go func() { - defer debug.LogPanic() - defer session.pend.Done() - defer close(results) - - for { - select { - case <-session.quit: - return - - case res, ok := <-sink: - // New match result found - if !ok { - return - } - // Calculate the first and last blocks of the section - sectionStart := res.section * m.sectionSize - - first := sectionStart - if begin > first { - first = begin - } - last := sectionStart + m.sectionSize - 1 - if end < last { - last = end - } - // Iterate over all the blocks in the section and return the matching ones - for i := first; i <= last; i++ { - // Skip the entire byte if no matches are found inside (and we're processing an entire byte!) 
- next := res.bitset[(i-sectionStart)/8] - if next == 0 { - if i%8 == 0 { - i += 7 - } - continue - } - // Some bit it set, do the actual submatching - if bit := 7 - i%8; next&(1<= req.section }) - requests[req.bit] = append(queue[:index], append([]uint64{req.section}, queue[index:]...)...) - - // If it's a new bit and we have waiting fetchers, allocate to them - if len(queue) == 0 { - assign(req.bit) - } - - case fetcher := <-retrievers: - // New retriever arrived, find the lowest section-ed bit to assign - bit, best := uint(0), uint64(math.MaxUint64) - for idx := range unallocs { - if requests[idx][0] < best { - bit, best = idx, requests[idx][0] - } - } - // Stop tracking this bit (and alloc notifications if no more work is available) - delete(unallocs, bit) - if len(unallocs) == 0 { - retrievers = nil - } - allocs++ - fetcher <- bit - - case fetcher := <-m.counters: - // New task count request arrives, return number of items - fetcher <- uint(len(requests[<-fetcher])) - - case fetcher := <-m.retrievals: - // New fetcher waiting for tasks to retrieve, assign - task := <-fetcher - if want := len(task.Sections); want >= len(requests[task.Bit]) { - task.Sections = requests[task.Bit] - delete(requests, task.Bit) - } else { - task.Sections = append(task.Sections[:0], requests[task.Bit][:want]...) - requests[task.Bit] = append(requests[task.Bit][:0], requests[task.Bit][want:]...) 
- } - fetcher <- task - - // If anything was left unallocated, try to assign to someone else - if len(requests[task.Bit]) > 0 { - assign(task.Bit) - } - - case result := <-m.deliveries: - // New retrieval task response from fetcher, split out missing sections and - // deliver complete ones - var ( - sections = make([]uint64, 0, len(result.Sections)) - bitsets = make([][]byte, 0, len(result.Bitsets)) - missing = make([]uint64, 0, len(result.Sections)) - ) - for i, bitset := range result.Bitsets { - if len(bitset) == 0 { - missing = append(missing, result.Sections[i]) - continue - } - sections = append(sections, result.Sections[i]) - bitsets = append(bitsets, bitset) - } - m.schedulers[result.Bit].deliver(sections, bitsets) - allocs-- - - // Reschedule missing sections and allocate bit if newly available - if len(missing) > 0 { - queue := requests[result.Bit] - for _, section := range missing { - index := sort.Search(len(queue), func(i int) bool { return queue[i] >= section }) - queue = append(queue[:index], append([]uint64{section}, queue[index:]...)...) - } - requests[result.Bit] = queue - - if len(queue) == len(missing) { - assign(result.Bit) - } - } - - // End the session when all pending deliveries have arrived. - if shutdown == nil && allocs == 0 { - return - } - } - } -} - -// MatcherSession is returned by a started matcher to be used as a terminator -// for the actively running matching operation. -type MatcherSession struct { - matcher *Matcher - - closer sync.Once // Sync object to ensure we only ever close once - quit chan struct{} // Quit channel to request pipeline termination - - ctx context.Context // Context used by the light client to abort filtering - err atomic.Value // Global error to track retrieval failures deep in the chain - - pend sync.WaitGroup -} - -// Close stops the matching process and waits for all subprocesses to terminate -// before returning. 
The timeout may be used for graceful shutdown, allowing the -// currently running retrievals to complete before this time. -func (s *MatcherSession) Close() { - s.closer.Do(func() { - // Signal termination and wait for all goroutines to tear down - close(s.quit) - s.pend.Wait() - }) -} - -// Error returns any failure encountered during the matching session. -func (s *MatcherSession) Error() error { - if err := s.err.Load(); err != nil { - return err.(error) - } - return nil -} - -// allocateRetrieval assigns a bloom bit index to a client process that can either -// immediately request and fetch the section contents assigned to this bit or wait -// a little while for more sections to be requested. -func (s *MatcherSession) allocateRetrieval() (uint, bool) { - fetcher := make(chan uint) - - select { - case <-s.quit: - return 0, false - case s.matcher.retrievers <- fetcher: - bit, ok := <-fetcher - return bit, ok - } -} - -// pendingSections returns the number of pending section retrievals belonging to -// the given bloom bit index. -func (s *MatcherSession) pendingSections(bit uint) int { - fetcher := make(chan uint) - - select { - case <-s.quit: - return 0 - case s.matcher.counters <- fetcher: - fetcher <- bit - return int(<-fetcher) - } -} - -// allocateSections assigns all or part of an already allocated bit-task queue -// to the requesting process. -func (s *MatcherSession) allocateSections(bit uint, count int) []uint64 { - fetcher := make(chan *Retrieval) - - select { - case <-s.quit: - return nil - case s.matcher.retrievals <- fetcher: - task := &Retrieval{ - Bit: bit, - Sections: make([]uint64, count), - } - fetcher <- task - return (<-fetcher).Sections - } -} - -// deliverSections delivers a batch of section bit-vectors for a specific bloom -// bit index to be injected into the processing pipeline. 
-func (s *MatcherSession) deliverSections(bit uint, sections []uint64, bitsets [][]byte) { - s.matcher.deliveries <- &Retrieval{Bit: bit, Sections: sections, Bitsets: bitsets} -} - -// Multiplex polls the matcher session for retrieval tasks and multiplexes it into -// the requested retrieval queue to be serviced together with other sessions. -// -// This method will block for the lifetime of the session. Even after termination -// of the session, any request in-flight need to be responded to! Empty responses -// are fine though in that case. -func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan *Retrieval) { - for { - // Allocate a new bloom bit index to retrieve data for, stopping when done - bit, ok := s.allocateRetrieval() - if !ok { - return - } - // Bit allocated, throttle a bit if we're below our batch limit - if s.pendingSections(bit) < batch { - select { - case <-s.quit: - // Session terminating, we can't meaningfully service, abort - s.allocateSections(bit, 0) - s.deliverSections(bit, []uint64{}, [][]byte{}) - return - - case <-time.After(wait): - // Throttling up, fetch whatever's available - } - } - // Allocate as much as we can handle and request servicing - sections := s.allocateSections(bit, batch) - request := make(chan *Retrieval) - - select { - case <-s.quit: - // Session terminating, we can't meaningfully service, abort - s.deliverSections(bit, sections, make([][]byte, len(sections))) - return - - case mux <- request: - // Retrieval accepted, something must arrive before we're aborting - request <- &Retrieval{Bit: bit, Sections: sections, Context: s.ctx} - - result := <-request - if result.Error != nil { - s.err.Store(result.Error) - s.Close() - } - s.deliverSections(result.Bit, result.Sections, result.Bitsets) - } - } -} diff --git a/core/bloombits/matcher_test.go b/core/bloombits/matcher_test.go deleted file mode 100644 index ece7aa5ca53..00000000000 --- a/core/bloombits/matcher_test.go +++ /dev/null @@ -1,292 +0,0 @@ 
-// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package bloombits - -import ( - "context" - "math/rand" - "sync/atomic" - "testing" - "time" - - "github.com/ledgerwatch/erigon/common" -) - -const testSectionSize = 4096 - -// Tests that wildcard filter rules (nil) can be specified and are handled well. 
-func TestMatcherWildcards(t *testing.T) { - t.Parallel() - matcher := NewMatcher(testSectionSize, [][][]byte{ - {common.Address{}.Bytes(), common.Address{0x01}.Bytes()}, // Default address is not a wildcard - {common.Hash{}.Bytes(), common.Hash{0x01}.Bytes()}, // Default hash is not a wildcard - {common.Hash{0x01}.Bytes()}, // Plain rule, sanity check - {common.Hash{0x01}.Bytes(), nil}, // Wildcard suffix, drop rule - {nil, common.Hash{0x01}.Bytes()}, // Wildcard prefix, drop rule - {nil, nil}, // Wildcard combo, drop rule - {}, // Inited wildcard rule, drop rule - nil, // Proper wildcard rule, drop rule - }) - if len(matcher.filters) != 3 { - t.Fatalf("filter system size mismatch: have %d, want %d", len(matcher.filters), 3) - } - if len(matcher.filters[0]) != 2 { - t.Fatalf("address clause size mismatch: have %d, want %d", len(matcher.filters[0]), 2) - } - if len(matcher.filters[1]) != 2 { - t.Fatalf("combo topic clause size mismatch: have %d, want %d", len(matcher.filters[1]), 2) - } - if len(matcher.filters[2]) != 1 { - t.Fatalf("singletone topic clause size mismatch: have %d, want %d", len(matcher.filters[2]), 1) - } -} - -// Tests the matcher pipeline on a single continuous workflow without interrupts. -func TestMatcherContinuous(t *testing.T) { - t.Parallel() - testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 0, 100000, false, 75) - testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 0, 100000, false, 81) - testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 0, 10000, false, 36) -} - -// Tests the matcher pipeline on a constantly interrupted and resumed work pattern -// with the aim of ensuring data items are requested only once. 
-func TestMatcherIntermittent(t *testing.T) { - t.Parallel() - testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 0, 100000, true, 75) - testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 0, 100000, true, 81) - testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 0, 10000, true, 36) -} - -// Tests the matcher pipeline on random input to hopefully catch anomalies. -func TestMatcherRandom(t *testing.T) { - t.Parallel() - for i := 0; i < 10; i++ { - testMatcherBothModes(t, makeRandomIndexes([]int{1}, 50), 0, 10000, 0) - testMatcherBothModes(t, makeRandomIndexes([]int{3}, 50), 0, 10000, 0) - testMatcherBothModes(t, makeRandomIndexes([]int{2, 2, 2}, 20), 0, 10000, 0) - testMatcherBothModes(t, makeRandomIndexes([]int{5, 5, 5}, 50), 0, 10000, 0) - testMatcherBothModes(t, makeRandomIndexes([]int{4, 4, 4}, 20), 0, 10000, 0) - } -} - -// Tests that the matcher can properly find matches if the starting block is -// shifter from a multiple of 8. This is needed to cover an optimisation with -// bitset matching https://github.com/ledgerwatch/erigon/issues/15309. -func TestMatcherShifted(t *testing.T) { - t.Parallel() - // Block 0 always matches in the tests, skip ahead of first 8 blocks with the - // start to get a potential zero byte in the matcher bitset. - - // To keep the second bitset byte zero, the filter must only match for the first - // time in block 16, so doing an all-16 bit filter should suffice. - - // To keep the starting block non divisible by 8, block number 9 is the first - // that would introduce a shift and not match block 0. - testMatcherBothModes(t, [][]bloomIndexes{{{16, 16, 16}}}, 9, 64, 0) -} - -// Tests that matching on everything doesn't crash (special case internally). 
-func TestWildcardMatcher(t *testing.T) { - t.Parallel() - testMatcherBothModes(t, nil, 0, 10000, 0) -} - -// makeRandomIndexes generates a random filter system, composed on multiple filter -// criteria, each having one bloom list component for the address and arbitrarily -// many topic bloom list components. -func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes { - res := make([][]bloomIndexes, len(lengths)) - for i, topics := range lengths { - res[i] = make([]bloomIndexes, topics) - for j := 0; j < topics; j++ { - for k := 0; k < len(res[i][j]); k++ { - res[i][j][k] = uint(rand.Intn(max-1) + 2) - } - } - } - return res -} - -// testMatcherDiffBatches runs the given matches test in single-delivery and also -// in batches delivery mode, verifying that all kinds of deliveries are handled -// correctly withn. -func testMatcherDiffBatches(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, intermittent bool, retrievals uint32) { - singleton := testMatcher(t, filter, start, blocks, intermittent, retrievals, 1) - batched := testMatcher(t, filter, start, blocks, intermittent, retrievals, 16) - - if singleton != batched { - t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, %v in signleton vs. %v in batched mode", filter, blocks, intermittent, singleton, batched) - } -} - -// testMatcherBothModes runs the given matcher test in both continuous as well as -// in intermittent mode, verifying that the request counts match each other. -func testMatcherBothModes(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, retrievals uint32) { - continuous := testMatcher(t, filter, start, blocks, false, retrievals, 16) - intermittent := testMatcher(t, filter, start, blocks, true, retrievals, 16) - - if continuous != intermittent { - t.Errorf("filter = %v blocks = %v: request count mismatch, %v in continuous vs. 
%v in intermittent mode", filter, blocks, continuous, intermittent) - } -} - -// testMatcher is a generic tester to run the given matcher test and return the -// number of requests made for cross validation between different modes. -func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, intermittent bool, retrievals uint32, maxReqCount int) uint32 { - // Create a new matcher an simulate our explicit random bitsets - matcher := NewMatcher(testSectionSize, nil) - matcher.filters = filter - - for _, rule := range filter { - for _, topic := range rule { - for _, bit := range topic { - matcher.addScheduler(bit) - } - } - } - // Track the number of retrieval requests made - var requested uint32 - - // Start the matching session for the filter and the retriever goroutines - quit := make(chan struct{}) - matches := make(chan uint64, 16) - - session, err := matcher.Start(context.Background(), start, blocks-1, matches) - if err != nil { - t.Fatalf("failed to stat matcher session: %v", err) - } - startRetrievers(session, quit, &requested, maxReqCount) - - // Iterate over all the blocks and verify that the pipeline produces the correct matches - for i := start; i < blocks; i++ { - if expMatch3(filter, i) { - match, ok := <-matches - if !ok { - t.Errorf("filter = %v blocks = %v intermittent = %v: expected #%v, results channel closed", filter, blocks, intermittent, i) - return 0 - } - if match != i { - t.Errorf("filter = %v blocks = %v intermittent = %v: expected #%v, got #%v", filter, blocks, intermittent, i, match) - } - // If we're testing intermittent mode, abort and restart the pipeline - if intermittent { - session.Close() - close(quit) - - quit = make(chan struct{}) - matches = make(chan uint64, 16) - - session, err = matcher.Start(context.Background(), i+1, blocks-1, matches) - if err != nil { - t.Fatalf("failed to stat matcher session: %v", err) - } - startRetrievers(session, quit, &requested, maxReqCount) - } - } - } - // Ensure the result 
channel is torn down after the last block - match, ok := <-matches - if ok { - t.Errorf("filter = %v blocks = %v intermittent = %v: expected closed channel, got #%v", filter, blocks, intermittent, match) - } - // Clean up the session and ensure we match the expected retrieval count - session.Close() - close(quit) - - if retrievals != 0 && requested != retrievals { - t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested, retrievals) - } - return requested -} - -// startRetrievers starts a batch of goroutines listening for section requests -// and serving them. -func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *uint32, batch int) { - requests := make(chan chan *Retrieval) - - for i := 0; i < 10; i++ { - // Start a multiplexer to test multiple threaded execution - go session.Multiplex(batch, 100*time.Microsecond, requests) - - // Start a services to match the above multiplexer - go func() { - for { - // Wait for a service request or a shutdown - select { - case <-quit: - return - - case request := <-requests: - task := <-request - - task.Bitsets = make([][]byte, len(task.Sections)) - for i, section := range task.Sections { - if rand.Int()%4 != 0 { // Handle occasional missing deliveries - task.Bitsets[i] = generateBitset(task.Bit, section) - atomic.AddUint32(retrievals, 1) - } - } - request <- task - } - } - }() - } -} - -// generateBitset generates the rotated bitset for the given bloom bit and section -// numbers. 
-func generateBitset(bit uint, section uint64) []byte { - bitset := make([]byte, testSectionSize/8) - for i := 0; i < len(bitset); i++ { - for b := 0; b < 8; b++ { - blockIdx := section*testSectionSize + uint64(i*8+b) - bitset[i] += bitset[i] - if (blockIdx % uint64(bit)) == 0 { - bitset[i]++ - } - } - } - return bitset -} - -func expMatch1(filter bloomIndexes, i uint64) bool { - for _, ii := range filter { - if (i % uint64(ii)) != 0 { - return false - } - } - return true -} - -func expMatch2(filter []bloomIndexes, i uint64) bool { - for _, ii := range filter { - if expMatch1(ii, i) { - return true - } - } - return false -} - -func expMatch3(filter [][]bloomIndexes, i uint64) bool { - for _, ii := range filter { - if !expMatch2(ii, i) { - return false - } - } - return true -} diff --git a/core/bloombits/scheduler.go b/core/bloombits/scheduler.go deleted file mode 100644 index 6449c7465a1..00000000000 --- a/core/bloombits/scheduler.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package bloombits - -import ( - "sync" -) - -// request represents a bloom retrieval task to prioritize and pull from the local -// database or remotely from the network. 
-type request struct { - section uint64 // Section index to retrieve the a bit-vector from - bit uint // Bit index within the section to retrieve the vector of -} - -// response represents the state of a requested bit-vector through a scheduler. -type response struct { - cached []byte // Cached bits to dedup multiple requests - done chan struct{} // Channel to allow waiting for completion -} - -// scheduler handles the scheduling of bloom-filter retrieval operations for -// entire section-batches belonging to a single bloom bit. Beside scheduling the -// retrieval operations, this struct also deduplicates the requests and caches -// the results to minimize network/database overhead even in complex filtering -// scenarios. -type scheduler struct { - bit uint // Index of the bit in the bloom filter this scheduler is responsible for - responses map[uint64]*response // Currently pending retrieval requests or already cached responses - lock sync.Mutex // Lock protecting the responses from concurrent access -} - -// newScheduler creates a new bloom-filter retrieval scheduler for a specific -// bit index. -func newScheduler(idx uint) *scheduler { - return &scheduler{ - bit: idx, - responses: make(map[uint64]*response), - } -} - -// run creates a retrieval pipeline, receiving section indexes from sections and -// returning the results in the same order through the done channel. Concurrent -// runs of the same scheduler are allowed, leading to retrieval task deduplication. -func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) { - // Create a forwarder channel between requests and responses of the same size as - // the distribution channel (since that will block the pipeline anyway). 
- pend := make(chan uint64, cap(dist)) - - // Start the pipeline schedulers to forward between user -> distributor -> user - wg.Add(2) - go s.scheduleRequests(sections, dist, pend, quit, wg) - go s.scheduleDeliveries(pend, done, quit, wg) -} - -// reset cleans up any leftovers from previous runs. This is required before a -// restart to ensure the no previously requested but never delivered state will -// cause a lockup. -func (s *scheduler) reset() { - s.lock.Lock() - defer s.lock.Unlock() - - for section, res := range s.responses { - if res.cached == nil { - delete(s.responses, section) - } - } -} - -// scheduleRequests reads section retrieval requests from the input channel, -// deduplicates the stream and pushes unique retrieval tasks into the distribution -// channel for a database or network layer to honour. -func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) { - // Clean up the goroutine and pipeline when done - defer wg.Done() - defer close(pend) - - // Keep reading and scheduling section requests - for { - select { - case <-quit: - return - - case section, ok := <-reqs: - // New section retrieval requested - if !ok { - return - } - // Deduplicate retrieval requests - unique := false - - s.lock.Lock() - if s.responses[section] == nil { - s.responses[section] = &response{ - done: make(chan struct{}), - } - unique = true - } - s.lock.Unlock() - - // Schedule the section for retrieval and notify the deliverer to expect this section - if unique { - select { - case <-quit: - return - case dist <- &request{bit: s.bit, section: section}: - } - } - select { - case <-quit: - return - case pend <- section: - } - } - } -} - -// scheduleDeliveries reads section acceptance notifications and waits for them -// to be delivered, pushing them into the output data buffer. 
-func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) { - // Clean up the goroutine and pipeline when done - defer wg.Done() - defer close(done) - - // Keep reading notifications and scheduling deliveries - for { - select { - case <-quit: - return - - case idx, ok := <-pend: - // New section retrieval pending - if !ok { - return - } - // Wait until the request is honoured - s.lock.Lock() - res := s.responses[idx] - s.lock.Unlock() - - select { - case <-quit: - return - case <-res.done: - } - // Deliver the result - select { - case <-quit: - return - case done <- res.cached: - } - } - } -} - -// deliver is called by the request distributor when a reply to a request arrives. -func (s *scheduler) deliver(sections []uint64, data [][]byte) { - s.lock.Lock() - defer s.lock.Unlock() - - for i, section := range sections { - if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries - res.cached = data[i] - close(res.done) - } - } -} diff --git a/core/bloombits/scheduler_test.go b/core/bloombits/scheduler_test.go deleted file mode 100644 index 707e8ea11d0..00000000000 --- a/core/bloombits/scheduler_test.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package bloombits - -import ( - "bytes" - "math/big" - "math/rand" - "sync" - "sync/atomic" - "testing" - "time" -) - -// Tests that the scheduler can deduplicate and forward retrieval requests to -// underlying fetchers and serve responses back, irrelevant of the concurrency -// of the requesting clients or serving data fetchers. -func TestSchedulerSingleClientSingleFetcher(t *testing.T) { testScheduler(t, 1, 1, 5000) } -func TestSchedulerSingleClientMultiFetcher(t *testing.T) { testScheduler(t, 1, 10, 5000) } -func TestSchedulerMultiClientSingleFetcher(t *testing.T) { testScheduler(t, 10, 1, 5000) } -func TestSchedulerMultiClientMultiFetcher(t *testing.T) { testScheduler(t, 10, 10, 5000) } - -func testScheduler(t *testing.T, clients int, fetchers int, requests int) { - t.Parallel() - f := newScheduler(0) - - // Create a batch of handler goroutines that respond to bloom bit requests and - // deliver them to the scheduler. 
- var fetchPend sync.WaitGroup - fetchPend.Add(fetchers) - defer fetchPend.Wait() - - fetch := make(chan *request, 16) - defer close(fetch) - - var delivered uint32 - for i := 0; i < fetchers; i++ { - go func() { - defer fetchPend.Done() - - for req := range fetch { - time.Sleep(time.Duration(rand.Intn(int(100 * time.Microsecond)))) - atomic.AddUint32(&delivered, 1) - - f.deliver([]uint64{ - req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds) - req.section, // Requested data - req.section, // Duplicated data (ensure it doesn't double close anything) - }, [][]byte{ - {}, - new(big.Int).SetUint64(req.section).Bytes(), - new(big.Int).SetUint64(req.section).Bytes(), - }) - } - }() - } - // Start a batch of goroutines to concurrently run scheduling tasks - quit := make(chan struct{}) - - var pend sync.WaitGroup - pend.Add(clients) - - for i := 0; i < clients; i++ { - go func() { - defer pend.Done() - - in := make(chan uint64, 16) - out := make(chan []byte, 16) - - f.run(in, fetch, out, quit, &pend) - - go func() { - for j := 0; j < requests; j++ { - in <- uint64(j) - } - close(in) - }() - b := new(big.Int) - for j := 0; j < requests; j++ { - bits := <-out - if want := b.SetUint64(uint64(j)).Bytes(); !bytes.Equal(bits, want) { - t.Errorf("vector %d: delivered content mismatch: have %x, want %x", j, bits, want) - } - } - }() - } - pend.Wait() - - if have := atomic.LoadUint32(&delivered); int(have) != requests { - t.Errorf("request count mismatch: have %v, want %v", have, requests) - } -} diff --git a/core/chain_makers.go b/core/chain_makers.go index 21ad0882e45..bb535da4b7a 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -112,8 +112,7 @@ func (b *BlockGen) AddTxWithChain(getHeader func(hash common.Hash, number uint64 b.SetCoinbase(common.Address{}) } b.ibs.Prepare(tx.Hash(), common.Hash{}, len(b.txs)) - contractHasTEVM := func(_ common.Hash) (bool, error) { return false, nil } - receipt, _, err := 
ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) + receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}) if err != nil { panic(err) } @@ -126,8 +125,7 @@ func (b *BlockGen) AddFailedTxWithChain(getHeader func(hash common.Hash, number b.SetCoinbase(common.Address{}) } b.ibs.Prepare(tx.Hash(), common.Hash{}, len(b.txs)) - contractHasTEVM := func(common.Hash) (bool, error) { return false, nil } - receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) + receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}) _ = err // accept failed transactions b.txs = append(b.txs, tx) b.receipts = append(b.receipts, receipt) diff --git a/core/evm.go b/core/evm.go index 5c169621801..8504e95e7ae 100644 --- a/core/evm.go +++ b/core/evm.go @@ -30,7 +30,7 @@ import ( ) // NewEVMBlockContext creates a new context for use in the EVM. -func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, author *common.Address, contractHasTEVM func(contractHash common.Hash) (bool, error)) vm.BlockContext { +func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, author *common.Address) vm.BlockContext { // If we don't have an explicit author (i.e. 
not mining), extract from the header var beneficiary common.Address if author == nil { @@ -52,12 +52,6 @@ func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) commo prevRandDao = &header.MixDigest } - if contractHasTEVM == nil { - contractHasTEVM = func(_ common.Hash) (bool, error) { - return false, nil - } - } - var transferFunc vm.TransferFunc if engine != nil && engine.Type() == params.BorConsensus { transferFunc = BorTransfer @@ -66,17 +60,16 @@ func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) commo } return vm.BlockContext{ - CanTransfer: CanTransfer, - Transfer: transferFunc, - GetHash: blockHashFunc, - Coinbase: beneficiary, - BlockNumber: header.Number.Uint64(), - Time: header.Time, - Difficulty: new(big.Int).Set(header.Difficulty), - BaseFee: &baseFee, - GasLimit: header.GasLimit, - ContractHasTEVM: contractHasTEVM, - PrevRanDao: prevRandDao, + CanTransfer: CanTransfer, + Transfer: transferFunc, + GetHash: blockHashFunc, + Coinbase: beneficiary, + BlockNumber: header.Number.Uint64(), + Time: header.Time, + Difficulty: new(big.Int).Set(header.Difficulty), + BaseFee: &baseFee, + GasLimit: header.GasLimit, + PrevRanDao: prevRandDao, } } diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index c2c97de8239..cbf436dba97 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -145,6 +145,14 @@ func TestCreation(t *testing.T) { {1735371, ID{Hash: checksumToBytes(0xb96cbd13), Next: 0}}, // First MergeNetsplit block }, }, + // Gnosis test cases + { + params.GnosisChainConfig, + params.GnosisGenesisHash, + []testcase{ + {24000000, ID{Hash: checksumToBytes(0x018479D3), Next: 0}}, + }, + }, } for i, tt := range tests { for j, ttt := range tt.cases { diff --git a/core/genesis.go b/core/genesis.go index eb424e16e40..5dcc1e0f64d 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -28,6 +28,7 @@ import ( "math/big" "sync" + "github.com/c2h5oh/datasize" "github.com/holiman/uint256" 
"github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" @@ -156,16 +157,20 @@ type GenesisMismatchError struct { } func (e *GenesisMismatchError) Error() string { - return fmt.Sprintf("database contains incompatible genesis (have %x, new %x)", e.Stored, e.New) + config := params.ChainConfigByGenesisHash(e.Stored) + if config == nil { + return fmt.Sprintf("database contains incompatible genesis (have %x, new %x)", e.Stored, e.New) + } + return fmt.Sprintf("database contains incompatible genesis (try with --chain=%s)", config.ChainName) } // CommitGenesisBlock writes or updates the genesis block in db. // The block that will be used is: // -// genesis == nil genesis != nil -// +------------------------------------------ -// db has no genesis | main-net default | genesis -// db has genesis | from DB | genesis (if compatible) +// genesis == nil genesis != nil +// +------------------------------------------ +// db has no genesis | main-net default | genesis +// db has genesis | from DB | genesis (if compatible) // // The stored chain configuration will be updated if it is compatible (i.e. does not // specify a fork block below the local head block). In case of a conflict, the @@ -309,13 +314,14 @@ func (g *Genesis) configOrDefault(genesisHash common.Hash) *params.ChainConfig { // ToBlock creates the genesis block and writes state of a genesis specification // to the given database (or discards it if nil). 
func (g *Genesis) ToBlock() (*types.Block, *state.IntraBlockState, error) { + _ = g.Alloc //nil-check var root common.Hash var statedb *state.IntraBlockState wg := sync.WaitGroup{} wg.Add(1) go func() { // we may run inside write tx, can't open 2nd write tx in same goroutine defer wg.Done() - tmpDB := mdbx.NewMDBX(log.New()).InMem().MustOpen() + tmpDB := mdbx.NewMDBX(log.New()).InMem().MapSize(2 * datasize.GB).MustOpen() defer tmpDB.Close() tx, err := tmpDB.BeginRw(context.Background()) if err != nil { @@ -458,6 +464,9 @@ func (g *Genesis) Write(tx kv.RwTx) (*types.Block, *state.IntraBlockState, error if err := rawdb.WriteBlock(tx, block); err != nil { return nil, nil, err } + if err := rawdb.TxNums.WriteForGenesis(tx, 1); err != nil { + return nil, nil, err + } if err := rawdb.WriteReceipts(tx, block.NumberU64(), nil); err != nil { return nil, nil, err } @@ -691,7 +700,7 @@ func DefaultMumbaiGenesisBlock() *Genesis { } } -//DefaultBorMainnet returns the Bor Mainnet network gensis block. +// DefaultBorMainnet returns the Bor Mainnet network gensis block. 
func DefaultBorMainnetGenesisBlock() *Genesis { return &Genesis{ Config: params.BorMainnetChainConfig, @@ -739,8 +748,9 @@ func DefaultGnosisGenesisBlock() *Genesis { } // Pre-calculated version of: -// DevnetSignPrivateKey = crypto.HexToECDSA(sha256.Sum256([]byte("erigon devnet key"))) -// DevnetEtherbase=crypto.PubkeyToAddress(DevnetSignPrivateKey.PublicKey) +// +// DevnetSignPrivateKey = crypto.HexToECDSA(sha256.Sum256([]byte("erigon devnet key"))) +// DevnetEtherbase=crypto.PubkeyToAddress(DevnetSignPrivateKey.PublicKey) var DevnetSignPrivateKey, _ = crypto.HexToECDSA("26e86e45f6fc45ec6e2ecd128cec80fa1d1505e5507dcd2ae58c3130a7a97b48") var DevnetEtherbase = common.HexToAddress("67b1d87101671b127f5f8714789c7192f7ad340e") @@ -760,19 +770,6 @@ func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis { } } -func DefaultKilnDevnetGenesisBlock() *Genesis { - return &Genesis{ - Config: params.KilnDevnetChainConfig, - Nonce: 0x1234, - Timestamp: 0, - GasLimit: 0x400000, - Difficulty: big.NewInt(1), - Mixhash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), - Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"), - Alloc: readPrealloc("allocs/kiln-devnet.json"), - } -} - func readPrealloc(filename string) GenesisAlloc { f, err := allocs.Open(filename) if err != nil { @@ -816,8 +813,6 @@ func DefaultGenesisBlockByChainName(chain string) *Genesis { return DefaultBorMainnetGenesisBlock() case networkname.BorDevnetChainName: return DefaultBorDevnetGenesisBlock() - case networkname.KilnDevnetChainName: - return DefaultKilnDevnetGenesisBlock() case networkname.GnosisChainName: return DefaultGnosisGenesisBlock() default: diff --git a/core/mkalloc.go b/core/mkalloc.go index 3f72c78b16c..bb941e80558 100644 --- a/core/mkalloc.go +++ b/core/mkalloc.go @@ -17,12 +17,10 @@ //go:build none /* +The mkalloc tool creates the genesis allocation constants in genesis_alloc.go +It outputs a const declaration that 
contains an RLP-encoded list of (address, balance) tuples. - The mkalloc tool creates the genesis allocation constants in genesis_alloc.go - It outputs a const declaration that contains an RLP-encoded list of (address, balance) tuples. - - go run mkalloc.go genesis.json - + go run mkalloc.go genesis.json */ package main diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index c4fde5804e6..c02ea9ab9b1 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -20,12 +20,14 @@ import ( "bytes" "context" "encoding/binary" + "encoding/json" "fmt" "math" "math/big" - "strings" + "sort" "time" + "github.com/gballet/go-verkle" common2 "github.com/ledgerwatch/erigon-lib/common" libcommon "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/common/dbg" @@ -38,7 +40,6 @@ import ( "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" - "golang.org/x/exp/slices" ) // ReadCanonicalHash retrieves the hash assigned to a canonical block number. @@ -470,20 +471,14 @@ func WriteTransactions(db kv.RwTx, txs []types.Transaction, baseTxId uint64) err return nil } -func WriteRawTransactions(db kv.RwTx, txs [][]byte, baseTxId uint64) error { +func WriteRawTransactions(tx kv.RwTx, txs [][]byte, baseTxId uint64) error { txId := baseTxId - for _, tx := range txs { + for _, txn := range txs { txIdKey := make([]byte, 8) binary.BigEndian.PutUint64(txIdKey, txId) // If next Append returns KeyExists error - it means you need to open transaction in App code before calling this func. Batch is also fine. 
- if err := db.Append(kv.EthTx, txIdKey, tx); err != nil { - c, err := db.Cursor(kv.EthTx) - if err != nil { - kk, _, _ := c.Last() - c.Close() - return fmt.Errorf("txId=%d, baseTxId=%d, lastInDb=%d, %w", txId, baseTxId, binary.BigEndian.Uint64(kk), err) - } - return err + if err := tx.Append(kv.EthTx, txIdKey, txn); err != nil { + return fmt.Errorf("txId=%d, baseTxId=%d, %w", txId, baseTxId, err) } txId++ } @@ -647,21 +642,21 @@ func ReadSenders(db kv.Getter, hash common.Hash, number uint64) ([]common.Addres return senders, nil } -func WriteRawBodyIfNotExists(db kv.RwTx, hash common.Hash, number uint64, body *types.RawBody) error { +func WriteRawBodyIfNotExists(db kv.RwTx, hash common.Hash, number uint64, body *types.RawBody) (ok bool, lastTxnNum uint64, err error) { exists, err := db.Has(kv.BlockBody, dbutils.BlockBodyKey(number, hash)) if err != nil { - return err + return false, 0, err } if exists { - return nil + return false, 0, nil } return WriteRawBody(db, hash, number, body) } -func WriteRawBody(db kv.RwTx, hash common.Hash, number uint64, body *types.RawBody) error { +func WriteRawBody(db kv.RwTx, hash common.Hash, number uint64, body *types.RawBody) (ok bool, lastTxnNum uint64, err error) { baseTxId, err := db.IncrementSequence(kv.EthTx, uint64(len(body.Transactions))+2) if err != nil { - return err + return false, 0, err } data := types.BodyForStorage{ BaseTxId: baseTxId, @@ -669,12 +664,13 @@ func WriteRawBody(db kv.RwTx, hash common.Hash, number uint64, body *types.RawBo Uncles: body.Uncles, } if err = WriteBodyForStorage(db, hash, number, &data); err != nil { - return fmt.Errorf("WriteBodyForStorage: %w", err) + return false, 0, fmt.Errorf("WriteBodyForStorage: %w", err) } + lastTxnNum = baseTxId + uint64(len(body.Transactions)) + 2 if err = WriteRawTransactions(db, body.Transactions, baseTxId+1); err != nil { - return fmt.Errorf("WriteRawTransactions: %w", err) + return false, 0, fmt.Errorf("WriteRawTransactions: %w", err) } - return nil + return 
true, lastTxnNum, nil } func WriteBody(db kv.RwTx, hash common.Hash, number uint64, body *types.Body) error { @@ -718,7 +714,7 @@ func deleteBody(db kv.Deleter, hash common.Hash, number uint64) { } // MakeBodiesCanonical - move all txs of non-canonical blocks from NonCanonicalTxs table to EthTx table -func MakeBodiesCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPrefix string, logEvery *time.Ticker) error { +func MakeBodiesCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPrefix string, logEvery *time.Ticker, cb func(blockNum, lastTxnNum uint64) error) error { for blockNum := from; ; blockNum++ { h, err := ReadCanonicalHash(tx, blockNum) if err != nil { @@ -761,6 +757,12 @@ func MakeBodiesCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPrefix if err := WriteBodyForStorage(tx, h, blockNum, bodyForStorage); err != nil { return err } + if cb != nil { + lastTxnNum := bodyForStorage.BaseTxId + uint64(bodyForStorage.TxAmount) + if err = cb(blockNum, lastTxnNum); err != nil { + return err + } + } select { case <-ctx.Done(): @@ -1313,6 +1315,7 @@ func TruncateBlocks(ctx context.Context, tx kv.RwTx, blockFrom uint64) error { if blockFrom < 1 { //protect genesis blockFrom = 1 } + sequenceTo := map[string]uint64{} for k, _, err := c.Last(); k != nil; k, _, err = c.Prev() { if err != nil { return err @@ -1344,9 +1347,7 @@ func TruncateBlocks(ctx context.Context, tx kv.RwTx, blockFrom uint64) error { }); err != nil { return err } - if err := ResetSequence(tx, bucket, b.BaseTxId); err != nil { - return err - } + sequenceTo[bucket] = b.BaseTxId } // Copying k because otherwise the same memory will be reused // for the next key and Delete below will end up deleting 1 more record than required @@ -1366,6 +1367,11 @@ func TruncateBlocks(ctx context.Context, tx kv.RwTx, blockFrom uint64) error { default: } } + for bucket, sequence := range sequenceTo { + if err := ResetSequence(tx, bucket, sequence); err != nil { + return err + } + } return nil } @@ 
-1620,48 +1626,42 @@ func IsPosBlock(db kv.Getter, blockHash common.Hash) (trans bool, err error) { return header.Difficulty.Cmp(common.Big0) == 0, nil } -var SapshotsKey = []byte("snapshots") +var SnapshotsKey = []byte("snapshots") +var SnapshotsHistoryKey = []byte("snapshots_history") func ReadSnapshots(tx kv.Tx) ([]string, error) { - v, err := tx.GetOne(kv.DatabaseInfo, SapshotsKey) + v, err := tx.GetOne(kv.DatabaseInfo, SnapshotsKey) if err != nil { return nil, err } - return strings.Split(string(v), ","), nil -} - -func WriteSnapshots(tx kv.RwTx, list []string) error { - return tx.Put(kv.DatabaseInfo, SapshotsKey, []byte(strings.Join(list, ","))) + var res []string + _ = json.Unmarshal(v, &res) + return res, nil } -// EnforceSnapshotsInvariant if DB has record - then file exists, if file exists - DB has record. -func EnforceSnapshotsInvariant(tx kv.RwTx, snListInFolder []string) (filtered []string, err error) { - snList, err := ReadSnapshots(tx) +func ReadHistorySnapshots(tx kv.Tx) ([]string, error) { + v, err := tx.GetOne(kv.DatabaseInfo, SnapshotsHistoryKey) if err != nil { - return filtered, err - } - exists := map[string]string{} - - for _, fName := range snListInFolder { - exists[fName] = "" + return nil, err } + var res []string + _ = json.Unmarshal(v, &res) + return res, nil +} - for _, fName := range snList { - if _, ok := exists[fName]; !ok { - delete(exists, fName) - continue - } - filtered = append(filtered, fName) - delete(exists, fName) - } - for fName := range exists { - filtered = append(filtered, fName) +func WriteSnapshots(tx kv.RwTx, list []string) error { + res, err := json.Marshal(list) + if err != nil { + return err } - slices.Sort(filtered) - if err = WriteSnapshots(tx, filtered); err != nil { - return filtered, err + return tx.Put(kv.DatabaseInfo, SnapshotsKey, res) +} +func WriteHistorySnapshots(tx kv.RwTx, list []string) error { + res, err := json.Marshal(list) + if err != nil { + return err } - return filtered, nil + return 
tx.Put(kv.DatabaseInfo, SnapshotsHistoryKey, res) } // PruneTable has `limit` parameter to avoid too large data deletes per one sync cycle - better delete by small portions to reduce db.FreeList size @@ -1727,3 +1727,144 @@ func PruneTableDupSort(tx kv.RwTx, table string, logPrefix string, pruneTo uint6 } return nil } + +type txNums struct{} + +var TxNums txNums + +func (txNums) Max(tx kv.Getter, blockNum uint64) (maxTxNum uint64, err error) { + var k [8]byte + binary.BigEndian.PutUint64(k[:], blockNum) + v, err := tx.GetOne(kv.MaxTxNum, k[:]) + if err != nil { + return 0, err + } + if len(v) == 0 { + return 0, nil + } + return binary.BigEndian.Uint64(v), nil +} +func (txNums) Min(tx kv.Getter, blockNum uint64) (maxTxNum uint64, err error) { + if blockNum == 0 { + return 0, nil + } + var k [8]byte + binary.BigEndian.PutUint64(k[:], blockNum-1) + v, err := tx.GetOne(kv.MaxTxNum, k[:]) + if err != nil { + return 0, err + } + if len(v) == 0 { + return 0, nil + } + return binary.BigEndian.Uint64(v) + 1, nil +} + +func (txNums) Append(tx kv.RwTx, blockNum, maxTxNum uint64) (err error) { + lastK, err := LastKey(tx, kv.MaxTxNum) + if err != nil { + return err + } + if len(lastK) != 0 { + lastBlockNum := binary.BigEndian.Uint64(lastK) + if lastBlockNum+1 != blockNum { + return fmt.Errorf("append with gap blockNum=%d, but current heigh=%d", blockNum, lastBlockNum) + } + } + + var k, v [8]byte + binary.BigEndian.PutUint64(k[:], blockNum) + binary.BigEndian.PutUint64(v[:], maxTxNum) + if err := tx.Append(kv.MaxTxNum, k[:], v[:]); err != nil { + return err + } + return nil +} +func (txNums) WriteForGenesis(tx kv.RwTx, maxTxNum uint64) (err error) { + var k, v [8]byte + binary.BigEndian.PutUint64(k[:], 0) + binary.BigEndian.PutUint64(v[:], maxTxNum) + return tx.Put(kv.MaxTxNum, k[:], v[:]) +} +func (txNums) Truncate(tx kv.RwTx, blockNum uint64) (err error) { + var seek [8]byte + binary.BigEndian.PutUint64(seek[:], blockNum) + c, err := tx.RwCursor(kv.MaxTxNum) + if err != nil { 
+ return err + } + defer c.Close() + for k, _, err := c.Seek(seek[:]); k != nil; k, _, err = c.Next() { + if err != nil { + return err + } + if err = c.DeleteCurrent(); err != nil { + return err + } + + } + return nil +} +func (txNums) FindBlockNum(tx kv.Tx, endTxNumMinimax uint64) (ok bool, blockNum uint64, err error) { + var seek [8]byte + c, err := tx.Cursor(kv.MaxTxNum) + if err != nil { + return false, 0, err + } + defer c.Close() + + cnt, err := c.Count() + if err != nil { + return false, 0, err + } + + blockNum = uint64(sort.Search(int(cnt), func(i int) bool { + binary.BigEndian.PutUint64(seek[:], uint64(i)) + var v []byte + _, v, err = c.SeekExact(seek[:]) + return binary.BigEndian.Uint64(v) >= endTxNumMinimax + })) + if err != nil { + return false, 0, err + } + if blockNum == cnt { + return false, 0, nil + } + return true, blockNum, nil +} + +func ReadVerkleRoot(tx kv.Tx, blockNum uint64) (common.Hash, error) { + root, err := tx.GetOne(kv.VerkleRoots, dbutils.EncodeBlockNumber(blockNum)) + if err != nil { + return common.Hash{}, err + } + + return common.BytesToHash(root), nil +} + +func WriteVerkleRoot(tx kv.RwTx, blockNum uint64, root common.Hash) error { + return tx.Put(kv.VerkleRoots, dbutils.EncodeBlockNumber(blockNum), root[:]) +} + +func WriteVerkleNode(tx kv.RwTx, node verkle.VerkleNode) error { + var ( + root common.Hash + encoded []byte + err error + ) + root = node.Commitment().Bytes() + encoded, err = node.Serialize() + if err != nil { + return err + } + + return tx.Put(kv.VerkleTrie, root[:], encoded) +} + +func ReadVerkleNode(tx kv.RwTx, root common.Hash) (verkle.VerkleNode, error) { + encoded, err := tx.GetOne(kv.VerkleTrie, root[:]) + if err != nil { + return nil, err + } + return verkle.ParseNode(encoded, 0, root[:]) +} diff --git a/core/rawdb/accessors_config.go b/core/rawdb/accessors_config.go new file mode 100644 index 00000000000..33ef8e7ef1b --- /dev/null +++ b/core/rawdb/accessors_config.go @@ -0,0 +1,32 @@ +package rawdb + +import ( 
+ "github.com/ledgerwatch/erigon-lib/kv" +) + +type ConfigKey []byte + +var ( + HistoryV3 = ConfigKey("history.v3") +) + +func (k ConfigKey) Enabled(tx kv.Tx) (bool, error) { return kv.GetBool(tx, kv.DatabaseInfo, k) } +func (k ConfigKey) WriteOnce(tx kv.RwTx, v bool) (bool, error) { + _, enabled, err := kv.EnsureNotChangedBool(tx, kv.DatabaseInfo, k, v) + return enabled, err +} +func (k ConfigKey) EnsureNotChanged(tx kv.RwTx, value bool) (ok, enabled bool, err error) { + return kv.EnsureNotChangedBool(tx, kv.DatabaseInfo, k, value) +} +func (k ConfigKey) ForceWrite(tx kv.RwTx, enabled bool) error { + if enabled { + if err := tx.Put(kv.DatabaseInfo, k, []byte{1}); err != nil { + return err + } + } else { + if err := tx.Put(kv.DatabaseInfo, k, []byte{0}); err != nil { + return err + } + } + return nil +} diff --git a/core/rawdb/bor_receipts.go b/core/rawdb/bor_receipts.go index 9df622909d9..598255ab0cb 100644 --- a/core/rawdb/bor_receipts.go +++ b/core/rawdb/bor_receipts.go @@ -1,12 +1,16 @@ package rawdb import ( + "bytes" "errors" + "fmt" + "math/big" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/ethdb/cbor" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/log/v3" ) @@ -34,75 +38,43 @@ func ReadBorReceiptRLP(db kv.Getter, hash common.Hash, number uint64) rlp.RawVal return data } -// ReadRawBorReceipt retrieves the block receipt belonging to a block. -// The receipt metadata fields are not guaranteed to be populated, so they -// should not be used. Use ReadBorReceipt instead if the metadata is needed. 
-func ReadRawBorReceipt(db kv.Tx, hash common.Hash, number uint64) *types.Receipt { - // Retrieve the flattened receipt slice - data := ReadBorReceiptRLP(db, hash, number) - if len(data) == 0 { - return nil - } - - // Convert the receipts from their storage form to their internal representation - var storageReceipt types.ReceiptForStorage - if err := rlp.DecodeBytes(data, &storageReceipt); err != nil { - log.Error("Invalid receipt array RLP", "hash", hash, "err", err) - return nil - } - - return (*types.Receipt)(&storageReceipt) -} - // ReadBorReceipt retrieves all the bor block receipts belonging to a block, including // its correspoinding metadata fields. If it is unable to populate these metadata // fields then nil is returned. -func ReadBorReceipt(db kv.Tx, hash common.Hash, number uint64) *types.Receipt { - // We're deriving many fields from the block body, retrieve beside the receipt - borReceipt := ReadRawBorReceipt(db, hash, number) - if borReceipt == nil { - return nil - } - +func ReadBorReceipt(db kv.Tx, number uint64) (*types.Receipt, error) { // We're deriving many fields from the block body, retrieve beside the receipt - receipts := ReadRawReceipts(db, number) - if receipts == nil { - receipts = make(types.Receipts, 0) + data, err := db.GetOne(kv.BorReceipts, borReceiptKey(number)) + if err != nil { + return nil, fmt.Errorf("ReadBorReceipt failed getting bor receipt with blockNumber=%d, err=%s", number, err) } - - if err := types.DeriveFieldsForBorReceipt(borReceipt, hash, number, receipts); err != nil { - log.Error("Failed to derive bor receipt fields", "hash", hash, "number", number, "err", err) - return nil + if data == nil { + return nil, nil } - return borReceipt -} -// ReadBorReceiptLogs retrieves all the bor block receipt logs belonging to a block. -// If it is unable to populate these metadata fields then nil is returned. 
-func ReadBorReceiptLogs(db kv.Tx, blockHash common.Hash, blockNumber uint64, txIndex uint, logIndex uint) []*types.Log { - // We're deriving many fields from the block body, retrieve beside the receipt - borReceipt := ReadRawBorReceipt(db, blockHash, blockNumber) - if borReceipt == nil { - return nil + var borReceipt *types.Receipt + if err := rlp.DecodeBytes(data, borReceipt); err != nil { + return nil, err } - borLogs := borReceipt.Logs - - types.DeriveFieldsForBorLogs(borLogs, blockHash, blockNumber, txIndex, logIndex) - - return borLogs + return borReceipt, nil } -// WriteBorReceipt stores all the bor receipt belonging to a block. -func WriteBorReceipt(tx kv.RwTx, hash common.Hash, number uint64, borReceipt *types.ReceiptForStorage) error { +// WriteBorReceipt stores all the bor receipt belonging to a block (storing the state sync recipt and log). +func WriteBorReceipt(tx kv.RwTx, hash common.Hash, number uint64, borReceipt *types.Receipt) error { // Convert the bor receipt into their storage form and serialize them - bytes, err := rlp.EncodeToBytes(borReceipt) - if err != nil { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + cbor.Marshal(buf, borReceipt.Logs) + if err := tx.Append(kv.Log, dbutils.LogKey(number, uint32(borReceipt.TransactionIndex)), buf.Bytes()); err != nil { return err } + buf.Reset() + err := cbor.Marshal(buf, borReceipt) + if err != nil { + return err + } // Store the flattened receipt slice - if err := tx.Append(kv.BorReceipts, borReceiptKey(number), bytes); err != nil { + if err := tx.Append(kv.BorReceipts, borReceiptKey(number), buf.Bytes()); err != nil { return err } @@ -113,8 +85,19 @@ func WriteBorReceipt(tx kv.RwTx, hash common.Hash, number uint64, borReceipt *ty func DeleteBorReceipt(tx kv.RwTx, hash common.Hash, number uint64) { key := borReceiptKey(number) + // we delete Bor Receipt log too + borReceipt, err := ReadBorReceipt(tx, number) + if err != nil { + log.Error("Failted to read bor receipt", "err", err) + } + if borReceipt 
!= nil { + if err := tx.Delete(kv.Log, dbutils.LogKey(number, uint32(borReceipt.TransactionIndex))); err != nil { + log.Error("Failed to delete bor log", "err", err) + } + } + if err := tx.Delete(kv.BorReceipts, key); err != nil { - log.Crit("Failed to delete bor receipt", "err", err) + log.Error("Failed to delete bor receipt", "err", err) } } @@ -143,7 +126,7 @@ func ReadBorTransactionWithBlockHash(db kv.Tx, borTxHash common.Hash, blockHash // ReadBorTransaction returns a specific bor (fake) transaction by txn hash, along with // its added positional metadata. func ReadBorTransaction(db kv.Tx, borTxHash common.Hash) (types.Transaction, common.Hash, uint64, uint64, error) { - blockNumber, err := ReadTxLookupEntry(db, borTxHash) + blockNumber, err := ReadBorTxLookupEntry(db, borTxHash) if err != nil { return nil, common.Hash{}, 0, 0, err } @@ -154,6 +137,19 @@ func ReadBorTransaction(db kv.Tx, borTxHash common.Hash) (types.Transaction, com return computeBorTransactionForBlockNumber(db, *blockNumber) } +func ReadBorTxLookupEntry(db kv.Tx, borTxHash common.Hash) (*uint64, error) { + blockNumBytes, err := db.GetOne(kv.BorTxLookup, borTxHash.Bytes()) + if err != nil { + return nil, err + } + if blockNumBytes == nil { + return nil, nil + } + + blockNum := (new(big.Int).SetBytes(blockNumBytes)).Uint64() + return &blockNum, nil +} + // ReadBorTransactionForBlockNumber returns a bor (fake) transaction by block number, along with // its added positional metadata. 
func ReadBorTransactionForBlockNumber(db kv.Tx, blockNumber uint64) (types.Transaction, common.Hash, uint64, uint64, error) { diff --git a/core/rawdb/rawdbreset/reset_stages.go b/core/rawdb/rawdbreset/reset_stages.go index c2ccc90d7b9..36c35ea6956 100644 --- a/core/rawdb/rawdbreset/reset_stages.go +++ b/core/rawdb/rawdbreset/reset_stages.go @@ -10,9 +10,12 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/log/v3" ) -func ResetState(db kv.RwDB, ctx context.Context, g *core.Genesis) error { +func ResetState(db kv.RwDB, ctx context.Context, chain string) error { // don't reset senders here if err := db.Update(ctx, stagedsync.ResetHashState); err != nil { return err @@ -36,13 +39,29 @@ func ResetState(db kv.RwDB, ctx context.Context, g *core.Genesis) error { return err } - if err := db.Update(ctx, func(tx kv.RwTx) error { return ResetExec(tx, g) }); err != nil { + if err := db.Update(ctx, func(tx kv.RwTx) error { return ResetExec(tx, chain) }); err != nil { return err } return nil } -func ResetBlocks(tx kv.RwTx) error { +func ResetBlocks(tx kv.RwTx, db kv.RoDB, snapshots *snapshotsync.RoSnapshots, br services.HeaderAndCanonicalReader, tmpdir string) error { + go func() { //inverted read-ahead - to warmup data + _ = db.View(context.Background(), func(tx kv.Tx) error { + c, err := tx.Cursor(kv.EthTx) + if err != nil { + return err + } + defer c.Close() + for k, _, err := c.Last(); k != nil; k, _, err = c.Prev() { + if err != nil { + return err + } + } + return nil + }) + }() + // keep Genesis if err := rawdb.TruncateBlocks(context.Background(), tx, 1); err != nil { return err @@ -53,6 +72,9 @@ func ResetBlocks(tx kv.RwTx) error { if err := stages.SaveStageProgress(tx, stages.Headers, 1); err != nil { return fmt.Errorf("saving Bodies progress 
failed: %w", err) } + if err := stages.SaveStageProgress(tx, stages.Snapshots, 0); err != nil { + return fmt.Errorf("saving Snapshots progress failed: %w", err) + } // remove all canonical markers from this point if err := rawdb.TruncateCanonicalHash(tx, 1, false); err != nil { @@ -79,6 +101,9 @@ func ResetBlocks(tx kv.RwTx) error { if err := tx.ClearBucket(kv.EthTx); err != nil { return err } + if err := tx.ClearBucket(kv.MaxTxNum); err != nil { + return err + } if err := rawdb.ResetSequence(tx, kv.EthTx, 0); err != nil { return err } @@ -86,6 +111,16 @@ func ResetBlocks(tx kv.RwTx) error { return err } + if snapshots != nil && snapshots.Cfg().Enabled && snapshots.BlocksAvailable() > 0 { + if err := stagedsync.FillDBFromSnapshots("fillind_db_from_snapshots", context.Background(), tx, tmpdir, snapshots, br); err != nil { + return err + } + _ = stages.SaveStageProgress(tx, stages.Snapshots, snapshots.BlocksAvailable()) + _ = stages.SaveStageProgress(tx, stages.Headers, snapshots.BlocksAvailable()) + _ = stages.SaveStageProgress(tx, stages.Bodies, snapshots.BlocksAvailable()) + _ = stages.SaveStageProgress(tx, stages.Senders, snapshots.BlocksAvailable()) + } + return nil } func ResetSenders(tx kv.RwTx) error { @@ -101,62 +136,84 @@ func ResetSenders(tx kv.RwTx) error { return nil } -func ResetExec(tx kv.RwTx, g *core.Genesis) error { - if err := tx.ClearBucket(kv.HashedAccounts); err != nil { - return err - } - if err := tx.ClearBucket(kv.HashedStorage); err != nil { +func ResetExec(tx kv.RwTx, chain string) (err error) { + if err = stages.SaveStageProgress(tx, stages.Execution, 0); err != nil { return err } - if err := tx.ClearBucket(kv.ContractCode); err != nil { + if err = stages.SaveStagePruneProgress(tx, stages.Execution, 0); err != nil { return err } - if err := tx.ClearBucket(kv.PlainState); err != nil { + if err = stages.SaveStageProgress(tx, stages.HashState, 0); err != nil { return err } - if err := tx.ClearBucket(kv.AccountChangeSet); err != nil { + if err 
= stages.SaveStagePruneProgress(tx, stages.HashState, 0); err != nil { return err } - if err := tx.ClearBucket(kv.StorageChangeSet); err != nil { + if err = stages.SaveStageProgress(tx, stages.IntermediateHashes, 0); err != nil { return err } - if err := tx.ClearBucket(kv.PlainContractCode); err != nil { + if err = stages.SaveStagePruneProgress(tx, stages.IntermediateHashes, 0); err != nil { return err } - if err := tx.ClearBucket(kv.Receipts); err != nil { - return err - } - if err := tx.ClearBucket(kv.Log); err != nil { - return err - } - if err := tx.ClearBucket(kv.IncarnationMap); err != nil { - return err - } - if err := tx.ClearBucket(kv.Code); err != nil { - return err - } - if err := tx.ClearBucket(kv.CallTraceSet); err != nil { - return err - } - if err := tx.ClearBucket(kv.Epoch); err != nil { - return err - } - if err := tx.ClearBucket(kv.PendingEpoch); err != nil { - return err - } - if err := tx.ClearBucket(kv.BorReceipts); err != nil { - return err + + stateBuckets := []string{ + kv.PlainState, kv.HashedAccounts, kv.HashedStorage, kv.TrieOfAccounts, kv.TrieOfStorage, + kv.Epoch, kv.PendingEpoch, kv.BorReceipts, + kv.Code, kv.PlainContractCode, kv.ContractCode, kv.IncarnationMap, } - if err := stages.SaveStageProgress(tx, stages.Execution, 0); err != nil { - return err + for _, b := range stateBuckets { + log.Info("Clear", "table", b) + if err := tx.ClearBucket(b); err != nil { + return err + } } - if err := stages.SaveStagePruneProgress(tx, stages.Execution, 0); err != nil { + + historyV3, err := rawdb.HistoryV3.Enabled(tx) + if err != nil { return err } + if historyV3 { + buckets := []string{ + kv.AccountHistoryKeys, kv.AccountIdx, kv.AccountHistoryVals, kv.AccountSettings, + kv.StorageKeys, kv.StorageVals, kv.StorageHistoryKeys, kv.StorageHistoryVals, kv.StorageSettings, kv.StorageIdx, + kv.CodeKeys, kv.CodeVals, kv.CodeHistoryKeys, kv.CodeHistoryVals, kv.CodeSettings, kv.CodeIdx, + kv.AccountHistoryKeys, kv.AccountIdx, kv.AccountHistoryVals, 
kv.AccountSettings, + kv.StorageHistoryKeys, kv.StorageIdx, kv.StorageHistoryVals, kv.StorageSettings, + kv.CodeHistoryKeys, kv.CodeIdx, kv.CodeHistoryVals, kv.CodeSettings, + kv.LogAddressKeys, kv.LogAddressIdx, + kv.LogTopicsKeys, kv.LogTopicsIdx, + kv.TracesFromKeys, kv.TracesFromIdx, + kv.TracesToKeys, kv.TracesToIdx, + } + for _, b := range buckets { + log.Info("Clear", "table", b) + if err := tx.ClearBucket(b); err != nil { + return err + } + } + } else { + if err := tx.ClearBucket(kv.AccountChangeSet); err != nil { + return err + } + if err := tx.ClearBucket(kv.StorageChangeSet); err != nil { + return err + } + if err := tx.ClearBucket(kv.Receipts); err != nil { + return err + } + if err := tx.ClearBucket(kv.Log); err != nil { + return err + } + if err := tx.ClearBucket(kv.CallTraceSet); err != nil { + return err + } - if _, _, err := g.WriteGenesisState(tx); err != nil { - return err + genesis := core.DefaultGenesisBlockByChainName(chain) + if _, _, err := genesis.WriteGenesisState(tx); err != nil { + return err + } } + return nil } diff --git a/core/rlp_test.go b/core/rlp_test.go index 2bb4f9e1aff..d36c621bebc 100644 --- a/core/rlp_test.go +++ b/core/rlp_test.go @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
+// //nolint:errcheck,prealloc package core diff --git a/core/skip_analysis.go b/core/skip_analysis.go index 53eaea84294..6ade8f9352a 100644 --- a/core/skip_analysis.go +++ b/core/skip_analysis.go @@ -35,7 +35,7 @@ import ( // 0xcdb5bf0b4b51093e1c994f471921f88623c9d3e1b6aa2782049f53a0048f2b32 (block 11079912) // 0x21ab7bf7245a87eae265124aaf180d91133377e47db2b1a4866493ec4b371150 (block 13119520) -var analysisBlocks map[string][]uint64 = map[string][]uint64{ +var analysisBlocks = map[string][]uint64{ networkname.MainnetChainName: {5_800_596, 6_426_298, 6_426_432, 11_079_912, 13_119_520, 15_081_051}, networkname.BSCChainName: {19_278_044}, networkname.BorMainnetChainName: {29_447_463}, diff --git a/core/state/access_list_test.go b/core/state/access_list_test.go index 119990e723d..6fc8982711f 100644 --- a/core/state/access_list_test.go +++ b/core/state/access_list_test.go @@ -1,15 +1,16 @@ package state import ( + "testing" + "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/common" - "testing" ) func verifyAddrs(t *testing.T, s *IntraBlockState, astrings ...string) { t.Helper() // convert to common.Address form - var addresses []common.Address + addresses := make([]common.Address, 0, len(astrings)) var addressMap = make(map[common.Address]struct{}) for _, astring := range astrings { address := common.HexToAddress(astring) @@ -36,7 +37,7 @@ func verifySlots(t *testing.T, s *IntraBlockState, addrString string, slotString } var address = common.HexToAddress(addrString) // convert to common.Hash form - var slots []common.Hash + slots := make([]common.Hash, 0, len(slotStrings)) var slotMap = make(map[common.Hash]struct{}) for _, slotString := range slotStrings { s := common.HexToHash(slotString) diff --git a/core/state/database_test.go b/core/state/database_test.go index c9b052df165..c4d813a6054 100644 --- a/core/state/database_test.go +++ b/core/state/database_test.go @@ -128,7 +128,7 @@ func TestCreate2Revive(t *testing.T) { } err = 
m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") } @@ -145,7 +145,7 @@ func TestCreate2Revive(t *testing.T) { } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) } @@ -157,22 +157,11 @@ func TestCreate2Revive(t *testing.T) { if err = m.InsertChain(chain.Slice(1, 2)); err != nil { t.Fatal(err) } - var it *contracts.ReviveDeployEventIterator - it, err = revive.FilterDeployEvent(nil) - if err != nil { - t.Fatal(err) - } - if !it.Next() { - t.Error("Expected DeployEvent") - } - if it.Event.D != create2address { - t.Errorf("Wrong create2address: %x, expected %x", it.Event.D, create2address) - } var key2 common.Hash var check2 uint256.Int err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(create2address) { t.Error("expected create2address to exist at the block 2", create2address.String()) } @@ -191,7 +180,7 @@ func TestCreate2Revive(t *testing.T) { t.Fatal(err) } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if st.Exist(create2address) { t.Error("expected create2address to be self-destructed at the block 3", create2address.String()) } @@ -203,18 +192,8 @@ func TestCreate2Revive(t *testing.T) { if err = m.InsertChain(chain.Slice(3, 4)); err != nil { t.Fatal(err) } - it, err = revive.FilterDeployEvent(nil) - if err != nil { - t.Fatal(err) - } - if !it.Next() { - t.Error("Expected DeployEvent") - } - if it.Event.D != create2address { - t.Errorf("Wrong create2address: %x, expected %x", 
it.Event.D, create2address) - } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(create2address) { t.Error("expected create2address to exist at the block 2", create2address.String()) } @@ -355,7 +334,7 @@ func TestCreate2Polymorth(t *testing.T) { err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") } @@ -373,7 +352,7 @@ func TestCreate2Polymorth(t *testing.T) { err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) } @@ -385,19 +364,9 @@ func TestCreate2Polymorth(t *testing.T) { if err = m.InsertChain(chain.Slice(1, 2)); err != nil { t.Fatal(err) } - var it *contracts.PolyDeployEventIterator - it, err = poly.FilterDeployEvent(nil) - if err != nil { - t.Fatal(err) - } - if !it.Next() { - t.Error("Expected DeployEvent") - } - if it.Event.D != create2address { - t.Errorf("Wrong create2address: %x, expected %x", it.Event.D, create2address) - } + err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(create2address) { t.Error("expected create2address to exist at the block 2", create2address.String()) } @@ -416,7 +385,7 @@ func TestCreate2Polymorth(t *testing.T) { t.Fatal(err) } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if st.Exist(create2address) { t.Error("expected create2address to be self-destructed at the block 3", create2address.String()) } @@ -428,18 +397,8 @@ func 
TestCreate2Polymorth(t *testing.T) { if err = m.InsertChain(chain.Slice(3, 4)); err != nil { t.Fatal(err) } - it, err = poly.FilterDeployEvent(nil) - if err != nil { - t.Fatal(err) - } - if !it.Next() { - t.Error("Expected DeployEvent") - } - if it.Event.D != create2address { - t.Errorf("Wrong create2address: %x, expected %x", it.Event.D, create2address) - } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(create2address) { t.Error("expected create2address to exist at the block 4", create2address.String()) } @@ -458,18 +417,8 @@ func TestCreate2Polymorth(t *testing.T) { if err = m.InsertChain(chain.Slice(4, 5)); err != nil { t.Fatal(err) } - it, err = poly.FilterDeployEvent(nil) - if err != nil { - t.Fatal(err) - } - if !it.Next() { - t.Error("Expected DeployEvent") - } - if it.Event.D != create2address { - t.Errorf("Wrong create2address: %x, expected %x", it.Event.D, create2address) - } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(create2address) { t.Error("expected create2address to exist at the block 5", create2address.String()) } @@ -573,7 +522,7 @@ func TestReorgOverSelfDestruct(t *testing.T) { err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") } @@ -591,7 +540,7 @@ func TestReorgOverSelfDestruct(t *testing.T) { var key0 common.Hash var correctValueX uint256.Int err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) } @@ -608,7 +557,7 @@ func 
TestReorgOverSelfDestruct(t *testing.T) { } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if st.Exist(contractAddress) { t.Error("expected contractAddress to not exist at the block 3", contractAddress.String()) } @@ -621,7 +570,7 @@ func TestReorgOverSelfDestruct(t *testing.T) { t.Fatal(err) } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 4", contractAddress.String()) } @@ -715,7 +664,7 @@ func TestReorgOverStateChange(t *testing.T) { } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") } @@ -734,7 +683,7 @@ func TestReorgOverStateChange(t *testing.T) { var key0 common.Hash var correctValueX uint256.Int err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) } @@ -755,7 +704,7 @@ func TestReorgOverStateChange(t *testing.T) { t.Fatal(err) } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 4", contractAddress.String()) } @@ -843,7 +792,7 @@ func TestCreateOnExistingStorage(t *testing.T) { } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") } @@ -862,7 +811,7 @@ func 
TestCreateOnExistingStorage(t *testing.T) { var key0 common.Hash var check0 uint256.Int err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) } @@ -979,7 +928,7 @@ func TestEip2200Gas(t *testing.T) { var balanceBefore *uint256.Int err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") } @@ -997,7 +946,7 @@ func TestEip2200Gas(t *testing.T) { } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) } @@ -1012,7 +961,7 @@ func TestEip2200Gas(t *testing.T) { require.NoError(t, err) } -//Create contract, drop trie, reload trie from disk and add block with contract call +// Create contract, drop trie, reload trie from disk and add block with contract call func TestWrongIncarnation(t *testing.T) { // Configure and generate a sample block chain var ( @@ -1067,7 +1016,7 @@ func TestWrongIncarnation(t *testing.T) { } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") } @@ -1097,7 +1046,7 @@ func TestWrongIncarnation(t *testing.T) { t.Fatal("Incorrect incarnation", acc.Incarnation) } - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) } @@ -1125,7 +1074,7 @@ func TestWrongIncarnation(t 
*testing.T) { require.NoError(t, err) } -//create acc, deploy to it contract, reorg to state without contract +// create acc, deploy to it contract, reorg to state without contract func TestWrongIncarnation2(t *testing.T) { // Configure and generate a sample block chain var ( @@ -1216,7 +1165,7 @@ func TestWrongIncarnation2(t *testing.T) { } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(address) { t.Error("expected account to exist") } @@ -1236,7 +1185,7 @@ func TestWrongIncarnation2(t *testing.T) { var acc accounts.Account err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(contractAddress) { t.Error("expected contractAddress to exist at the block 1", contractAddress.String()) } @@ -1537,7 +1486,7 @@ func TestRecreateAndRewind(t *testing.T) { var key0 common.Hash var check0 uint256.Int err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(phoenixAddress) { t.Errorf("expected phoenix %x to exist after first insert", phoenixAddress) } @@ -1556,7 +1505,7 @@ func TestRecreateAndRewind(t *testing.T) { } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(phoenixAddress) { t.Errorf("expected phoenix %x to exist after second insert", phoenixAddress) } @@ -1574,7 +1523,7 @@ func TestRecreateAndRewind(t *testing.T) { t.Fatal(err) } err = m.DB.View(context.Background(), func(tx kv.Tx) error { - st := state.New(state.NewPlainStateReader(tx)) + st := state.New(m.NewStateReader(tx)) if !st.Exist(phoenixAddress) { t.Errorf("expected phoenix %x to exist after second insert", phoenixAddress) } diff --git a/core/state/history_reader_22.go 
b/core/state/history_reader_22.go index d0844f868de..4ed2105112f 100644 --- a/core/state/history_reader_22.go +++ b/core/state/history_reader_22.go @@ -6,57 +6,48 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types/accounts" ) -func bytesToUint64(buf []byte) (x uint64) { - for i, b := range buf { - x = x<<8 + uint64(b) - if i == 7 { - return - } - } - return -} - -// Implements StateReader and StateWriter +// HistoryReader22 Implements StateReader and StateWriter type HistoryReader22 struct { - ac *libstate.AggregatorContext - ri *libstate.ReadIndices + ac *libstate.Aggregator22Context txNum uint64 trace bool + tx kv.Tx } -func NewHistoryReader22(ac *libstate.AggregatorContext, ri *libstate.ReadIndices) *HistoryReader22 { - return &HistoryReader22{ac: ac, ri: ri} -} - -func (hr *HistoryReader22) SetTx(tx kv.RwTx) { - hr.ri.SetTx(tx) -} - -func (hr *HistoryReader22) SetTxNum(txNum uint64) { - hr.txNum = txNum - if hr.ri != nil { - hr.ri.SetTxNum(txNum) - } -} - -func (hr *HistoryReader22) FinishTx() error { - return hr.ri.FinishTx() +func NewHistoryReader22(ac *libstate.Aggregator22Context) *HistoryReader22 { + return &HistoryReader22{ac: ac} } -func (hr *HistoryReader22) SetTrace(trace bool) { - hr.trace = trace -} +func (hr *HistoryReader22) SetTx(tx kv.Tx) { hr.tx = tx } +func (hr *HistoryReader22) SetTxNum(txNum uint64) { hr.txNum = txNum } +func (hr *HistoryReader22) SetTrace(trace bool) { hr.trace = trace } func (hr *HistoryReader22) ReadAccountData(address common.Address) (*accounts.Account, error) { - if hr.ri != nil { - if err := hr.ri.ReadAccountData(address.Bytes()); err != nil { - return nil, err + enc, ok, err := hr.ac.ReadAccountDataNoStateWithRecent(address.Bytes(), hr.txNum) + if err != nil { + return nil, err + } + if ok { + if len(enc) == 0 { + if hr.trace { + 
fmt.Printf("ReadAccountData [%x] => []\n", address) + } + return nil, nil } + var a accounts.Account + if err := accounts.Deserialise2(&a, enc); err != nil { + return nil, fmt.Errorf("ReadAccountData(%x): %w", address, err) + } + if hr.trace { + fmt.Printf("ReadAccountData [%x] => [nonce: %d, balance: %d, codeHash: %x]\n", address, a.Nonce, &a.Balance, a.CodeHash) + } + return &a, nil } - enc, err := hr.ac.ReadAccountDataBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + enc, err = hr.tx.GetOne(kv.PlainState, address.Bytes()) if err != nil { return nil, err } @@ -67,34 +58,10 @@ func (hr *HistoryReader22) ReadAccountData(address common.Address) (*accounts.Ac return nil, nil } var a accounts.Account - a.Reset() - pos := 0 - nonceBytes := int(enc[pos]) - pos++ - if nonceBytes > 0 { - a.Nonce = bytesToUint64(enc[pos : pos+nonceBytes]) - pos += nonceBytes - } - balanceBytes := int(enc[pos]) - pos++ - if balanceBytes > 0 { - a.Balance.SetBytes(enc[pos : pos+balanceBytes]) - pos += balanceBytes - } - codeHashBytes := int(enc[pos]) - pos++ - if codeHashBytes > 0 { - copy(a.CodeHash[:], enc[pos:pos+codeHashBytes]) - pos += codeHashBytes - } - if pos >= len(enc) { - fmt.Printf("panic ReadAccountData(%x)=>[%x]\n", address, enc) - } - incBytes := int(enc[pos]) - pos++ - if incBytes > 0 { - a.Incarnation = bytesToUint64(enc[pos : pos+incBytes]) + if err := a.DecodeForStorage(enc); err != nil { + return nil, fmt.Errorf("ReadAccountData(%x): %w", address, err) } + if hr.trace { fmt.Printf("ReadAccountData [%x] => [nonce: %d, balance: %d, codeHash: %x]\n", address, a.Nonce, &a.Balance, a.CodeHash) } @@ -102,15 +69,17 @@ func (hr *HistoryReader22) ReadAccountData(address common.Address) (*accounts.Ac } func (hr *HistoryReader22) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) { - if hr.ri != nil { - if err := hr.ri.ReadAccountStorage(address.Bytes(), key.Bytes()); err != nil { - return nil, err - } - } - enc, err := 
hr.ac.ReadAccountStorageBeforeTxNum(address.Bytes(), key.Bytes(), hr.txNum, nil /* roTx */) + enc, ok, err := hr.ac.ReadAccountStorageNoStateWithRecent(address.Bytes(), key.Bytes(), hr.txNum) if err != nil { return nil, err } + if !ok { + k := dbutils.PlainGenerateCompositeStorageKey(address[:], incarnation, key.Bytes()) + enc, err = hr.tx.GetOne(kv.PlainState, k) + if err != nil { + return nil, err + } + } if hr.trace { if enc == nil { fmt.Printf("ReadAccountStorage [%x] [%x] => []\n", address, key.Bytes()) @@ -125,28 +94,37 @@ func (hr *HistoryReader22) ReadAccountStorage(address common.Address, incarnatio } func (hr *HistoryReader22) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { - if hr.ri != nil { - if err := hr.ri.ReadAccountCode(address.Bytes()); err != nil { - return nil, err - } + if codeHash == emptyCodeHashH { + return nil, nil } - enc, err := hr.ac.ReadAccountCodeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + enc, ok, err := hr.ac.ReadAccountCodeNoStateWithRecent(address.Bytes(), hr.txNum) if err != nil { return nil, err } + if !ok { + enc, err = hr.tx.GetOne(kv.Code, codeHash[:]) + if err != nil { + return nil, err + } + } if hr.trace { - fmt.Printf("ReadAccountCode [%x] => [%x]\n", address, enc) + fmt.Printf("ReadAccountCode [%x %x] => [%x]\n", address, codeHash, enc) } return enc, nil } func (hr *HistoryReader22) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) { - if hr.ri != nil { - if err := hr.ri.ReadAccountCodeSize(address.Bytes()); err != nil { + size, ok, err := hr.ac.ReadAccountCodeSizeNoStateWithRecent(address.Bytes(), hr.txNum) + if err != nil { + return 0, err + } + if !ok { + enc, err := hr.tx.GetOne(kv.Code, codeHash[:]) + if err != nil { return 0, err } + size = len(enc) } - size, err := hr.ac.ReadAccountCodeSizeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return 0, err } diff --git 
a/core/state/history_reader_23.go b/core/state/history_reader_23.go new file mode 100644 index 00000000000..97318c223b4 --- /dev/null +++ b/core/state/history_reader_23.go @@ -0,0 +1,140 @@ +package state + +import ( + "fmt" + + "github.com/ledgerwatch/erigon-lib/kv" + libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/types/accounts" +) + +func bytesToUint64(buf []byte) (x uint64) { + for i, b := range buf { + x = x<<8 + uint64(b) + if i == 7 { + return + } + } + return +} + +// HistoryReader23 Implements StateReader and StateWriter +type HistoryReader23 struct { + ac *libstate.AggregatorContext + ri *libstate.ReadIndices + txNum uint64 + trace bool + tx kv.Tx +} + +func NewHistoryReader23(ac *libstate.AggregatorContext, ri *libstate.ReadIndices) *HistoryReader23 { + return &HistoryReader23{ac: ac, ri: ri} +} + +func (hr *HistoryReader23) SetTx(tx kv.Tx) { hr.tx = tx } + +func (hr *HistoryReader23) SetRwTx(tx kv.RwTx) { + hr.ri.SetTx(tx) +} + +func (hr *HistoryReader23) SetTxNum(txNum uint64) { + hr.txNum = txNum + if hr.ri != nil { + hr.ri.SetTxNum(txNum) + } +} + +func (hr *HistoryReader23) FinishTx() error { + return hr.ri.FinishTx() +} + +func (hr *HistoryReader23) SetTrace(trace bool) { + hr.trace = trace +} + +func (hr *HistoryReader23) ReadAccountData(address common.Address) (*accounts.Account, error) { + if hr.ri != nil { + if err := hr.ri.ReadAccountData(address.Bytes()); err != nil { + return nil, err + } + } + enc, err := hr.ac.ReadAccountDataBeforeTxNum(address.Bytes(), hr.txNum, hr.tx /* roTx */) + if err != nil { + return nil, err + } + if len(enc) == 0 { + if hr.trace { + fmt.Printf("ReadAccountData [%x] => []\n", address) + } + return nil, nil + } + var a accounts.Account + if err := accounts.Deserialise2(&a, enc); err != nil { + return nil, fmt.Errorf("ReadAccountData(%x): %w", address, err) + } + + if hr.trace { + fmt.Printf("ReadAccountData [%x] => [nonce: %d, 
balance: %d, codeHash: %x]\n", address, a.Nonce, &a.Balance, a.CodeHash) + } + return &a, nil +} + +func (hr *HistoryReader23) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) { + if hr.ri != nil { + if err := hr.ri.ReadAccountStorage(address.Bytes(), key.Bytes()); err != nil { + return nil, err + } + } + enc, err := hr.ac.ReadAccountStorageBeforeTxNum(address.Bytes(), key.Bytes(), hr.txNum, hr.tx /* roTx */) + if err != nil { + return nil, err + } + if hr.trace { + if enc == nil { + fmt.Printf("ReadAccountStorage [%x] [%x] => []\n", address, key.Bytes()) + } else { + fmt.Printf("ReadAccountStorage [%x] [%x] => [%x]\n", address, key.Bytes(), enc) + } + } + if enc == nil { + return nil, nil + } + return enc, nil +} + +func (hr *HistoryReader23) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { + if hr.ri != nil { + if err := hr.ri.ReadAccountCode(address.Bytes()); err != nil { + return nil, err + } + } + enc, err := hr.ac.ReadAccountCodeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + if err != nil { + return nil, err + } + if hr.trace { + fmt.Printf("ReadAccountCode [%x] => [%x]\n", address, enc) + } + return enc, nil +} + +func (hr *HistoryReader23) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) { + if hr.ri != nil { + if err := hr.ri.ReadAccountCodeSize(address.Bytes()); err != nil { + return 0, err + } + } + size, err := hr.ac.ReadAccountCodeSizeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + if err != nil { + return 0, err + } + if hr.trace { + fmt.Printf("ReadAccountCodeSize [%x] => [%d]\n", address, size) + } + return size, nil +} + +func (hr *HistoryReader23) ReadAccountIncarnation(address common.Address) (uint64, error) { + return 0, nil +} diff --git a/core/state/history_reader_nostate.go b/core/state/history_reader_nostate.go index c077349293e..de8e81e3df2 100644 --- 
a/core/state/history_reader_nostate.go +++ b/core/state/history_reader_nostate.go @@ -3,6 +3,7 @@ package state import ( "encoding/binary" "fmt" + "math" "github.com/ledgerwatch/erigon-lib/kv" libstate "github.com/ledgerwatch/erigon-lib/state" @@ -46,11 +47,25 @@ func (hr *HistoryReaderNoState) SetTrace(trace bool) { } func (hr *HistoryReaderNoState) ReadAccountData(address common.Address) (*accounts.Account, error) { - enc, noState, stateTxNum, err := hr.ac.ReadAccountDataNoState(address.Bytes(), hr.txNum) + txKey, err := hr.tx.GetOne(kv.XAccount, address.Bytes()) if err != nil { return nil, err } + var stateTxNum uint64 = math.MaxUint64 + if txKey != nil { + stateTxNum = binary.BigEndian.Uint64(txKey) + } + var enc []byte + noState := false + if stateTxNum >= hr.txNum { + if enc, noState, err = hr.ac.ReadAccountDataNoState(address.Bytes(), hr.txNum); err != nil { + return nil, err + } + } if !noState { + if txKey == nil { + return nil, nil + } if !hr.rs.Done(stateTxNum) { hr.readError = true hr.stateTxNum = stateTxNum @@ -58,6 +73,9 @@ func (hr *HistoryReaderNoState) ReadAccountData(address common.Address) (*accoun } enc = hr.rs.Get(kv.PlainStateR, address.Bytes(), nil, stateTxNum) if enc == nil { + if hr.tx == nil { + return nil, fmt.Errorf("hr.tx is nil") + } if cap(hr.composite) < 8+20 { hr.composite = make([]byte, 8+20) } else if len(hr.composite) != 8+20 { @@ -84,7 +102,7 @@ func (hr *HistoryReaderNoState) ReadAccountData(address common.Address) (*accoun } if len(enc) == 0 { if hr.trace { - fmt.Printf("ReadAccountData [%x] => [], noState=%t, stateTxNum=%d, txNum: %d\n", address, noState, stateTxNum, hr.txNum) + fmt.Printf("ReadAccountData [%x] => [], noState=%t, txNum: %d\n", address, noState, hr.txNum) } return nil, nil } @@ -115,17 +133,38 @@ func (hr *HistoryReaderNoState) ReadAccountData(address common.Address) (*accoun a.Incarnation = bytesToUint64(enc[pos : pos+incBytes]) } if hr.trace { - fmt.Printf("ReadAccountData [%x] => [nonce: %d, balance: %d, 
codeHash: %x], noState=%t, stateTxNum=%d, txNum: %d\n", address, a.Nonce, &a.Balance, a.CodeHash, noState, stateTxNum, hr.txNum) + fmt.Printf("ReadAccountData [%x] => [nonce: %d, balance: %d, codeHash: %x], noState=%t, txNum: %d\n", address, a.Nonce, &a.Balance, a.CodeHash, noState, hr.txNum) } return &a, nil } func (hr *HistoryReaderNoState) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) { - enc, noState, stateTxNum, err := hr.ac.ReadAccountStorageNoState(address.Bytes(), key.Bytes(), hr.txNum) + if cap(hr.composite) < 20+32 { + hr.composite = make([]byte, 20+32) + } else { + hr.composite = hr.composite[:20+32] + } + copy(hr.composite, address.Bytes()) + copy(hr.composite[20:], key.Bytes()) + txKey, err := hr.tx.GetOne(kv.XStorage, hr.composite) if err != nil { return nil, err } + var stateTxNum uint64 = math.MaxUint64 + if txKey != nil { + stateTxNum = binary.BigEndian.Uint64(txKey) + } + var enc []byte + noState := false + if stateTxNum >= hr.txNum { + if enc, noState, err = hr.ac.ReadAccountStorageNoState(address.Bytes(), key.Bytes(), hr.txNum); err != nil { + return nil, err + } + } if !noState { + if txKey == nil { + return nil, nil + } if !hr.rs.Done(stateTxNum) { hr.readError = true hr.stateTxNum = stateTxNum @@ -134,6 +173,9 @@ func (hr *HistoryReaderNoState) ReadAccountStorage(address common.Address, incar enc = hr.rs.Get(kv.PlainStateR, address.Bytes(), key.Bytes(), stateTxNum) if enc == nil { + if hr.tx == nil { + return nil, fmt.Errorf("hr.tx is nil") + } if cap(hr.composite) < 8+20+8+32 { hr.composite = make([]byte, 8+20+8+32) } else if len(hr.composite) != 8+20+8+32 { @@ -163,11 +205,25 @@ func (hr *HistoryReaderNoState) ReadAccountStorage(address common.Address, incar } func (hr *HistoryReaderNoState) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { - enc, noState, stateTxNum, err := hr.ac.ReadAccountCodeNoState(address.Bytes(), hr.txNum) + txKey, 
err := hr.tx.GetOne(kv.XCode, address.Bytes()) if err != nil { return nil, err } + var stateTxNum uint64 = math.MaxUint64 + if txKey != nil { + stateTxNum = binary.BigEndian.Uint64(txKey) + } + var enc []byte + noState := false + if stateTxNum >= hr.txNum { + if enc, noState, err = hr.ac.ReadAccountCodeNoState(address.Bytes(), hr.txNum); err != nil { + return nil, err + } + } if !noState { + if txKey == nil { + return nil, nil + } if !hr.rs.Done(stateTxNum) { hr.readError = true hr.stateTxNum = stateTxNum @@ -175,6 +231,10 @@ func (hr *HistoryReaderNoState) ReadAccountCode(address common.Address, incarnat } enc = hr.rs.Get(kv.CodeR, codeHash.Bytes(), nil, stateTxNum) if enc == nil { + if hr.tx == nil { + fmt.Printf("ReadAccountCode [%x] %d\n", address, incarnation) + return nil, fmt.Errorf("hr.tx is nil") + } if cap(hr.composite) < 8+32 { hr.composite = make([]byte, 8+32) } else if len(hr.composite) != 8+32 { @@ -189,17 +249,31 @@ func (hr *HistoryReaderNoState) ReadAccountCode(address common.Address, incarnat } } if hr.trace { - fmt.Printf("ReadAccountCode [%x] => [%x], noState=%t, stateTxNum=%d, txNum: %d\n", address, enc, noState, stateTxNum, hr.txNum) + fmt.Printf("ReadAccountCode [%x] => [%x], noState=%t, txNum: %d\n", address, enc, noState, hr.txNum) } return enc, nil } func (hr *HistoryReaderNoState) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) { - size, noState, stateTxNum, err := hr.ac.ReadAccountCodeSizeNoState(address.Bytes(), hr.txNum) + txKey, err := hr.tx.GetOne(kv.XCode, address.Bytes()) if err != nil { return 0, err } + var stateTxNum uint64 = math.MaxUint64 + if txKey != nil { + stateTxNum = binary.BigEndian.Uint64(txKey) + } + var size int + noState := false + if stateTxNum >= hr.txNum { + if size, noState, err = hr.ac.ReadAccountCodeSizeNoState(address.Bytes(), hr.txNum); err != nil { + return 0, err + } + } if !noState { + if txKey == nil { + return 0, nil + } if !hr.rs.Done(stateTxNum) { 
hr.readError = true hr.stateTxNum = stateTxNum diff --git a/core/state/intra_block_state.go b/core/state/intra_block_state.go index ba4859c9103..b2944b2eb1e 100644 --- a/core/state/intra_block_state.go +++ b/core/state/intra_block_state.go @@ -172,7 +172,7 @@ func (sdb *IntraBlockState) AddRefund(gas uint64) { func (sdb *IntraBlockState) SubRefund(gas uint64) { sdb.journal.append(refundChange{prev: sdb.refund}) if gas > sdb.refund { - sdb.setErrorUnsafe(fmt.Errorf("Refund counter below zero")) + sdb.setErrorUnsafe(fmt.Errorf("refund counter below zero")) } sdb.refund -= gas } @@ -583,8 +583,8 @@ func (sdb *IntraBlockState) createObject(addr common.Address, previous *stateObj // CreateAccount is called during the EVM CREATE operation. The situation might arise that // a contract does the following: // -// 1. sends funds to sha(account ++ (nonce + 1)) -// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) +// 1. sends funds to sha(account ++ (nonce + 1)) +// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) // // Carrying over the balance ensures that Ether doesn't disappear. 
func (sdb *IntraBlockState) CreateAccount(addr common.Address, contractCreation bool) { @@ -659,8 +659,8 @@ func (sdb *IntraBlockState) GetRefund() uint64 { return sdb.refund } -func updateAccount(EIP161Enabled bool, stateWriter StateWriter, addr common.Address, stateObject *stateObject, isDirty bool) error { - emptyRemoval := EIP161Enabled && stateObject.empty() +func updateAccount(EIP161Enabled bool, isAura bool, stateWriter StateWriter, addr common.Address, stateObject *stateObject, isDirty bool) error { + emptyRemoval := EIP161Enabled && stateObject.empty() && (!isAura || addr != SystemAddress) if stateObject.suicided || (isDirty && emptyRemoval) { if err := stateWriter.DeleteAccount(addr, &stateObject.original); err != nil { return err @@ -732,7 +732,7 @@ func (sdb *IntraBlockState) FinalizeTx(chainRules *params.Rules, stateWriter Sta continue } - if err := updateAccount(chainRules.IsSpuriousDragon, stateWriter, addr, so, true); err != nil { + if err := updateAccount(chainRules.IsSpuriousDragon, chainRules.IsAura, stateWriter, addr, so, true); err != nil { return err } @@ -788,7 +788,7 @@ func (sdb *IntraBlockState) MakeWriteSet(chainRules *params.Rules, stateWriter S } for addr, stateObject := range sdb.stateObjects { _, isDirty := sdb.stateObjectsDirty[addr] - if err := updateAccount(chainRules.IsSpuriousDragon, stateWriter, addr, stateObject, isDirty); err != nil { + if err := updateAccount(chainRules.IsSpuriousDragon, chainRules.IsAura, stateWriter, addr, stateObject, isDirty); err != nil { return err } } diff --git a/core/state/plain_readonly.go b/core/state/plain_readonly.go index ac9dea345e3..1686976a4c2 100644 --- a/core/state/plain_readonly.go +++ b/core/state/plain_readonly.go @@ -202,6 +202,9 @@ func (s *PlainState) ReadAccountCode(address common.Address, incarnation uint64, return nil, nil } code, err := s.tx.GetOne(kv.Code, codeHash[:]) + if s.trace { + fmt.Printf("ReadAccountCode [%x %x] => [%x]\n", address, codeHash, code) + } if len(code) == 0 
{ return nil, nil } diff --git a/core/state/rw22.go b/core/state/rw22.go index 1f75b73d48d..39cc6411de3 100644 --- a/core/state/rw22.go +++ b/core/state/rw22.go @@ -3,16 +3,17 @@ package state import ( "bytes" "container/heap" + "context" "encoding/binary" "fmt" - "math/bits" - "sort" "sync" "unsafe" "github.com/google/btree" "github.com/holiman/uint256" - libcommon "github.com/ledgerwatch/erigon-lib/common" + common2 "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/length" + "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common" @@ -20,6 +21,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/shards" ) // ReadWriteSet contains ReadSet, WriteSet and BalanceIncrease of a transaction, @@ -29,25 +31,28 @@ type TxTask struct { TxNum uint64 BlockNum uint64 Rules *params.Rules - Header *types.Header Block *types.Block BlockHash common.Hash Sender *common.Address TxIndex int // -1 for block initialisation Final bool Tx types.Transaction + TxAsMessage types.Message BalanceIncreaseSet map[common.Address]uint256.Int ReadLists map[string]*KvList WriteLists map[string]*KvList AccountPrevs map[string][]byte AccountDels map[string]*accounts.Account StoragePrevs map[string][]byte - CodePrevs map[string][]byte + CodePrevs map[string]uint64 ResultsSize int64 Error error + Logs []*types.Log + TraceFroms map[common.Address]struct{} + TraceTos map[common.Address]struct{} } -type TxTaskQueue []TxTask +type TxTaskQueue []*TxTask func (h TxTaskQueue) Len() int { return len(h) @@ -62,7 +67,7 @@ func (h TxTaskQueue) Swap(i, j int) { } func (h *TxTaskQueue) Push(a interface{}) { - *h = append(*h, a.(TxTask)) + *h = append(*h, a.(*TxTask)) } func (h *TxTaskQueue) Pop() interface{} { @@ -76,7 +81,7 @@ const 
CodeSizeTable = "CodeSize" type State22 struct { lock sync.RWMutex receiveWork *sync.Cond - triggers map[uint64]TxTask + triggers map[uint64]*TxTask senderTxNums map[common.Address]uint64 triggerLock sync.RWMutex queue TxTaskQueue @@ -98,7 +103,7 @@ func stateItemLess(i, j StateItem) bool { func NewState22() *State22 { rs := &State22{ - triggers: map[uint64]TxTask{}, + triggers: map[uint64]*TxTask{}, senderTxNums: map[common.Address]uint64{}, changes: map[string]*btree.BTreeG[StateItem]{}, } @@ -162,19 +167,19 @@ func (rs *State22) Flush(rwTx kv.RwTx) error { return nil } -func (rs *State22) Schedule() (TxTask, bool) { +func (rs *State22) Schedule() (*TxTask, bool) { rs.queueLock.Lock() defer rs.queueLock.Unlock() for !rs.finished && rs.queue.Len() == 0 { rs.receiveWork.Wait() } if rs.queue.Len() > 0 { - return heap.Pop(&rs.queue).(TxTask), true + return heap.Pop(&rs.queue).(*TxTask), true } - return TxTask{}, false + return nil, false } -func (rs *State22) RegisterSender(txTask TxTask) bool { +func (rs *State22) RegisterSender(txTask *TxTask) bool { rs.triggerLock.Lock() defer rs.triggerLock.Unlock() lastTxNum, deferral := rs.senderTxNums[*txTask.Sender] @@ -211,11 +216,14 @@ func (rs *State22) CommitTxNum(sender *common.Address, txNum uint64) uint64 { return count } -func (rs *State22) AddWork(txTask TxTask) { +func (rs *State22) AddWork(txTask *TxTask) { txTask.BalanceIncreaseSet = nil txTask.ReadLists = nil txTask.WriteLists = nil txTask.ResultsSize = 0 + txTask.Logs = nil + txTask.TraceFroms = nil + txTask.TraceTos = nil rs.queueLock.Lock() defer rs.queueLock.Unlock() heap.Push(&rs.queue, txTask) @@ -229,77 +237,13 @@ func (rs *State22) Finish() { rs.receiveWork.Broadcast() } -func serialise2(a *accounts.Account) []byte { - var l int - l++ - if a.Nonce > 0 { - l += (bits.Len64(a.Nonce) + 7) / 8 - } - l++ - if !a.Balance.IsZero() { - l += a.Balance.ByteLen() - } - l++ - if !a.IsEmptyCodeHash() { - l += 32 - } - l++ - if a.Incarnation > 0 { - l += 
(bits.Len64(a.Incarnation) + 7) / 8 - } - value := make([]byte, l) - pos := 0 - if a.Nonce == 0 { - value[pos] = 0 - pos++ - } else { - nonceBytes := (bits.Len64(a.Nonce) + 7) / 8 - value[pos] = byte(nonceBytes) - var nonce = a.Nonce - for i := nonceBytes; i > 0; i-- { - value[pos+i] = byte(nonce) - nonce >>= 8 - } - pos += nonceBytes + 1 - } - if a.Balance.IsZero() { - value[pos] = 0 - pos++ - } else { - balanceBytes := a.Balance.ByteLen() - value[pos] = byte(balanceBytes) - pos++ - a.Balance.WriteToSlice(value[pos : pos+balanceBytes]) - pos += balanceBytes - } - if a.IsEmptyCodeHash() { - value[pos] = 0 - pos++ - } else { - value[pos] = 32 - pos++ - copy(value[pos:pos+32], a.CodeHash[:]) - pos += 32 - } - if a.Incarnation == 0 { - value[pos] = 0 - } else { - incBytes := (bits.Len64(a.Incarnation) + 7) / 8 - value[pos] = byte(incBytes) - var inc = a.Incarnation - for i := incBytes; i > 0; i-- { - value[pos+i] = byte(inc) - inc >>= 8 - } - } - return value -} - -func (rs *State22) Apply(emptyRemoval bool, roTx kv.Tx, txTask TxTask, agg *libstate.Aggregator22) error { +func (rs *State22) Apply(roTx kv.Tx, txTask *TxTask, agg *libstate.Aggregator22) error { + emptyRemoval := txTask.Rules.IsSpuriousDragon rs.lock.Lock() defer rs.lock.Unlock() agg.SetTxNum(txTask.TxNum) - for addr, increase := range txTask.BalanceIncreaseSet { + for addr := range txTask.BalanceIncreaseSet { + increase := txTask.BalanceIncreaseSet[addr] enc0 := rs.get(kv.PlainState, addr.Bytes()) if enc0 == nil { var err error @@ -314,7 +258,7 @@ func (rs *State22) Apply(emptyRemoval bool, roTx kv.Tx, txTask TxTask, agg *libs } if len(enc0) > 0 { // Need to convert before balance increase - enc0 = serialise2(&a) + enc0 = accounts.Serialise2(&a) } a.Balance.Add(&a.Balance, &increase) var enc1 []byte @@ -335,7 +279,7 @@ func (rs *State22) Apply(emptyRemoval bool, roTx kv.Tx, txTask TxTask, agg *libs addr1 := make([]byte, len(addr)+8) copy(addr1, addr) binary.BigEndian.PutUint64(addr1[len(addr):], 
original.Incarnation) - prev := serialise2(original) + prev := accounts.Serialise2(original) if err := agg.AddAccountPrev(addr, prev); err != nil { return err } @@ -364,30 +308,31 @@ func (rs *State22) Apply(emptyRemoval bool, roTx kv.Tx, txTask TxTask, agg *libs if !bytes.HasPrefix(k, addr1) { k = nil } - rs.changes[kv.PlainState].AscendGreaterOrEqual(StateItem{key: addr1}, func(item StateItem) bool { - if !bytes.HasPrefix(item.key, addr1) { - return false - } - for ; e == nil && k != nil && bytes.Compare(k, item.key) <= 0; k, v, e = cursor.Next() { - if !bytes.HasPrefix(k, addr1) { - k = nil + psChanges := rs.changes[kv.PlainState] + if psChanges != nil { + psChanges.AscendGreaterOrEqual(StateItem{key: addr1}, func(item StateItem) bool { + if !bytes.HasPrefix(item.key, addr1) { + return false } - if !bytes.Equal(k, item.key) { - if e = agg.AddStoragePrev(addr, libcommon.Copy(k[28:]), libcommon.Copy(v)); e != nil { - return false + for ; e == nil && k != nil && bytes.HasPrefix(k, addr1) && bytes.Compare(k, item.key) <= 0; k, v, e = cursor.Next() { + if !bytes.Equal(k, item.key) { + // Skip the cursor item when the key is equal, i.e. 
prefer the item from the changes tree + if e = agg.AddStoragePrev(addr, k[28:], v); e != nil { + return false + } } } - } - if e != nil { - return false - } - if e = agg.AddStoragePrev(addr, item.key[28:], item.val); e != nil { - return false - } - return true - }) + if e != nil { + return false + } + if e = agg.AddStoragePrev(addr, item.key[28:], item.val); e != nil { + return false + } + return true + }) + } for ; e == nil && k != nil && bytes.HasPrefix(k, addr1); k, v, e = cursor.Next() { - if e = agg.AddStoragePrev(addr, libcommon.Copy(k[28:]), libcommon.Copy(v)); e != nil { + if e = agg.AddStoragePrev(addr, k[28:], v); e != nil { return e } } @@ -406,22 +351,161 @@ func (rs *State22) Apply(emptyRemoval bool, roTx kv.Tx, txTask TxTask, agg *libs return err } } - for addrS, val := range txTask.CodePrevs { - if err := agg.AddCodePrev([]byte(addrS), val); err != nil { + for addrS, incarnation := range txTask.CodePrevs { + addr := []byte(addrS) + k := dbutils.PlainGenerateStoragePrefix(addr, incarnation) + codeHash := rs.get(kv.PlainContractCode, k) + if codeHash == nil { + var err error + codeHash, err = roTx.GetOne(kv.PlainContractCode, k) + if err != nil { + return err + } + } + var codePrev []byte + if codeHash != nil { + codePrev = rs.get(kv.Code, codeHash) + if codePrev == nil { + var err error + codePrev, err = roTx.GetOne(kv.Code, codeHash) + if err != nil { + return err + } + } + } + if err := agg.AddCodePrev(addr, codePrev); err != nil { return err } } + if txTask.TraceFroms != nil { + for addr := range txTask.TraceFroms { + if err := agg.AddTraceFrom(addr.Bytes()); err != nil { + return err + } + } + } + if txTask.TraceTos != nil { + for addr := range txTask.TraceTos { + if err := agg.AddTraceTo(addr.Bytes()); err != nil { + return err + } + } + } + for _, log := range txTask.Logs { + if err := agg.AddLogAddr(log.Address[:]); err != nil { + return fmt.Errorf("adding event log for addr %x: %w", log.Address, err) + } + for _, topic := range log.Topics { + 
if err := agg.AddLogTopic(topic[:]); err != nil { + return fmt.Errorf("adding event log for topic %x: %w", topic, err) + } + } + } if err := agg.FinishTx(); err != nil { return err } - if txTask.WriteLists == nil { - return nil + if txTask.WriteLists != nil { + for table, list := range txTask.WriteLists { + for i, key := range list.Keys { + val := list.Vals[i] + rs.put(table, key, val) + } + } } - for table, list := range txTask.WriteLists { - for i, key := range list.Keys { - val := list.Vals[i] - rs.put(table, key, val) + return nil +} + +func recoverCodeHashPlain(acc *accounts.Account, db kv.Tx, key []byte) { + var address common.Address + copy(address[:], key) + if acc.Incarnation > 0 && acc.IsEmptyCodeHash() { + if codeHash, err2 := db.GetOne(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], acc.Incarnation)); err2 == nil { + copy(acc.CodeHash[:], codeHash) + } + } +} + +func (rs *State22) Unwind(ctx context.Context, tx kv.RwTx, txUnwindTo uint64, agg *libstate.Aggregator22, accumulator *shards.Accumulator) error { + agg.SetTx(tx) + var currentInc uint64 + if err := agg.Unwind(ctx, txUnwindTo, func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + if len(k) == length.Addr { + if len(v) > 0 { + var acc accounts.Account + if err := accounts.Deserialise2(&acc, v); err != nil { + return fmt.Errorf("%w, %x", err, v) + } + currentInc = acc.Incarnation + // Fetch the code hash + recoverCodeHashPlain(&acc, tx, k) + var address common.Address + copy(address[:], k) + + // cleanup contract code bucket + original, err := NewPlainStateReader(tx).ReadAccountData(address) + if err != nil { + return fmt.Errorf("read account for %x: %w", address, err) + } + if original != nil { + // clean up all the code incarnations original incarnation and the new one + for incarnation := original.Incarnation; incarnation > acc.Incarnation && incarnation > 0; incarnation-- { + err = tx.Delete(kv.PlainContractCode, 
dbutils.PlainGenerateStoragePrefix(address[:], incarnation)) + if err != nil { + return fmt.Errorf("writeAccountPlain for %x: %w", address, err) + } + } + } + + newV := make([]byte, acc.EncodingLengthForStorage()) + acc.EncodeForStorage(newV) + if accumulator != nil { + accumulator.ChangeAccount(address, acc.Incarnation, newV) + } + if err := next(k, k, newV); err != nil { + return err + } + } else { + var address common.Address + copy(address[:], k) + original, err := NewPlainStateReader(tx).ReadAccountData(address) + if err != nil { + return err + } + if original != nil { + currentInc = original.Incarnation + } else { + currentInc = 1 + } + + if accumulator != nil { + accumulator.DeleteAccount(address) + } + if err := next(k, k, nil); err != nil { + return err + } + } + return nil + } + if accumulator != nil { + var address common.Address + var location common.Hash + copy(address[:], k[:length.Addr]) + copy(location[:], k[length.Addr:]) + accumulator.ChangeStorage(address, currentInc, location, common2.Copy(v)) + } + newKeys := dbutils.PlainGenerateCompositeStorageKey(k[:20], currentInc, k[20:]) + if len(v) > 0 { + if err := next(k, newKeys, v); err != nil { + return err + } + } else { + if err := next(k, newKeys, nil); err != nil { + return err + } } + return nil + }); err != nil { + return err } return nil } @@ -465,8 +549,6 @@ func (rs *State22) ReadsValid(readLists map[string]*KvList) bool { } else if !bytes.Equal(val, item.val) { return false } - } else { - //fmt.Printf("key [%x] => [%x] not present in changes\n", key, val) } } } @@ -498,7 +580,7 @@ type StateWriter22 struct { accountPrevs map[string][]byte accountDels map[string]*accounts.Account storagePrevs map[string][]byte - codePrevs map[string][]byte + codePrevs map[string]uint64 } func NewStateWriter22(rs *State22) *StateWriter22 { @@ -513,7 +595,7 @@ func NewStateWriter22(rs *State22) *StateWriter22 { accountPrevs: map[string][]byte{}, accountDels: map[string]*accounts.Account{}, storagePrevs: 
map[string][]byte{}, - codePrevs: map[string][]byte{}, + codePrevs: map[string]uint64{}, } } @@ -531,17 +613,14 @@ func (w *StateWriter22) ResetWriteSet() { w.accountPrevs = map[string][]byte{} w.accountDels = map[string]*accounts.Account{} w.storagePrevs = map[string][]byte{} - w.codePrevs = map[string][]byte{} + w.codePrevs = map[string]uint64{} } func (w *StateWriter22) WriteSet() map[string]*KvList { - for _, list := range w.writeLists { - sort.Sort(list) - } return w.writeLists } -func (w *StateWriter22) PrevAndDels() (map[string][]byte, map[string]*accounts.Account, map[string][]byte, map[string][]byte) { +func (w *StateWriter22) PrevAndDels() (map[string][]byte, map[string]*accounts.Account, map[string][]byte, map[string]uint64) { return w.accountPrevs, w.accountDels, w.storagePrevs, w.codePrevs } @@ -553,7 +632,7 @@ func (w *StateWriter22) UpdateAccountData(address common.Address, original, acco w.writeLists[kv.PlainState].Vals = append(w.writeLists[kv.PlainState].Vals, value) var prev []byte if original.Initialised { - prev = serialise2(original) + prev = accounts.Serialise2(original) } w.accountPrevs[string(address.Bytes())] = prev return nil @@ -567,7 +646,7 @@ func (w *StateWriter22) UpdateAccountCode(address common.Address, incarnation ui w.writeLists[kv.PlainContractCode].Keys = append(w.writeLists[kv.PlainContractCode].Keys, dbutils.PlainGenerateStoragePrefix(address[:], incarnation)) w.writeLists[kv.PlainContractCode].Vals = append(w.writeLists[kv.PlainContractCode].Vals, codeHash.Bytes()) } - w.codePrevs[string(address.Bytes())] = nil + w.codePrevs[string(address.Bytes())] = incarnation return nil } @@ -603,14 +682,12 @@ func (w *StateWriter22) CreateContract(address common.Address) error { } type StateReader22 struct { - tx kv.Tx - txNum uint64 - trace bool - rs *State22 - readError bool - stateTxNum uint64 - composite []byte - readLists map[string]*KvList + tx kv.Tx + txNum uint64 + trace bool + rs *State22 + composite []byte + readLists 
map[string]*KvList } func NewStateReader22(rs *State22) *StateReader22 { @@ -643,9 +720,6 @@ func (r *StateReader22) ResetReadSet() { } func (r *StateReader22) ReadSet() map[string]*KvList { - for _, list := range r.readLists { - sort.Sort(list) - } return r.readLists } @@ -663,7 +737,7 @@ func (r *StateReader22) ReadAccountData(address common.Address) (*accounts.Accou } } r.readLists[kv.PlainState].Keys = append(r.readLists[kv.PlainState].Keys, address.Bytes()) - r.readLists[kv.PlainState].Vals = append(r.readLists[kv.PlainState].Vals, common.CopyBytes(enc)) + r.readLists[kv.PlainState].Vals = append(r.readLists[kv.PlainState].Vals, common2.Copy(enc)) if len(enc) == 0 { return nil, nil } @@ -695,8 +769,8 @@ func (r *StateReader22) ReadAccountStorage(address common.Address, incarnation u return nil, err } } - r.readLists[kv.PlainState].Keys = append(r.readLists[kv.PlainState].Keys, common.CopyBytes(r.composite)) - r.readLists[kv.PlainState].Vals = append(r.readLists[kv.PlainState].Vals, common.CopyBytes(enc)) + r.readLists[kv.PlainState].Keys = append(r.readLists[kv.PlainState].Keys, common2.Copy(r.composite)) + r.readLists[kv.PlainState].Vals = append(r.readLists[kv.PlainState].Vals, common2.Copy(enc)) if r.trace { if enc == nil { fmt.Printf("ReadAccountStorage [%x] [%x] => [], txNum: %d\n", address, key.Bytes(), r.txNum) @@ -720,7 +794,7 @@ func (r *StateReader22) ReadAccountCode(address common.Address, incarnation uint } } r.readLists[kv.Code].Keys = append(r.readLists[kv.Code].Keys, address.Bytes()) - r.readLists[kv.Code].Vals = append(r.readLists[kv.Code].Vals, common.CopyBytes(enc)) + r.readLists[kv.Code].Vals = append(r.readLists[kv.Code].Vals, common2.Copy(enc)) if r.trace { fmt.Printf("ReadAccountCode [%x] => [%x], txNum: %d\n", address, enc, r.txNum) } @@ -757,7 +831,7 @@ func (r *StateReader22) ReadAccountIncarnation(address common.Address) (uint64, } } r.readLists[kv.IncarnationMap].Keys = append(r.readLists[kv.IncarnationMap].Keys, address.Bytes()) - 
r.readLists[kv.IncarnationMap].Vals = append(r.readLists[kv.IncarnationMap].Vals, common.CopyBytes(enc)) + r.readLists[kv.IncarnationMap].Vals = append(r.readLists[kv.IncarnationMap].Vals, common2.Copy(enc)) if len(enc) == 0 { return 0, nil } diff --git a/core/state/state_recon_writer.go b/core/state/state_recon_writer.go index 4363c25c14c..80facf93876 100644 --- a/core/state/state_recon_writer.go +++ b/core/state/state_recon_writer.go @@ -43,18 +43,18 @@ func (i ReconStateItem) Less(than btree.Item) bool { type ReconState struct { lock sync.RWMutex doneBitmap roaring64.Bitmap - triggers map[uint64][]TxTask - workCh chan TxTask + triggers map[uint64][]*TxTask + workCh chan *TxTask queue TxTaskQueue changes map[string]*btree.BTree // table => [] (txNum; key1; key2; val) sizeEstimate uint64 rollbackCount uint64 } -func NewReconState(workCh chan TxTask) *ReconState { +func NewReconState(workCh chan *TxTask) *ReconState { rs := &ReconState{ workCh: workCh, - triggers: map[uint64][]TxTask{}, + triggers: map[uint64][]*TxTask{}, changes: map[string]*btree.BTree{}, } return rs @@ -102,7 +102,7 @@ func (rs *ReconState) Flush(rwTx kv.RwTx) error { composite = make([]byte, 8+len(item.key1)) } else { composite = make([]byte, 8+len(item.key1)+8+len(item.key2)) - binary.BigEndian.PutUint64(composite[8+len(item.key1):], 1) + binary.BigEndian.PutUint64(composite[8+len(item.key1):], FirstContractIncarnation) copy(composite[8+len(item.key1)+8:], item.key2) } binary.BigEndian.PutUint64(composite, item.txNum) @@ -121,7 +121,7 @@ func (rs *ReconState) Flush(rwTx kv.RwTx) error { return nil } -func (rs *ReconState) Schedule() (TxTask, bool) { +func (rs *ReconState) Schedule() (*TxTask, bool) { rs.lock.Lock() defer rs.lock.Unlock() for rs.queue.Len() < 16 { @@ -133,9 +133,9 @@ func (rs *ReconState) Schedule() (TxTask, bool) { heap.Push(&rs.queue, txTask) } if rs.queue.Len() > 0 { - return heap.Pop(&rs.queue).(TxTask), true + return heap.Pop(&rs.queue).(*TxTask), true } - return TxTask{}, 
false + return nil, false } func (rs *ReconState) CommitTxNum(txNum uint64) { @@ -150,7 +150,7 @@ func (rs *ReconState) CommitTxNum(txNum uint64) { rs.doneBitmap.Add(txNum) } -func (rs *ReconState) RollbackTx(txTask TxTask, dependency uint64) { +func (rs *ReconState) RollbackTx(txTask *TxTask, dependency uint64) { rs.lock.Lock() defer rs.lock.Unlock() if rs.doneBitmap.Contains(dependency) { @@ -188,9 +188,11 @@ func (rs *ReconState) SizeEstimate() uint64 { } type StateReconWriter struct { - ac *libstate.Aggregator22Context - rs *ReconState - txNum uint64 + ac *libstate.Aggregator22Context + rs *ReconState + txNum uint64 + tx kv.Tx + composite []byte } func NewStateReconWriter(ac *libstate.Aggregator22Context, rs *ReconState) *StateReconWriter { @@ -204,16 +206,25 @@ func (w *StateReconWriter) SetTxNum(txNum uint64) { w.txNum = txNum } +func (w *StateReconWriter) SetTx(tx kv.Tx) { + w.tx = tx +} + func (w *StateReconWriter) UpdateAccountData(address common.Address, original, account *accounts.Account) error { - found, txNum := w.ac.MaxAccountsTxNum(address.Bytes()) - if !found { + txKey, err := w.tx.GetOne(kv.XAccount, address.Bytes()) + if err != nil { + return err + } + if txKey == nil { return nil } - if txNum != w.txNum { - //fmt.Printf("no change account [%x] txNum = %d\n", address, txNum) + if stateTxNum := binary.BigEndian.Uint64(txKey); stateTxNum != w.txNum { return nil } value := make([]byte, account.EncodingLengthForStorage()) + if account.Incarnation > 0 { + account.Incarnation = FirstContractIncarnation + } account.EncodeForStorage(value) //fmt.Printf("account [%x]=>{Balance: %d, Nonce: %d, Root: %x, CodeHash: %x} txNum: %d\n", address, &account.Balance, account.Nonce, account.Root, account.CodeHash, w.txNum) w.rs.Put(kv.PlainStateR, address[:], nil, value, w.txNum) @@ -221,17 +232,19 @@ func (w *StateReconWriter) UpdateAccountData(address common.Address, original, a } func (w *StateReconWriter) UpdateAccountCode(address common.Address, incarnation 
uint64, codeHash common.Hash, code []byte) error { - found, txNum := w.ac.MaxCodeTxNum(address.Bytes()) - if !found { + txKey, err := w.tx.GetOne(kv.XCode, address.Bytes()) + if err != nil { + return err + } + if txKey == nil { return nil } - if txNum != w.txNum { - //fmt.Printf("no change code [%x] txNum = %d\n", address, txNum) + if stateTxNum := binary.BigEndian.Uint64(txKey); stateTxNum != w.txNum { return nil } w.rs.Put(kv.CodeR, codeHash[:], nil, code, w.txNum) if len(code) > 0 { - //fmt.Printf("code [%x] => [%x] CodeHash: %x, txNum: %d\n", address, code, codeHash, w.txNum) + //fmt.Printf("code [%x] => %d CodeHash: %x, txNum: %d\n", address, len(code), codeHash, w.txNum) w.rs.Put(kv.PlainContractR, dbutils.PlainGenerateStoragePrefix(address[:], FirstContractIncarnation), nil, codeHash[:], w.txNum) } return nil @@ -242,17 +255,30 @@ func (w *StateReconWriter) DeleteAccount(address common.Address, original *accou } func (w *StateReconWriter) WriteAccountStorage(address common.Address, incarnation uint64, key *common.Hash, original, value *uint256.Int) error { - found, txNum := w.ac.MaxStorageTxNum(address.Bytes(), key.Bytes()) - if !found { - //fmt.Printf("no found storage [%x] [%x]\n", address, *key) + if cap(w.composite) < 20+32 { + w.composite = make([]byte, 20+32) + } else { + w.composite = w.composite[:20+32] + } + copy(w.composite, address.Bytes()) + copy(w.composite[20:], key.Bytes()) + txKey, err := w.tx.GetOne(kv.XStorage, w.composite) + if err != nil { + return err + } + if txKey == nil { return nil } - if txNum != w.txNum { - //fmt.Printf("no change storage [%x] [%x] txNum = %d\n", address, *key, txNum) + if stateTxNum := binary.BigEndian.Uint64(txKey); stateTxNum != w.txNum { + return nil + } + found := w.ac.IsMaxStorageTxNum(address.Bytes(), key.Bytes(), w.txNum) + if !found { + //fmt.Printf("no found storage [%x] [%x]\n", address, *key) return nil } if !value.IsZero() { - //fmt.Printf("storage [%x] [%x] => [%x], txNum: %d\n", address, *key, v, 
w.txNum) + //fmt.Printf("storage [%x] [%x] => [%x], txNum: %d\n", address, *key, value.Bytes(), w.txNum) w.rs.Put(kv.PlainStateR, address.Bytes(), key.Bytes(), value.Bytes(), w.txNum) } return nil diff --git a/core/state_processor.go b/core/state_processor.go index c203685bb9e..9700485dc1f 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -86,7 +86,7 @@ func applyTransaction(config *params.ChainConfig, gp *GasPool, statedb *state.In // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid. -func ApplyTransaction(config *params.ChainConfig, blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, author *common.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, cfg vm.Config, contractHasTEVM func(contractHash common.Hash) (bool, error)) (*types.Receipt, []byte, error) { +func ApplyTransaction(config *params.ChainConfig, blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, author *common.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, []byte, error) { // Create a new context to be used in the EVM environment // Add addresses to access list if applicable @@ -98,7 +98,7 @@ func ApplyTransaction(config *params.ChainConfig, blockHashFunc func(n uint64) c if tx.IsStarkNet() { vmenv = &vm.CVMAdapter{Cvm: vm.NewCVM(ibs)} } else { - blockContext := NewEVMBlockContext(header, blockHashFunc, engine, author, contractHasTEVM) + blockContext := NewEVMBlockContext(header, blockHashFunc, engine, author) vmenv = vm.NewEVM(blockContext, vm.TxContext{}, ibs, config, cfg) } diff --git a/core/state_transition.go b/core/state_transition.go index 43a17bab06d..33d4892b553 100644 --- 
a/core/state_transition.go +++ b/core/state_transition.go @@ -45,8 +45,10 @@ The state transitioning model does all the necessary work to work out a valid ne 3) Create a new state object if the recipient is \0*32 4) Value transfer == If contract creation == - 4a) Attempt to run transaction data - 4b) If valid, use result as code for the new state object + + 4a) Attempt to run transaction data + 4b) If valid, use result as code for the new state object + == end == 5) Run Script section 6) Derive new state root @@ -257,7 +259,7 @@ func (st *StateTransition) buyGas(gasBailout bool) error { return fmt.Errorf("%w: address %v", ErrInsufficientFunds, st.msg.From().Hex()) } } - var subBalance bool = false + var subBalance = false if have, want := st.state.GetBalance(st.msg.From()), balanceCheck; have.Cmp(want) < 0 { if !gasBailout { return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From().Hex(), have, want) @@ -282,7 +284,7 @@ func (st *StateTransition) buyGas(gasBailout bool) error { func CheckEip1559TxGasFeeCap(from common.Address, gasFeeCap, tip, baseFee *uint256.Int) error { if gasFeeCap.Cmp(tip) < 0 { return fmt.Errorf("%w: address %v, tip: %s, gasFeeCap: %s", ErrTipAboveFeeCap, - from.Hex(), gasFeeCap, tip) + from.Hex(), tip, gasFeeCap) } if baseFee != nil && gasFeeCap.Cmp(baseFee) < 0 { return fmt.Errorf("%w: address %v, gasFeeCap: %s baseFee: %s", ErrFeeCapTooLow, @@ -332,13 +334,13 @@ func (st *StateTransition) preCheck(gasBailout bool) error { // TransitionDb will transition the state by applying the current message and // returning the evm execution result with following fields. // -// - used gas: -// total gas used (including gas being refunded) -// - returndata: -// the returned data from evm -// - concrete execution error: -// various **EVM** error which aborts the execution, -// e.g. 
ErrOutOfGas, ErrExecutionReverted +// - used gas: +// total gas used (including gas being refunded) +// - returndata: +// the returned data from evm +// - concrete execution error: +// various **EVM** error which aborts the execution, +// e.g. ErrOutOfGas, ErrExecutionReverted // // However if any consensus issue encountered, return the error directly with // nil evm execution result. @@ -375,8 +377,20 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (*Executi homestead := st.evm.ChainRules().IsHomestead istanbul := st.evm.ChainRules().IsIstanbul london := st.evm.ChainRules().IsLondon + nano := st.evm.ChainRules().IsNano contractCreation := msg.To() == nil + if nano { + for _, blackListAddr := range types.NanoBlackList { + if blackListAddr == sender.Address() { + return nil, fmt.Errorf("block blacklist account") + } + if msg.To() != nil && *msg.To() == blackListAddr { + return nil, fmt.Errorf("block blacklist account") + } + } + } + // Check clauses 4-5, subtract intrinsic gas if everything is correct gas, err := IntrinsicGas(st.data, st.msg.AccessList(), contractCreation, homestead, istanbul) if err != nil { diff --git a/core/systemcontracts/const.go b/core/systemcontracts/const.go index 7cb6a5d2122..22382ee9a7c 100644 --- a/core/systemcontracts/const.go +++ b/core/systemcontracts/const.go @@ -14,4 +14,5 @@ var ( GovHubContract = common.HexToAddress("0x0000000000000000000000000000000000001007") TokenManagerContract = common.HexToAddress("0x0000000000000000000000000000000000001008") CrossChainContract = common.HexToAddress("0x0000000000000000000000000000000000002000") + StakingContract = common.HexToAddress("0x0000000000000000000000000000000000002001") ) diff --git a/core/systemcontracts/upgrade.go b/core/systemcontracts/upgrade.go index a3c77bb662d..41c37fce348 100644 --- a/core/systemcontracts/upgrade.go +++ b/core/systemcontracts/upgrade.go @@ -46,6 +46,8 @@ var ( brunoUpgrade = make(map[string]*Upgrade) eulerUpgrade = 
make(map[string]*Upgrade) + + gibbsUpgrade = make(map[string]*Upgrade) ) func init() { @@ -361,6 +363,38 @@ func init() { }, } + gibbsUpgrade[chapelNet] = &Upgrade{ + UpgradeName: "gibbs", + Configs: []*UpgradeConfig{ + { + ContractAddr: TokenHubContract, + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/9d45b31c12b2c04757284717f4351cb44e81a3a7", + Code: "60806040526004361061036f5760003560e01c80639a854bbd116101c6578063bd466461116100f7578063f014847211610095578063fc1a598f1161006f578063fc1a598f14610d58578063fc3e590814610a3b578063fd6a687914610d8b578063ff9c0027146106ec576103b7565b8063f014847214610d19578063f9a2bbc714610d2e578063fa9e915914610d43576103b7565b8063d9e6dae9116100d1578063d9e6dae91461066e578063dc927faf14610cda578063e1c7392a14610cef578063ebf71d5314610d04576103b7565b8063bd46646114610c0d578063c81b166214610c40578063c8509d8114610c55576103b7565b8063aa7415f511610164578063b99328c51161013e578063b99328c514610b77578063b9fd21e314610bb0578063ba35ead614610bc5578063bbface1f14610bda576103b7565b8063aa7415f514610a50578063ab51bb9614610a97578063ac43175114610aac576103b7565b8063a1a11bf5116101a0578063a1a11bf5146109fc578063a496fba214610a11578063a78abc1614610a26578063a7c9f02d14610a3b576103b7565b80639a854bbd146109995780639a99b4f0146109ae5780639dc09262146109e7576103b7565b806361368475116102a0578063727be1f81161023e578063831d65d111610218578063831d65d1146108c05780638b87b21f146105ed5780638eff336c1461094557806396713da914610984576103b7565b8063727be1f81461086c57806375d47a0a146108965780637942fd05146108ab576103b7565b80636e47b4821161027a5780636e47b4821461082d57806370fd5bad146106ec578063718a8aa81461084257806371d3086314610857576103b7565b8063613684751461066e57806366dea52a146106ec5780636e05652014610701576103b7565b806343a368b91161030d57806350432d32116102e757806350432d321461068357806351e806721461069857806359b92789146106ad5780635d499b1b146106d7576103b7565b806343a368b91461062d578063493279b1146106425780634bf6c8821461066e576103b7565b8063149d14d911610349578063149d14d9146105155780633d7132231
461053c5780633dffc387146105ed57806343756e5c14610618576103b7565b80630bee7a67146103bc5780630e2374a5146103ea5780631182b8751461041b576103b7565b366103b75734156103b5576040805133815234602082015281517f6c98249d85d88c3753a04a22230f595e4dc8d3dc86c34af35deeeedc861b89db929181900390910190a15b005b600080fd5b3480156103c857600080fd5b506103d1610da0565b6040805163ffffffff9092168252519081900360200190f35b3480156103f657600080fd5b506103ff610da5565b604080516001600160a01b039092168252519081900360200190f35b34801561042757600080fd5b506104a06004803603604081101561043e57600080fd5b60ff8235169190810190604081016020820135600160201b81111561046257600080fd5b82018360208201111561047457600080fd5b803590602001918460018302840111600160201b8311171561049557600080fd5b509092509050610dab565b6040805160208082528351818301528351919283929083019185019080838360005b838110156104da5781810151838201526020016104c2565b50505050905090810190601f1680156105075780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561052157600080fd5b5061052a610ed9565b60408051918252519081900360200190f35b34801561054857600080fd5b506103ff6004803603602081101561055f57600080fd5b810190602081018135600160201b81111561057957600080fd5b82018360208201111561058b57600080fd5b803590602001918460018302840111600160201b831117156105ac57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610edf945050505050565b3480156105f957600080fd5b50610602610f03565b6040805160ff9092168252519081900360200190f35b34801561062457600080fd5b506103ff610f08565b34801561063957600080fd5b5061052a610f0e565b34801561064e57600080fd5b50610657610f1a565b6040805161ffff9092168252519081900360200190f35b34801561067a57600080fd5b50610602610f1f565b34801561068f57600080fd5b5061052a610f24565b3480156106a457600080fd5b506103ff610f2f565b3480156106b957600080fd5b506103ff600480360360208110156106d057600080fd5b5035610f35565b3480156106e357600080fd5b5061052a610f50565b3480156106f857600080fd5b50610602610f59565b610819600480360360808110156107175
7600080fd5b810190602081018135600160201b81111561073157600080fd5b82018360208201111561074357600080fd5b803590602001918460208302840111600160201b8311171561076457600080fd5b919390929091602081019035600160201b81111561078157600080fd5b82018360208201111561079357600080fd5b803590602001918460208302840111600160201b831117156107b457600080fd5b919390929091602081019035600160201b8111156107d157600080fd5b8201836020820111156107e357600080fd5b803590602001918460208302840111600160201b8311171561080457600080fd5b91935091503567ffffffffffffffff16610f5e565b604080519115158252519081900360200190f35b34801561083957600080fd5b506103ff611433565b34801561084e57600080fd5b50610602611439565b34801561086357600080fd5b5061052a61143e565b34801561087857600080fd5b506108196004803603602081101561088f57600080fd5b5035611444565b3480156108a257600080fd5b506103ff6114be565b3480156108b757600080fd5b506106026114c4565b3480156108cc57600080fd5b506103b5600480360360408110156108e357600080fd5b60ff8235169190810190604081016020820135600160201b81111561090757600080fd5b82018360208201111561091957600080fd5b803590602001918460018302840111600160201b8311171561093a57600080fd5b5090925090506114c9565b34801561095157600080fd5b506103b56004803603606081101561096857600080fd5b508035906001600160a01b036020820135169060400135611612565b34801561099057600080fd5b50610602611698565b3480156109a557600080fd5b5061052a61169d565b3480156109ba57600080fd5b5061052a600480360360408110156109d157600080fd5b506001600160a01b0381351690602001356116a9565b3480156109f357600080fd5b506103ff6117e7565b348015610a0857600080fd5b506103ff6117ed565b348015610a1d57600080fd5b506106026117f3565b348015610a3257600080fd5b506108196117f8565b348015610a4757600080fd5b50610602611801565b61081960048036036080811015610a6657600080fd5b5080356001600160a01b03908116916020810135909116906040810135906060013567ffffffffffffffff16611806565b348015610aa357600080fd5b506103d16117f3565b348015610ab857600080fd5b506103b560048036036040811015610acf57600080fd5b810190602081018135600160201b811115610ae957600080fd5b820183602082011115610afb57600080f
d5b803590602001918460018302840111600160201b83111715610b1c57600080fd5b919390929091602081019035600160201b811115610b3957600080fd5b820183602082011115610b4b57600080fd5b803590602001918460018302840111600160201b83111715610b6c57600080fd5b509092509050611ec7565b348015610b8357600080fd5b506103b560048036036040811015610b9a57600080fd5b50803590602001356001600160a01b0316612136565b348015610bbc57600080fd5b5061052a6121ac565b348015610bd157600080fd5b5061052a6121b6565b348015610be657600080fd5b5061052a60048036036020811015610bfd57600080fd5b50356001600160a01b03166121bc565b348015610c1957600080fd5b5061052a60048036036020811015610c3057600080fd5b50356001600160a01b03166121ce565b348015610c4c57600080fd5b506103ff6121e9565b348015610c6157600080fd5b506103b560048036036040811015610c7857600080fd5b60ff8235169190810190604081016020820135600160201b811115610c9c57600080fd5b820183602082011115610cae57600080fd5b803590602001918460018302840111600160201b83111715610ccf57600080fd5b5090925090506121ef565b348015610ce657600080fd5b506103ff6122bf565b348015610cfb57600080fd5b506103b56122c5565b348015610d1057600080fd5b50610602612365565b348015610d2557600080fd5b5061060261236a565b348015610d3a57600080fd5b506103ff61236f565b348015610d4f57600080fd5b5061052a612375565b348015610d6457600080fd5b506104a060048036036020811015610d7b57600080fd5b50356001600160a01b031661237b565b348015610d9757600080fd5b506103ff6124a2565b606481565b61200181565b60005460609060ff16610df3576040805162461bcd60e51b81526020600482015260196024820152600080516020614905833981519152604482015290519081900360640190fd5b3361200014610e335760405162461bcd60e51b815260040180806020018281038252602f8152602001806148b3602f913960400191505060405180910390fd5b60ff841660021415610e8557610e7e83838080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506124a892505050565b9050610ed2565b6040805162461bcd60e51b815260206004820152601860248201527f756e7265636f676e697a65642073796e207061636b6167650000000000000000604482015290519081900360640190fd5b9392505050565b60015490565b60208
18101516000908152600490915260409020546001600160a01b03165b919050565b600181565b61100181565b670de0b6b3a764000081565b606181565b600881565b66071afd498d000081565b61200081565b6000908152600460205260409020546001600160a01b031690565b6402540be40081565b600281565b6000805460ff16610fa4576040805162461bcd60e51b81526020600482015260196024820152600080516020614905833981519152604482015290519081900360640190fd5b868514610fe25760405162461bcd60e51b815260040180806020018281038252603b815260200180614878603b913960400191505060405180910390fd5b8683146110205760405162461bcd60e51b815260040180806020018281038252603f81526020018061474b603f913960400191505060405180910390fd5b426078018267ffffffffffffffff16101561106c5760405162461bcd60e51b815260040180806020018281038252602481526020018061463b6024913960400191505060405180910390fd5b6402540be4003406156110b05760405162461bcd60e51b815260040180806020018281038252604081526020018061497b6040913960400191505060405180910390fd5b604080518681526020808802820101909152859060009081906060908480156110e3578160200160208202803683370190505b50905060005b848110156111be576402540be4008b8b8381811061110357fe5b905060200201358161111157fe5b061561114e5760405162461bcd60e51b815260040180806020018281038252603c81526020018061478a603c913960400191505060405180910390fd5b6111738b8b8381811061115d57fe5b90506020020135856125cc90919063ffffffff16565b935061119f6402540be4008c8c8481811061118a57fe5b9050602002013561262690919063ffffffff16565b8282815181106111ab57fe5b60209081029190910101526001016110e9565b506001546111e3906111d6908663ffffffff61266816565b849063ffffffff6125cc16565b3410156112215760405162461bcd60e51b81526004018080602001828103825260568152602001806149256056913960600191505060405180910390fd5b611231348463ffffffff6126c116565b915061123b614461565b6040518060c001604052806221272160e91b60001b815260200160006001600160a01b031681526020018381526020018e8e808060200260200160405190810160405280939291908181526020018383602002808284376000920191909152505050908252506040805160208c810282810182019093528c82529283019290918d918d91829185019084908082843
760009201919091525050509082525067ffffffffffffffff8916602090910152905061200063f7a251d760036112ff84612703565b611314876402540be40063ffffffff61262616565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b8381101561137257818101518382015260200161135a565b50505050905090810190601f16801561139f5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b1580156113c057600080fd5b505af11580156113d4573d6000803e3d6000fd5b505060408051600081523360208201528082018890526060810187905290517f74eab09b0e53aefc23f2e1b16da593f95c2dd49c6f5a23720463d10d9c330b2a9350908190036080019150a15060019c9b505050505050505050505050565b61100581565b601081565b60015481565b600033612001146114865760405162461bcd60e51b815260040180806020018281038252603381526020018061457b6033913960400191505060405180910390fd5b6040516120019083156108fc029084906000818181858888f193505050501580156114b5573d6000803e3d6000fd5b50600192915050565b61100881565b600b81565b60005460ff1661150e576040805162461bcd60e51b81526020600482015260196024820152600080516020614905833981519152604482015290519081900360640190fd5b336120001461154e5760405162461bcd60e51b815260040180806020018281038252602f8152602001806148b3602f913960400191505060405180910390fd5b60ff83166003141561159e5761159982828080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506129be92505050565b61160d565b7f41ce201247b6ceb957dcdb217d0b8acb50b9ea0e12af9af4f5e7f38902101605838383604051808460ff1660ff168152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f1916909201829003965090945050505050a15b505050565b33611008146116525760405162461bcd60e51b81526004018080602001828103825260238152602001806148e26023913960400191505060405180910390fd5b600083815260046020908152604080832080546001600160a01b039096166001600160a01b03199096168617905593825260038152838220949094556002909352912055565b600981565b677ce66c50e284000081565b6000805460ff16611
6ef576040805162461bcd60e51b81526020600482015260196024820152600080516020614905833981519152604482015290519081900360640190fd5b336110051461172f5760405162461bcd60e51b815260040180806020018281038252602f8152602001806145ae602f913960400191505060405180910390fd5b600047831061173e5747611740565b825b9050670de0b6b3a764000081111561175c5760009150506117e1565b80156117de576040516001600160a01b0385169082156108fc029083906000818181858888f19350505050158015611798573d6000803e3d6000fd5b50604080516001600160a01b03861681526020810183905281517ff8b71c64315fc33b2ead2adfa487955065152a8ac33d9d5193aafd7f45dc15a0929181900390910190a15b90505b92915050565b61100781565b61100681565b600081565b60005460ff1681565b600381565b6000805460ff1661184c576040805162461bcd60e51b81526020600482015260196024820152600080516020614905833981519152604482015290519081900360640190fd5b426078018267ffffffffffffffff1610156118985760405162461bcd60e51b815260040180806020018281038252602481526020018061463b6024913960400191505060405180910390fd5b6402540be4003406156118dc5760405162461bcd60e51b815260040180806020018281038252604081526020018061497b6040913960400191505060405180910390fd5b600080806001600160a01b0388166119bb5760015461190290879063ffffffff6125cc16565b3410156119405760405162461bcd60e51b81526004018080602001828103825260618152602001806146c56061913960800191505060405180910390fd5b6402540be4008606156119845760405162461bcd60e51b815260040180806020018281038252603c81526020018061478a603c913960400191505060405180910390fd5b611994348763ffffffff6126c116565b90506119ab866402540be40063ffffffff61262616565b6221272160e91b93509150611c5e565b6001600160a01b038816600090815260036020526040902054925082611a125760405162461bcd60e51b81526004018080602001828103825260318152602001806146946031913960400191505060405180910390fd5b600154341015611a535760405162461bcd60e51b815260040180806020018281038252603f8152602001806147e7603f913960400191505060405180910390fd5b506001600160a01b0387166000908152600260205260409020543490600881111580611a9e5750600881118015611a9e5750611a9c876007198301600a0a63ffffffff612a1a1
6565b155b611ad95760405162461bcd60e51b815260040180806020018281038252603c81526020018061478a603c913960400191505060405180910390fd5b611ae38782612a5c565b9250611aee84612a9c565b15611b36576305f5e100831015611b365760405162461bcd60e51b815260040180806020018281038252603a8152602001806145dd603a913960400191505060405180910390fd5b600881101580611b505750600881108015611b5057508683115b611b8b5760405162461bcd60e51b81526004018080602001828103825260258152602001806147266025913960400191505060405180910390fd5b677ce66c50e2840000831115611bd25760405162461bcd60e51b815260040180806020018281038252603581526020018061465f6035913960400191505060405180910390fd5b604080516323b872dd60e01b81523360048201523060248201526044810189905290516001600160a01b038b16916323b872dd9160648083019260209291908290030181600087803b158015611c2757600080fd5b505af1158015611c3b573d6000803e3d6000fd5b505050506040513d6020811015611c5157600080fd5b5051611c5c57600080fd5b505b611c66614461565b6040805160c0810182528581526001600160a01b038b166020820152815160018082528184018452919283019181602001602082028036833750505081526040805160018082528183019092526020928301929091908083019080368337505050815260408051600180825281830190925260209283019290919080830190803683370190505081526020018767ffffffffffffffff168152509050828160400151600081518110611d1457fe5b602002602001018181525050878160600151600081518110611d3257fe5b60200260200101906001600160a01b031690816001600160a01b031681525050338160800151600081518110611d6457fe5b6001600160a01b039092166020928302919091019091015261200063f7a251d76003611d8f84612703565b611da4866402540be40063ffffffff61262616565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b83811015611e02578181015183820152602001611dea565b50505050905090810190601f168015611e2f5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b158015611e5057600080fd5b505af1158015611e64573d6000803e3d6000fd5b5050604080516001600160a01b038d1681523360208201528082018b905
26060810186905290517f74eab09b0e53aefc23f2e1b16da593f95c2dd49c6f5a23720463d10d9c330b2a9350908190036080019150a150600198975050505050505050565b3361100714611f075760405162461bcd60e51b815260040180806020018281038252602e815260200180614826602e913960400191505060405180910390fd5b60208114611f5c576040805162461bcd60e51b815260206004820152601b60248201527f65787065637465642076616c7565206c656e6774682069732033320000000000604482015290519081900360640190fd5b606084848080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050604080516020601f8801819004810282018101909252868152939450606093925086915085908190840183828082843760009201919091525050505060208301519091506772656c617946656560c01b811415612064576020820151670de0b6b3a7640000811180159061200b57506402540be4008106155b61205c576040805162461bcd60e51b815260206004820152601960248201527f7468652072656c6179466565206f7574206f662072616e676500000000000000604482015290519081900360640190fd5b6001556120a1565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a878787876040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050505050565b33611008146121765760405162461bcd60e51b81526004018080602001828103825260238152602001806148e26023913960400191505060405180910390fd5b600091825260046020908152604080842080546001600160a01b03191690556001600160a01b0392909216835260039052812055565b6221272160e91b81565b61c35081565b60026020526000908152604090205481565b6001600160a01b031660009081526003602052604090205490565b61100281565b60005460ff16612234576040805162461bcd60e51b81526020600482015260196024820152600080516020614905833981519152604482015290519081900360640190fd5b33612000146122745760405162461bcd60e51b815260040180806020018281038252602f8152602001806148b3602f9139604001915
05060405180910390fd5b60ff83166003141561159e5761159982828080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250612ba292505050565b61100381565b60005460ff161561231d576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b66071afd498d000060019081556000808052600260205260127fac33ff75c19e70fe83507db0d683fd3465c996598dc972688b7ace676c89077b55805460ff19169091179055565b600481565b600581565b61100081565b61271081565b6001600160a01b03811660009081526003602090815260409182902054825182815280840190935260609290918391906020820181803683375050506020810183905290506000805b60208160ff16101561240b57828160ff16815181106123df57fe5b01602001516001600160f81b031916156123fe57600190910190612403565b61240b565b6001016123c4565b5060608160ff166040519080825280601f01601f19166020018201604052801561243c576020820181803683370190505b50905060005b8260ff168160ff16101561249857838160ff168151811061245f57fe5b602001015160f81c60f81b828260ff168151811061247957fe5b60200101906001600160f81b031916908160001a905350600101612442565b5095945050505050565b61100481565b60606124b26144ad565b60006124bd84612ca0565b9150915080612513576040805162461bcd60e51b815260206004820152601f60248201527f756e7265636f676e697a6564207472616e73666572496e207061636b61676500604482015290519081900360640190fd5b600061251e83612ddf565b905063ffffffff8116156125b2576040808401516020808601516001600160a01b031660009081526002909152918220546125599190612a5c565b90506125636144e2565b60405180608001604052808660000151815260200183815260200186608001516001600160a01b031681526020018463ffffffff1681525090506125a68161312c565b95505050505050610efe565b50506040805160008152602081019091529150610efe9050565b6000828201838110156117de576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b60006117de83836040518060400160405280601a81526020017f536166654d6174683a206469766973696f6
e206279207a65726f000000000000815250613208565b600082612677575060006117e1565b8282028284828161268457fe5b04146117de5760405162461bcd60e51b81526004018080602001828103825260218152602001806147c66021913960400191505060405180910390fd5b60006117de83836040518060400160405280601e81526020017f536166654d6174683a207375627472616374696f6e206f766572666c6f7700008152506132aa565b60408051600680825260e08201909252606091829190816020015b606081526020019060019003908161271e575050835190915061274090613304565b8160008151811061274d57fe5b602002602001018190525061276e83602001516001600160a01b0316613317565b8160018151811061277b57fe5b6020026020010181905250600083604001515190506060816040519080825280602002602001820160405280156127c657816020015b60608152602001906001900390816127b15790505b50905060005b82811015612813576127f4866040015182815181106127e757fe5b6020026020010151613304565b82828151811061280057fe5b60209081029190910101526001016127cc565b5061281d8161333a565b8360028151811061282a57fe5b602002602001018190525060608260405190808252806020026020018201604052801561286b57816020015b60608152602001906001900390816128565790505b50905060005b838110156128c1576128a28760600151828151811061288c57fe5b60200260200101516001600160a01b0316613317565b8282815181106128ae57fe5b6020908102919091010152600101612871565b506128cb8161333a565b846003815181106128d857fe5b602002602001018190525060608360405190808252806020026020018201604052801561291957816020015b60608152602001906001900390816129045790505b50905060005b848110156129595761293a8860800151828151811061288c57fe5b82828151811061294657fe5b602090810291909101015260010161291f565b506129638161333a565b8560048151811061297057fe5b60200260200101819052506129928760a0015167ffffffffffffffff16613304565b8560058151811061299f57fe5b60200260200101819052506129b38561333a565b979650505050505050565b6129c6614509565b60006129d1836133c4565b9150915080612a115760405162461bcd60e51b81526004018080602001828103825260248152602001806148546024913960400191505060405180910390fd5b61160d8261358f565b60006117de83836040518060400160405280601881526020017f536166654d6
174683a206d6f64756c6f206279207a65726f0000000000000000815250613a13565b60006008821115612a8557612a7e836007198401600a0a63ffffffff61262616565b90506117e1565b6117de836008849003600a0a63ffffffff61266816565b604080516020808252818301909252600091606091906020820181803683375050506020810184905290506000805b60208160ff161015612b1257828160ff1681518110612ae657fe5b01602001516001600160f81b03191615612b0557600190910190612b0a565b612b12565b600101612acb565b50600760ff82161015612b2a57600092505050610efe565b816005820360ff1681518110612b3c57fe5b6020910101516001600160f81b031916602d60f81b14612b6157600092505050610efe565b816001820360ff1681518110612b7357fe5b6020910101516001600160f81b031916604d60f81b14612b9857600092505050610efe565b5060019392505050565b612baa614461565b6000612bb583613a75565b9150915080612bf55760405162461bcd60e51b81526004018080602001828103825260248152602001806146176024913960400191505060405180910390fd5b612bfd614509565b602080840180516001600160a01b0390811684526040808701518585015291511660009081526002909252812054905b846040015151811015612c7e57612c5b85604001518281518110612c4d57fe5b602002602001015183613cd9565b85604001518281518110612c6b57fe5b6020908102919091010152600101612c2d565b506080840151604083015260056060830152612c998261358f565b5050505050565b612ca86144ad565b6000612cb26144ad565b612cba614540565b612ccb612cc686613d12565b613d37565b90506000805b612cda83613d81565b15612dd25780612cfc57612cf5612cf084613da2565b613df0565b8452612dca565b8060011415612d2957612d16612d1184613da2565b613ea7565b6001600160a01b03166020850152612dca565b8060021415612d4857612d3e612cf084613da2565b6040850152612dca565b8060031415612d7057612d5d612d1184613da2565b6001600160a01b03166060850152612dca565b8060041415612d9857612d85612d1184613da2565b6001600160a01b03166080850152612dca565b8060051415612dc557612dad612cf084613da2565b67ffffffffffffffff1660a085015260019150612dca565b612dd2565b600101612cd1565b5091935090915050915091565b60208101516000906001600160a01b0316612f16578160a0015167ffffffffffffffff16421115612e1257506001610efe565b8160400151471015612e26575060036
10efe565b606082015160408084015190516000926001600160a01b0316916127109184818181858888f193505050503d8060008114612e7d576040519150601f19603f3d011682016040523d82523d6000602084013e612e82565b606091505b5050905080612e95575060049050610efe565b7f471eb9cc1ffe55ffadf15b32595415eb9d80f22e761d24bd6dffc607e1284d5983602001518460600151856040015160405180846001600160a01b03166001600160a01b03168152602001836001600160a01b03166001600160a01b03168152602001828152602001935050505060405180910390a15060009050610efe565b8160a0015167ffffffffffffffff16421115612f3457506001610efe565b81516020808401516001600160a01b031660009081526003909152604090205414612f6157506002610efe565b602080830151604080516370a0823160e01b815230600482015290516000936001600160a01b03909316926370a082319261c3509260248083019392829003018187803b158015612fb157600080fd5b5086fa158015612fc5573d6000803e3d6000fd5b50505050506040513d6020811015612fdc57600080fd5b50516040840151909150811015612ff7575060039050610efe565b600083602001516001600160a01b031663a9059cbb61c350866060015187604001516040518463ffffffff1660e01b815260040180836001600160a01b03166001600160a01b0316815260200182815260200192505050602060405180830381600088803b15801561306857600080fd5b5087f115801561307c573d6000803e3d6000fd5b50505050506040513d602081101561309357600080fd5b505190508015613120577f471eb9cc1ffe55ffadf15b32595415eb9d80f22e761d24bd6dffc607e1284d5984602001518560600151866040015160405180846001600160a01b03166001600160a01b03168152602001836001600160a01b03166001600160a01b03168152602001828152602001935050505060405180910390a15060009150610efe9050565b5060059150610efe9050565b60408051600480825260a08201909252606091829190816020015b6060815260200190600190039081613147575050835190915061316990613304565b8160008151811061317657fe5b602002602001018190525061318e8360200151613304565b8160018151811061319b57fe5b60200260200101819052506131bc83604001516001600160a01b0316613317565b816002815181106131c957fe5b60200260200101819052506131e7836060015163ffffffff16613304565b816003815181106131f457fe5b6020026020010181905250610ed28161333a565b600
081836132945760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b83811015613259578181015183820152602001613241565b50505050905090810190601f1680156132865780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b5060008385816132a057fe5b0495945050505050565b600081848411156132fc5760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315613259578181015183820152602001613241565b505050900390565b60606117e161331283613ec1565b613fa7565b60408051600560a21b8318601482015260348101909152606090610ed281613fa7565b606081516000141561335b5750604080516000815260208101909152610efe565b60608260008151811061336a57fe5b602002602001015190506000600190505b83518110156133ab576133a18285838151811061339457fe5b6020026020010151613ff9565b915060010161337b565b50610ed26133be825160c060ff16614076565b82613ff9565b6133cc614509565b60006133d6614509565b6133de614540565b6133ea612cc686613d12565b90506000805b6133f983613d81565b15612dd2578061341f5761340f612d1184613da2565b6001600160a01b03168452613587565b80600114156134c057606061343b61343685613da2565b61416e565b90508051604051908082528060200260200182016040528015613468578160200160208202803683370190505b50602086015260005b81518110156134b95761349682828151811061348957fe5b6020026020010151613df0565b866020015182815181106134a657fe5b6020908102919091010152600101613471565b5050613587565b80600214156135625760606134d761343685613da2565b90508051604051908082528060200260200182016040528015613504578160200160208202803683370190505b50604086015260005b81518110156134b95761353282828151811061352557fe5b6020026020010151613ea7565b8660400151828151811061354257fe5b6001600160a01b039092166020928302919091019091015260010161350d565b8060031415612dc557613577612cf084613da2565b63ffffffff166060850152600191505b6001016133f0565b80516001600160a01b03166137b95760005b8160200151518110156137b3576000826040015182815181106135c057fe5b60200260200101516001600160a01b0316612710846020015184815181106135e457fe5b602090810291909101015160405
16000818181858888f193505050503d806000811461362c576040519150601f19603f3d011682016040523d82523d6000602084013e613631565b606091505b50509050806136f4577f203f9f67a785f4f81be4d48b109aa0c498d1bc8097ecc2627063f480cc5fe73e83600001518460400151848151811061367057fe5b60200260200101518560200151858151811061368857fe5b6020026020010151866060015160405180856001600160a01b03166001600160a01b03168152602001846001600160a01b03166001600160a01b031681526020018381526020018263ffffffff1663ffffffff16815260200194505050505060405180910390a16137aa565b7fd468d4fa5e8fb4adc119b29a983fd0785e04af5cb8b7a3a69a47270c54b6901a83600001518460400151848151811061372a57fe5b60200260200101518560200151858151811061374257fe5b6020026020010151866060015160405180856001600160a01b03166001600160a01b03168152602001846001600160a01b03166001600160a01b031681526020018381526020018263ffffffff1663ffffffff16815260200194505050505060405180910390a15b506001016135a1565b50613a10565b60005b816020015151811015613a0e57600082600001516001600160a01b031663a9059cbb61c350856040015185815181106137f157fe5b60200260200101518660200151868151811061380957fe5b60200260200101516040518463ffffffff1660e01b815260040180836001600160a01b03166001600160a01b0316815260200182815260200192505050602060405180830381600088803b15801561386057600080fd5b5087f1158015613874573d6000803e3d6000fd5b50505050506040513d602081101561388b57600080fd5b50519050801561394f577fd468d4fa5e8fb4adc119b29a983fd0785e04af5cb8b7a3a69a47270c54b6901a8360000151846040015184815181106138cb57fe5b6020026020010151856020015185815181106138e357fe5b6020026020010151866060015160405180856001600160a01b03166001600160a01b03168152602001846001600160a01b03166001600160a01b031681526020018381526020018263ffffffff1663ffffffff16815260200194505050505060405180910390a1613a05565b7f203f9f67a785f4f81be4d48b109aa0c498d1bc8097ecc2627063f480cc5fe73e83600001518460400151848151811061398557fe5b60200260200101518560200151858151811061399d57fe5b6020026020010151866060015160405180856001600160a01b03166001600160a01b03168152602001846001600160a01b03166001600160a01b03168
1526020018381526020018263ffffffff1663ffffffff16815260200194505050505060405180910390a15b506001016137bc565b505b50565b60008183613a625760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315613259578181015183820152602001613241565b50828481613a6c57fe5b06949350505050565b613a7d614461565b6000613a87614461565b613a8f614540565b613a9b612cc686613d12565b90506000805b613aaa83613d81565b15612dd25780613ac757613ac0612cf084613da2565b8452613cd1565b8060011415613aef57613adc612d1184613da2565b6001600160a01b03166020850152613cd1565b8060021415613b7e576060613b0661343685613da2565b90508051604051908082528060200260200182016040528015613b33578160200160208202803683370190505b50604086015260005b8151811015613b7757613b5482828151811061348957fe5b86604001518281518110613b6457fe5b6020908102919091010152600101613b3c565b5050613cd1565b8060031415613c13576060613b9561343685613da2565b90508051604051908082528060200260200182016040528015613bc2578160200160208202803683370190505b50606086015260005b8151811015613b7757613be382828151811061352557fe5b86606001518281518110613bf357fe5b6001600160a01b0390921660209283029190910190910152600101613bcb565b8060041415613ca8576060613c2a61343685613da2565b90508051604051908082528060200260200182016040528015613c57578160200160208202803683370190505b50608086015260005b8151811015613b7757613c7882828151811061352557fe5b86608001518281518110613c8857fe5b6001600160a01b0390921660209283029190910190910152600101613c60565b8060051415612dc557613cbd612cf084613da2565b67ffffffffffffffff1660a0850152600191505b600101613aa1565b60006008821115613cfb57612a7e836007198401600a0a63ffffffff61266816565b6117de836008849003600a0a63ffffffff61262616565b613d1a614560565b506040805180820190915281518152602082810190820152919050565b613d3f614540565b613d488261423f565b613d5157600080fd5b6000613d60836020015161426f565b60208085015160408051808201909152868152920190820152915050919050565b6000613d8b614560565b505080518051602091820151919092015191011190565b613daa614560565b613db382613d81565b613dbc57600080fd5b60208201516
000613dcc826142d2565b80830160209586015260408051808201909152908152938401919091525090919050565b805160009015801590613e0557508151602110155b613e0e57600080fd5b6000613e1d836020015161426f565b90508083600001511015613e78576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b825160208085015183018051928490039291831015613e9e57826020036101000a820491505b50949350505050565b8051600090601514613eb857600080fd5b6117e182613df0565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff198416613f0557506018613f29565b6fffffffffffffffffffffffffffffffff198416613f2557506010613f29565b5060005b6020811015613f5f57818181518110613f3e57fe5b01602001516001600160f81b03191615613f5757613f5f565b600101613f29565b60008160200390506060816040519080825280601f01601f191660200182016040528015613f94576020820181803683370190505b5080830196909652508452509192915050565b606081516001148015613fd95750607f60f81b82600081518110613fc757fe5b01602001516001600160f81b03191611155b15613fe5575080610efe565b6117e1613ff78351608060ff16614076565b835b6060806040519050835180825260208201818101602087015b8183101561402a578051835260209283019201614012565b50855184518101855292509050808201602086015b8183101561405757805183526020928301920161403f565b508651929092011591909101601f01601f191660405250905092915050565b60606801000000000000000083106140c6576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b604080516001808252818301909252606091602082018180368337019050509050603784116141205782840160f81b8160008151811061410257fe5b60200101906001600160f81b031916908160001a90535090506117e1565b606061412b85613ec1565b90508381510160370160f81b8260008151811061414457fe5b60200101906001600160f81b031916908160001a9053506141658282613ff9565b95945050505050565b60606141798261423f565b61418257600080fd5b600061418d83614405565b90506060816040519080825280602002602001820160405280156141c
b57816020015b6141b8614560565b8152602001906001900390816141b05790505b50905060006141dd856020015161426f565b60208601510190506000805b84811015614234576141fa836142d2565b915060405180604001604052808381526020018481525084828151811061421d57fe5b6020908102919091010152918101916001016141e9565b509195945050505050565b805160009061425057506000610efe565b6020820151805160001a9060c0821015612b9857600092505050610efe565b8051600090811a6080811015614289576000915050610efe565b60b88110806142a4575060c081108015906142a4575060f881105b156142b3576001915050610efe565b60c08110156142c75760b519019050610efe565b60f519019050610efe565b80516000908190811a60808110156142ed57600191506143fe565b60b881101561430257607e19810191506143fe565b60c081101561437c57600060b78203600186019550806020036101000a865104915060018101820193505080831015614376576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b506143fe565b60f88110156143915760be19810191506143fe565b600060f78203600186019550806020036101000a8651049150600181018201935050808310156143fc576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b805160009061441657506000610efe565b6000809050600061442a846020015161426f565b602085015185519181019250015b8082101561445857614449826142d2565b60019093019290910190614438565b50909392505050565b6040518060c001604052806000801916815260200160006001600160a01b03168152602001606081526020016060815260200160608152602001600067ffffffffffffffff1681525090565b6040805160c081018252600080825260208201819052918101829052606081018290526080810182905260a081019190915290565b60408051608081018252600080825260208201819052918101829052606081019190915290565b604051806080016040528060006001600160a01b031681526020016060815260200160608152602001600063ffffffff1681525090565b6040518060400160405280614553614560565b8152602001600081525090565b60405180604001604052806000815260200160008152509056fe6f6e6c79207374616b696e672073797374656d20636f6e7
4726163742063616e2063616c6c20746869732066756e6374696f6e746865206d6573736167652073656e646572206d75737420626520696e63656e746976697a6520636f6e7472616374466f72206d696e69546f6b656e2c20746865207472616e7366657220616d6f756e74206d757374206e6f74206265206c657373207468616e2031756e7265636f676e697a6564207472616e736665724f75742073796e207061636b61676565787069726554696d65206d7573742062652074776f206d696e75746573206c61746572616d6f756e7420697320746f6f206c617267652c20657863656564206d6178696d756d206265703220746f6b656e20616d6f756e7474686520636f6e747261637420686173206e6f74206265656e20626f756e6420746f20616e79206265703220746f6b656e726563656976656420424e4220616d6f756e742073686f756c64206265206e6f206c657373207468616e207468652073756d206f66207472616e736665724f757420424e4220616d6f756e7420616e64206d696e696d756d2072656c6179466565616d6f756e7420697320746f6f206c617267652c2075696e74323536206f766572666c6f774c656e677468206f6620726563697069656e74416464727320646f65736e277420657175616c20746f206c656e677468206f6620726566756e644164647273696e76616c6964207472616e7366657220616d6f756e743a20707265636973696f6e206c6f737320696e20616d6f756e7420636f6e76657273696f6e536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f77726563656976656420424e4220616d6f756e742073686f756c64206265206e6f206c657373207468616e20746865206d696e696d756d2072656c6179466565746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e7472616374756e7265636f676e697a6564207472616e736665724f75742061636b207061636b6167654c656e677468206f6620726563697069656e74416464727320646f65736e277420657175616c20746f206c656e677468206f6620616d6f756e7473746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d73672073656e646572206d75737420626520746f6b656e4d616e6167657274686520636f6e7472616374206e6f7420696e69742079657400000000000000726563656976656420424e4220616d6f756e742073686f756c64206265206e6f206c657373207468616e207468652073756d206f66207472616e7366657220424e4220616d6f756e7420616e642072656c617946656
5696e76616c696420726563656976656420424e4220616d6f756e743a20707265636973696f6e206c6f737320696e20616d6f756e7420636f6e76657273696f6ea2646970667358221220caf56ff249aa6ca806ce69d2892ae51c7530b6d135624d7e27646a2095f0b01964736f6c63430006040033", + }, + { + ContractAddr: StakingContract, + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/9d45b31c12b2c04757284717f4351cb44e81a3a7", + Code: "60806040526004361061036f5760003560e01c806370fd5bad116101c6578063b88a802f116100f7578063d7ecfcb611610095578063f45fd80b1161006f578063f45fd80b14610bd0578063f9a2bbc714610c15578063fc3e590814610646578063fd6a687914610c2a57610376565b8063d7ecfcb6146107ee578063dc927faf14610ba6578063edc1a5b014610bbb57610376565b8063c2117d82116100d1578063c2117d8214610abc578063c81b166214610ad1578063c8509d8114610ae6578063d61b9b9314610b6b57610376565b8063b88a802f14610a57578063baaafd3b14610a6c578063bf8546ca14610a8157610376565b806396713da911610164578063a78abc161161013e578063a78abc1614610939578063ab51bb9614610962578063ac43175114610977578063b14315df14610a4257610376565b806396713da9146108fa5780639dc092621461090f578063a1a11bf51461092457610376565b806375d47a0a116101a057806375d47a0a1461084b5780637942fd0514610860578063831d65d11461087557806392b888a4146105f257610376565b806370fd5bad146107ee578063718a8aa81461080357806375aca5931461081857610376565b8063413d9c3a116102a05780635d17c8bd1161023e57806369b635b61161021857806369b635b61461075b5780636bd8f804146107705780636e47b482146107a65780636fb7f7eb146107bb57610376565b80635d17c8bd1461071c5780635d499b1b1461073157806362b171d21461074657610376565b80634bf6c8821161027a5780634bf6c882146106b15780634d99dd16146106c657806351e80672146106f2578063552aaf931461070757610376565b8063413d9c3a1461065b57806343756e5c14610670578063493279b11461068557610376565b8063151817e31161030d578063333ad3e7116102e7578063333ad3e71461061c57806334c43354146106315780633dffc387146105f25780633fdfa7e41461064657610376565b8063151817e3146105c757806317c9efb0146105f25780632fdeb1111461060757610376565b80630bee7a67116103495780630bee7a6
71461043b5780630e2374a5146104695780631182b8751461049a57806311fe9ec61461059457610376565b8063026e402b1461037b57806302985992146103a9578063047636d1146103d057610376565b3661037657005b600080fd5b6103a76004803603604081101561039157600080fd5b506001600160a01b038135169060200135610c3f565b005b3480156103b557600080fd5b506103be611141565b60408051918252519081900360200190f35b3480156103dc57600080fd5b50610403600480360360208110156103f357600080fd5b50356001600160a01b0316611147565b6040518082606080838360005b83811015610428578181015183820152602001610410565b5050505090500191505060405180910390f35b34801561044757600080fd5b5061045061119b565b6040805163ffffffff9092168252519081900360200190f35b34801561047557600080fd5b5061047e6111a0565b604080516001600160a01b039092168252519081900360200190f35b3480156104a657600080fd5b5061051f600480360360408110156104bd57600080fd5b60ff8235169190810190604081016020820135600160201b8111156104e157600080fd5b8201836020820111156104f357600080fd5b803590602001918460018302840111600160201b8311171561051457600080fd5b5090925090506111a6565b6040805160208082528351818301528351919283929083019185019080838360005b83811015610559578181015183820152602001610541565b50505050905090810190601f1680156105865780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156105a057600080fd5b506103be600480360360208110156105b757600080fd5b50356001600160a01b031661135c565b3480156105d357600080fd5b506105dc611377565b6040805160ff9092168252519081900360200190f35b3480156105fe57600080fd5b506105dc61137c565b34801561061357600080fd5b506103be611381565b34801561062857600080fd5b50610450611387565b34801561063d57600080fd5b506103be61138c565b34801561065257600080fd5b506105dc611397565b34801561066757600080fd5b506103be61139c565b34801561067c57600080fd5b5061047e6113a3565b34801561069157600080fd5b5061069a6113a9565b6040805161ffff9092168252519081900360200190f35b3480156106bd57600080fd5b506105dc6113ae565b6103a7600480360360408110156106dc57600080fd5b506001600160a01b0381351690602001356113b3565b3480156106fe57600080fd5b5061047e61199a5
65b34801561071357600080fd5b506105dc6119a0565b34801561072857600080fd5b506103be6119a5565b34801561073d57600080fd5b506103be6119ab565b34801561075257600080fd5b506103be6119b4565b34801561076757600080fd5b506103be611aec565b6103a76004803603606081101561078657600080fd5b506001600160a01b03813581169160208101359091169060400135611af2565b3480156107b257600080fd5b5061047e612160565b3480156107c757600080fd5b506103be600480360360208110156107de57600080fd5b50356001600160a01b0316612166565b3480156107fa57600080fd5b506105dc612181565b34801561080f57600080fd5b506105dc612186565b34801561082457600080fd5b506103be6004803603602081101561083b57600080fd5b50356001600160a01b031661218b565b34801561085757600080fd5b5061047e6121a6565b34801561086c57600080fd5b506105dc6121ac565b34801561088157600080fd5b506103a76004803603604081101561089857600080fd5b60ff8235169190810190604081016020820135600160201b8111156108bc57600080fd5b8201836020820111156108ce57600080fd5b803590602001918460018302840111600160201b831117156108ef57600080fd5b5090925090506121b1565b34801561090657600080fd5b506105dc612473565b34801561091b57600080fd5b5061047e612478565b34801561093057600080fd5b5061047e61247e565b34801561094557600080fd5b5061094e612484565b604080519115158252519081900360200190f35b34801561096e57600080fd5b506104506119a0565b34801561098357600080fd5b506103a76004803603604081101561099a57600080fd5b810190602081018135600160201b8111156109b457600080fd5b8201836020820111156109c657600080fd5b803590602001918460018302840111600160201b831117156109e757600080fd5b919390929091602081019035600160201b811115610a0457600080fd5b820183602082011115610a1657600080fd5b803590602001918460018302840111600160201b83111715610a3757600080fd5b50909250905061248d565b348015610a4e57600080fd5b506105dc612a8a565b348015610a6357600080fd5b506103be612a8f565b348015610a7857600080fd5b506103be612bc4565b348015610a8d57600080fd5b506103be60048036036040811015610aa457600080fd5b506001600160a01b0381358116916020013516612bcf565b348015610ac857600080fd5b506103be612bfc565b348015610add57600080fd5b5061047e612c02565b348015610af2576
00080fd5b506103a760048036036040811015610b0957600080fd5b60ff8235169190810190604081016020820135600160201b811115610b2d57600080fd5b820183602082011115610b3f57600080fd5b803590602001918460018302840111600160201b83111715610b6057600080fd5b509092509050612c08565b348015610b7757600080fd5b506103be60048036036040811015610b8e57600080fd5b506001600160a01b0381358116916020013516612e25565b348015610bb257600080fd5b5061047e612e50565b348015610bc757600080fd5b506103be612e56565b348015610bdc57600080fd5b506103be60048036036060811015610bf357600080fd5b506001600160a01b038135811691602081013582169160409091013516612e63565b348015610c2157600080fd5b5061047e612e98565b348015610c3657600080fd5b5061047e612e9e565b60105460ff1660021415610c8b576040805162461bcd60e51b815260206004820152600e60248201526d4e6f2072652d656e7472616e637960901b604482015290519081900360640190fd5b6010805460ff19166002179055806402540be4003406158015610cb357506402540be4008106155b610d04576040805162461bcd60e51b815260206004820152601c60248201527f707265636973696f6e206c6f737320696e20636f6e76657273696f6e00000000604482015290519081900360640190fd5b60005460ff16610d41576638d7ea4c6800006001908155662386f26fc1000060025568056bc75e2d631000006003556000805460ff191690911790555b600354821015610d98576040805162461bcd60e51b815260206004820152601760248201527f696e76616c69642064656c656761746520616d6f756e74000000000000000000604482015290519081900360640190fd5b600154610dac90839063ffffffff612ea416565b341015610df7576040805162461bcd60e51b81526020600482015260146024820152736e6f7420656e6f756768206d73672076616c756560601b604482015290519081900360640190fd5b60405133906108fc9060009081818181818888f19350505050610e55576040805162461bcd60e51b815260206004820152601160248201527034b73b30b634b2103232b632b3b0ba37b960791b604482015290519081900360640190fd5b6000610e6c836402540be40063ffffffff612f0516565b90506000610e80348563ffffffff612f4716565b90506000610e9960025483612f4790919063ffffffff16565b6040805160038082526080820190925291925060609190816020015b6060815260200190600190039081610eb5579050509050610ed533612f89565b8
1600081518110610ee257fe5b6020026020010181905250610eff876001600160a01b0316612f89565b81600181518110610f0c57fe5b6020026020010181905250610f2084612fac565b81600281518110610f2d57fe5b60200260200101819052506060610f4d6001610f4884612fbf565b613049565b8051602080830191909120600f80546000908152600a845260408082209390935581546001908101909255338152600b909352912080549091019055905061200063f7a251d7601083610fab876402540be40063ffffffff612f0516565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b83811015611009578181015183820152602001610ff1565b50505050905090810190601f1680156110365780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b15801561105757600080fd5b505af115801561106b573d6000803e3d6000fd5b5061100492506108fc91506110889050898663ffffffff612ea416565b6040518115909202916000818181858888f193505050501580156110b0573d6000803e3d6000fd5b506002546040516110029180156108fc02916000818181858888f193505050501580156110e1573d6000803e3d6000fd5b50604080518881526020810185905281516001600160a01b038b169233927f5f32ed2794e2e72d19e3cb2320e8820a499c4204887372beba51f5e61c040867929081900390910190a350506010805460ff19166001179055505050505050565b60035481565b61114f614dd9565b611157614dd9565b6001600160a01b0383166000818152600b60209081526040808320548552838352600c82528083205485830152928252600d9052819020549082015290505b919050565b606481565b61200181565b606033612000146111e85760405162461bcd60e51b815260040180806020018281038252602f815260200180614f97602f913960400191505060405180910390fd5b60005460ff16611225576638d7ea4c6800006001908155662386f26fc1000060025568056bc75e2d631000006003556000805460ff191690911790555b61122d614df7565b61127461126f85858080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506130cf92505050565b6130f4565b905060006112896112848361313e565b61318c565b90506000606060ff8316600414156112ae576112a484613243565b9092509050611305565b60ff8316600514156112c3576112a48
4613423565b6040805162461bcd60e51b8152602060048201526012602482015271756e6b6e6f776e206576656e74207479706560701b604482015290519081900360640190fd5b63ffffffff821615611351576040805163ffffffff84168152905160ff8516917f391d6e5ea6ab6c49b9a0abb1782cae5def8d711f973b00c729658c0b2a80b31b919081900360200190a25b979650505050505050565b6001600160a01b031660009081526006602052604090205490565b600581565b600181565b60015481565b606581565b662386f26fc1000081565b600381565b620a8c0081565b61100181565b606181565b600881565b60105460ff16600214156113ff576040805162461bcd60e51b815260206004820152600e60248201526d4e6f2072652d656e7472616e637960901b604482015290519081900360640190fd5b6010805460ff19166002179055806402540be400340615801561142757506402540be4008106155b611478576040805162461bcd60e51b815260206004820152601c60248201527f707265636973696f6e206c6f737320696e20636f6e76657273696f6e00000000604482015290519081900360640190fd5b60005460ff166114b5576638d7ea4c6800006001908155662386f26fc1000060025568056bc75e2d631000006003556000805460ff191690911790555b600154341015611503576040805162461bcd60e51b81526020600482015260146024820152736e6f7420656e6f7567682072656c61792066656560601b604482015290519081900360640190fd5b6003548210156115be576002548211611556576040805162461bcd60e51b815260206004820152601060248201526f6e6f7420656e6f7567682066756e647360801b604482015290519081900360640190fd5b3360009081526005602090815260408083206001600160a01b038716845290915290205482146115be576040805162461bcd60e51b815260206004820152600e60248201526d1a5b9d985b1a5908185b5bdd5b9d60921b604482015290519081900360640190fd5b3360009081526005602090815260408083206001600160a01b0387168452909152902054821115611627576040805162461bcd60e51b815260206004820152600e60248201526d1a5b9d985b1a5908185b5bdd5b9d60921b604482015290519081900360640190fd5b3360009081526007602090815260408083206001600160a01b038716845290915290205442101561169f576040805162461bcd60e51b815260206004820152601a60248201527f70656e64696e6720756e64656c65676174696f6e206578697374000000000000604482015290519081900360640190fd5b60006116b6836
402540be40063ffffffff612f0516565b60025490915034906000906116d290839063ffffffff612f4716565b6040805160038082526080820190925291925060609190816020015b60608152602001906001900390816116ee57905050905061170e33612f89565b8160008151811061171b57fe5b6020026020010181905250611738876001600160a01b0316612f89565b8160018151811061174557fe5b602002602001018190525061175984612fac565b8160028151811061176657fe5b602002602001018190525060606117816002610f4884612fbf565b8051602080830191909120600f80546000908152600a845260408082209390935581546001908101909255338152600c90935291208054909101905590506117d242620a8c0063ffffffff612ea416565b3360009081526007602090815260408083206001600160a01b038d16845290915290205561200063f7a251d7601083611816876402540be40063ffffffff612f0516565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b8381101561187457818101518382015260200161185c565b50505050905090810190601f1680156118a15780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b1580156118c257600080fd5b505af11580156118d6573d6000803e3d6000fd5b5050604051611004925085156108fc02915085906000818181858888f19350505050158015611909573d6000803e3d6000fd5b506002546040516110029180156108fc02916000818181858888f1935050505015801561193a573d6000803e3d6000fd5b50604080518881526020810185905281516001600160a01b038b169233927fdf0b6ac27f3f3bb31cee3dab0f4fe40cc19c6a3f8daaec52e06b261e58a12519929081900390910190a350506010805460ff19166001179055505050505050565b61200081565b600081565b60025481565b6402540be40081565b60105460009060ff1660021415611a03576040805162461bcd60e51b815260206004820152600e60248201526d4e6f2072652d656e7472616e637960901b604482015290519081900360640190fd5b506010805460ff191660021790553360009081526008602052604090205480611a6a576040805162461bcd60e51b81526020600482015260146024820152736e6f20756e64656c6567617465642066756e647360601b604482015290519081900360640190fd5b336000818152600860205260408082208290555183156108fc029184919081818185888
8f19350505050158015611aa5573d6000803e3d6000fd5b5060408051828152905133917fc712d133b8d448221aaed2198ed1f0db6dfc860fb01bc3a630916fe6cbef946f919081900360200190a26010805460ff1916600117905590565b60035490565b60105460ff1660021415611b3e576040805162461bcd60e51b815260206004820152600e60248201526d4e6f2072652d656e7472616e637960901b604482015290519081900360640190fd5b6010805460ff19166002179055806402540be4003406158015611b6657506402540be4008106155b611bb7576040805162461bcd60e51b815260206004820152601c60248201527f707265636973696f6e206c6f737320696e20636f6e76657273696f6e00000000604482015290519081900360640190fd5b60005460ff16611bf4576638d7ea4c6800006001908155662386f26fc1000060025568056bc75e2d631000006003556000805460ff191690911790555b826001600160a01b0316846001600160a01b03161415611c52576040805162461bcd60e51b815260206004820152601460248201527334b73b30b634b2103932b232b632b3b0ba34b7b760611b604482015290519081900360640190fd5b600154341015611ca0576040805162461bcd60e51b81526020600482015260146024820152736e6f7420656e6f7567682072656c61792066656560601b604482015290519081900360640190fd5b6003548210158015611cd557503360009081526005602090815260408083206001600160a01b03881684529091529020548211155b611d17576040805162461bcd60e51b815260206004820152600e60248201526d1a5b9d985b1a5908185b5bdd5b9d60921b604482015290519081900360640190fd5b3360009081526009602090815260408083206001600160a01b03888116855290835281842090871684529091529020544210801590611d8557503360009081526009602090815260408083206001600160a01b03878116855290835281842090881684529091529020544210155b611dd6576040805162461bcd60e51b815260206004820152601a60248201527f70656e64696e6720726564656c65676174696f6e206578697374000000000000604482015290519081900360640190fd5b6000611ded836402540be40063ffffffff612f0516565b6002549091503490600090611e0990839063ffffffff612f4716565b60408051600480825260a0820190925291925060609190816020015b6060815260200190600190039081611e25579050509050611e4533612f89565b81600081518110611e5257fe5b6020026020010181905250611e6f886001600160a01b0316612f89565b8160018151811
0611e7c57fe5b6020026020010181905250611e99876001600160a01b0316612f89565b81600281518110611ea657fe5b6020026020010181905250611eba84612fac565b81600381518110611ec757fe5b60200260200101819052506060611ee26003610f4884612fbf565b8051602080830191909120600f80546000908152600a845260408082209390935581546001908101909255338152600d9093529120805490910190559050611f3342620a8c0063ffffffff612ea416565b3360009081526009602090815260408083206001600160a01b038d81168552908352818420908e168452909152902055611f7642620a8c0063ffffffff612ea416565b3360009081526009602090815260408083206001600160a01b038e81168552908352818420908d16845290915290205561200063f7a251d7601083611fc6876402540be40063ffffffff612f0516565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b8381101561202457818101518382015260200161200c565b50505050905090810190601f1680156120515780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b15801561207257600080fd5b505af1158015612086573d6000803e3d6000fd5b5050604051611004925085156108fc02915085906000818181858888f193505050501580156120b9573d6000803e3d6000fd5b506002546040516110029180156108fc02916000818181858888f193505050501580156120ea573d6000803e3d6000fd5b50876001600160a01b0316896001600160a01b0316336001600160a01b03167fdb0d03fdfcb145c486c442659e6a341a8828985505097cb5190afcf541e840158a87604051808381526020018281526020019250505060405180910390a450506010805460ff1916600117905550505050505050565b61100581565b6001600160a01b031660009081526004602052604090205490565b600281565b601081565b6001600160a01b031660009081526008602052604090205490565b61100881565b600b81565b33612000146121f15760405162461bcd60e51b815260040180806020018281038252602f815260200180614f97602f913960400191505060405180910390fd5b60005460ff1661222e576638d7ea4c6800006001908155662386f26fc1000060025568056bc75e2d631000006003556000805460ff191690911790555b612236614df7565b61227861126f84848080601f0160208091040260200160405190810160405280939291908181526
020018383808284376000920191909152506130cf92505050565b905060008060606000805b61228c8661363f565b156122f757806122a9576122a26112848761313e565b94506122ef565b80600114156122c5576122be6112848761313e565b93506122ef565b80600214156122ea576122df6122da8761313e565b613660565b9250600191506122ef565b6122f7565b600101612283565b8161233d576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b612346836136d0565b612389576040805162461bcd60e51b815260206004820152600f60248201526e0eee4dedcce40e0c2c6d640d0c2e6d608b1b604482015290519081900360640190fd5b61239561126f846130cf565b955060006123a56112848861313e565b90506123af614df7565b6123b88861363f565b156123db576123d461126f6123cf6122da8b61313e565b6130cf565b905061241c565b6040805162461bcd60e51b8152602060048201526011602482015270656d7074792061636b207061636b61676560781b604482015290519081900360640190fd5b60ff82166001141561243857612433818888613724565b612466565b60ff82166002141561244f57612433818888613adb565b60ff8216600314156112c357612433818888613da8565b5050505050505050505050565b600981565b61100781565b61100681565b60005460ff1681565b60005460ff166124e4576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e7472616374206e6f7420696e69742079657400000000000000604482015290519081900360640190fd5b33611007146125245760405162461bcd60e51b815260040180806020018281038252602e815260200180614f0d602e913960400191505060405180910390fd5b61258584848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600a81526972656c6179657246656560b01b6020820152915061410d9050565b156126eb57602081146125df576040805162461bcd60e51b815260206004820152601d60248201527f6c656e677468206f662072656c61796572466565206d69736d61746368000000604482015290519081900360640190fd5b604080516020601f840181900481028201810190925282815260009161261d918585808385018382808284376000920191909152506141f492505050565b9050600354811061265f5760405162461bcd60e51b815260040180806020018281038252602e81526
0200180614f3b602e913960400191505060405180910390fd5b600254811161269f5760405162461bcd60e51b815260040180806020018281038252602e815260200180614f69602e913960400191505060405180910390fd5b6402540be4008106156126e35760405162461bcd60e51b815260040180806020018281038252602c815260200180614ee1602c913960400191505060405180910390fd5b6001556129f8565b61274f84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600d81526c62534352656c6179657246656560981b6020820152915061410d9050565b1561287557602081146127a9576040805162461bcd60e51b815260206004820181905260248201527f6c656e677468206f662062534352656c61796572466565206d69736d61746368604482015290519081900360640190fd5b604080516020601f84018190048102820181019092528281526000916127e7918585808385018382808284376000920191909152506141f492505050565b905060015481106128295760405162461bcd60e51b815260040180806020018281038252602e815260200180614e61602e913960400191505060405180910390fd5b6402540be40081061561286d5760405162461bcd60e51b815260040180806020018281038252602f815260200180614e32602f913960400191505060405180910390fd5b6002556129f8565b6128d984848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600d81526c36b4b72232b632b3b0ba34b7b760991b6020820152915061410d9050565b156129bb5760208114612933576040805162461bcd60e51b815260206004820181905260248201527f6c656e677468206f66206d696e44656c65676174696f6e206d69736d61746368604482015290519081900360640190fd5b604080516020601f8401819004810282018101909252828152600091612971918585808385018382808284376000920191909152506141f492505050565b905060015481116129b35760405162461bcd60e51b8152600401808060200182810382526031815260200180614e8f6031913960400191505060405180910390fd5b6003556129f8565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a84848484604051808060200180602001838103835
2878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050565b600481565b60105460009060ff1660021415612ade576040805162461bcd60e51b815260206004820152600e60248201526d4e6f2072652d656e7472616e637960901b604482015290519081900360640190fd5b506010805460ff191660021790553360009081526006602052604090205480612b42576040805162461bcd60e51b81526020600482015260116024820152701b9bc81c195b991a5b99c81c995dd85c99607a1b604482015290519081900360640190fd5b336000818152600660205260408082208290555183156108fc0291849190818181858888f19350505050158015612b7d573d6000803e3d6000fd5b5060408051828152905133917f83b78188b13346b2ffb484da70d42ee27de7fbf9f2bd8045269e10ed643ccd76919081900360200190a26010805460ff1916600117905590565b6638d7ea4c68000081565b6001600160a01b038083166000908152600760209081526040808320938516835292905220545b92915050565b60015490565b61100281565b3361200014612c485760405162461bcd60e51b815260040180806020018281038252602f815260200180614f97602f913960400191505060405180910390fd5b60005460ff16612c85576638d7ea4c6800006001908155662386f26fc1000060025568056bc75e2d631000006003556000805460ff191690911790555b612cc482828080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506136d092505050565b612d07576040805162461bcd60e51b815260206004820152600f60248201526e0eee4dedcce40e0c2c6d640d0c2e6d608b1b604482015290519081900360640190fd5b612d0f614df7565b612d5161126f84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506130cf92505050565b90506000612d616112848361313e565b9050612d6b614df7565b612d748361363f565b15612d9257612d8b61126f6123cf6122da8661313e565b9050612dd8565b6040805162461bcd60e51b8152602060048201526016602482015275656d707479206661696c2061636b207061636b61676560501b604482015290519081900360640190fd5b60ff821660011415612df257612ded816141f9565b612e1c565b60ff821660021415612e0757612ded81614413565b60ff8216600314156112c357612de
d8161453d565b5050505b505050565b6001600160a01b03918216600090815260056020908152604080832093909416825291909152205490565b61100381565b68056bc75e2d6310000081565b6001600160a01b0392831660009081526009602090815260408083209486168352938152838220929094168152925290205490565b61100081565b61100481565b600082820183811015612efe576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b9392505050565b6000612efe83836040518060400160405280601a81526020017f536166654d6174683a206469766973696f6e206279207a65726f0000000000008152506146a2565b6000612efe83836040518060400160405280601e81526020017f536166654d6174683a207375627472616374696f6e206f766572666c6f770000815250614744565b60408051600560a21b8318601482015260348101909152606090612efe8161479e565b6060612bf6612fba836147f4565b61479e565b6060815160001415612fe05750604080516000815260208101909152611196565b606082600081518110612fef57fe5b602002602001015190506000600190505b8351811015613030576130268285838151811061301957fe5b60200260200101516148da565b9150600101613000565b50612efe613043825160c060ff16614957565b826148da565b6040805160028082526060828101909352829190816020015b60608152602001906001900390816130625790505090506130858460ff16612fac565b8160008151811061309257fe5b60200260200101819052506130a68361479e565b816001815181106130b357fe5b60200260200101819052506130c781612fbf565b949350505050565b6130d7614e17565b506040805180820190915281518152602082810190820152919050565b6130fc614df7565b61310582614a4f565b61310e57600080fd5b600061311d8360200151614a89565b60208085015160408051808201909152868152920190820152915050919050565b613146614e17565b61314f8261363f565b61315857600080fd5b6020820151600061316882614aec565b80830160209586015260408051808201909152908152938401919091525090919050565b8051600090158015906131a157508151602110155b6131aa57600080fd5b60006131b98360200151614a89565b90508083600001511015613214576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f66667365740
00000000000604482015290519081900360640190fd5b82516020808501518301805192849003929183101561323a57826020036101000a820491505b50949350505050565b600060606000806000805b6132578761363f565b156132a957826132795761327261326d8861313e565b614c1f565b915061329e565b82600114156132995761328e6112848861313e565b90506001935061329e565b6132a9565b82600101925061324e565b836132ef576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b60006110046001600160a01b031663727be1f8836040518263ffffffff1660e01b815260040180828152602001915050602060405180830381600087803b15801561333957600080fd5b505af115801561334d573d6000803e3d6000fd5b505050506040513d602081101561336357600080fd5b50519050806133885761337a600484846065614c39565b96509650505050505061341e565b6001600160a01b0383166000908152600660205260409020546133b1908363ffffffff612ea416565b6001600160a01b038416600081815260066020908152604091829020939093558051858152905191927f7cc266c7b444f808013fa187f7b904d470a051a6564e78f482aa496581ba4bf892918290030190a260408051600080825260208201909252909750955050505050505b915091565b6000606060008060008060005b6134398861363f565b156134a257836134565761344f61326d8961313e565b9250613497565b83600114156134725761346b61326d8961313e565b9150613497565b8360021415613492576134876112848961313e565b905060019450613497565b6134a2565b836001019350613430565b846134e8576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b60006110046001600160a01b031663727be1f8836040518263ffffffff1660e01b815260040180828152602001915050602060405180830381600087803b15801561353257600080fd5b505af1158015613546573d6000803e3d6000fd5b505050506040513d602081101561355c57600080fd5b505190508061358257613573600585846065614c39565b9750975050505050505061341e565b6001600160a01b0380851660008181526007602090815260408083209488168352938152838220829055918152600890915220546135c6908363ffffffff612ea416565b6001600160a01b0380861660008181526008602090815260409182902094909
455805186815290519287169391927f35a799836f74fac7eccf5c73902823b970543d2274d3b93d8da3d37a255772a2929181900390910190a3604080516000808252602082019092529098509650505050505050915091565b6000613649614e17565b505080518051602091820151919092015191011190565b805160609061366e57600080fd5b600061367d8360200151614a89565b83516040805191839003808352601f19601f82011683016020019091529192506060908280156136b4576020820181803683370190505b509050600081602001905061323a848760200151018285614d35565b8051602080830191909120600e546000908152600a90925260408220548082146136ff57600092505050611196565b5050600e80546000908152600a60205260408120558054600190810190915592915050565b60008060008060005b6137368861363f565b1561379f57836137535761374c61326d8961313e565b9250613794565b836001141561376f5761376861326d8961313e565b9150613794565b836002141561378f576137846112848961313e565b905060019450613794565b61379f565b83600101935061372d565b846137e5576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b60006137fc826402540be40063ffffffff614d8016565b6001600160a01b0385166000908152600b602052604090208054600019019055905060ff8816600114156139425760ff87161561386f576040805162461bcd60e51b815260206004820152600c60248201526b77726f6e672073746174757360a01b604482015290519081900360640190fd5b6001600160a01b038416600090815260046020526040902054613898908263ffffffff612ea416565b6001600160a01b038086166000908152600460209081526040808320949094556005815283822092871682529190915220546138da908263ffffffff612ea416565b6001600160a01b038086166000818152600560209081526040808320948916808452948252918290209490945580518581529051929391927f9a57c81564ab02642f34fd87e41baa9b074c18342cec3b7268b62bf752018fd1929181900390910190a3613ad0565b60ff8816613a94576001600160a01b038416600090815260086020526040902054613973908263ffffffff612ea416565b6001600160a01b0385166000908152600860209081526040808320939093558251630e4f7c3f60e31b81526004810185905292516110049363727be1f89360248083019493928390030190829087803b1580156139cf57600080f
d5b505af11580156139e3573d6000803e3d6000fd5b505050506040513d60208110156139f957600080fd5b5051613a42576040805162461bcd60e51b81526020600482015260136024820152721dda5d1a191c985dc8189b988819985a5b1959606a1b604482015290519081900360640190fd5b6040805182815260ff8916602082015281516001600160a01b0380871693908816927fcbd481ae600289fad8c0484d07ce0ffe4f010d7c844ecfdeaf2a13fead52886e929081900390910190a3613ad0565b6040805162461bcd60e51b815260206004820152600c60248201526b77726f6e672073746174757360a01b604482015290519081900360640190fd5b505050505050505050565b60008060008060005b613aed8861363f565b15613b565783613b0a57613b0361326d8961313e565b9250613b4b565b8360011415613b2657613b1f61326d8961313e565b9150613b4b565b8360021415613b4657613b3b6112848961313e565b905060019450613b4b565b613b56565b836001019350613ae4565b84613b9c576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b6000613bb3826402540be40063ffffffff614d8016565b6001600160a01b0385166000908152600c602052604090208054600019019055905060ff881660011415613d325760ff871615613c26576040805162461bcd60e51b815260206004820152600c60248201526b77726f6e672073746174757360a01b604482015290519081900360640190fd5b6001600160a01b038416600090815260046020526040902054613c4f908263ffffffff612f4716565b6001600160a01b03808616600090815260046020908152604080832094909455600581528382209287168252919091522054613c91908263ffffffff612f4716565b6001600160a01b03808616600090815260056020908152604080832093881683529290522055613cca42620a8c0063ffffffff612ea416565b6001600160a01b038086166000818152600760209081526040808320948916808452948252918290209490945580518581529051929391927fd6f878a5bcbbe79a64e6418bb0d56aaa20b9a60587d45749819df88dfc7c3c44929181900390910190a3613ad0565b60ff8816613a94576001600160a01b03808516600081815260076020908152604080832094881680845294825280832092909255815185815260ff8c169181019190915281517f4417d10c1e33efa83a770b8d4f47176e78c08c1298d534901ad3b16bb585fa2e929181900390910190a3613ad0565b6000806000806000805b613dbb8
961363f565b15613e405784613dd857613dd161326d8a61313e565b9350613e35565b8460011415613df457613ded61326d8a61313e565b9250613e35565b8460021415613e1057613e0961326d8a61313e565b9150613e35565b8460031415613e3057613e256112848a61313e565b905060019550613e35565b613e40565b846001019450613db2565b85613e86576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b6000613e9d826402540be40063ffffffff614d8016565b6001600160a01b0386166000908152600d602052604090208054600019019055905060ff8916600114156140705760ff881615613f10576040805162461bcd60e51b815260206004820152600c60248201526b77726f6e672073746174757360a01b604482015290519081900360640190fd5b6001600160a01b03808616600090815260056020908152604080832093881683529290522054613f46908263ffffffff612f4716565b6001600160a01b03868116600090815260056020908152604080832089851684529091528082209390935590851681522054613f829082612ea4565b6001600160a01b03808716600090815260056020908152604080832093881683529290522055613fbb42620a8c0063ffffffff612ea416565b6001600160a01b0380871660009081526009602090815260408083208985168452825280832093881683529290522055613ffe42620a8c0063ffffffff612ea416565b6001600160a01b038087166000818152600960209081526040808320898616808552908352818420958b1680855295835292819020959095558451868152945191947f78bffae3f8c6691ac7fc1a3bff800cb2d612f5ad9ae5b0444cfe2eb15c189e18929081900390910190a4614101565b60ff8916613a94576001600160a01b038581166000818152600960209081526040808320898616808552818452828520968a16808652968452828520859055908352818420818552835281842093909355805186815260ff8e169281019290925280519293927fb93bee5c59f85ede6b074a99f4ffcd3e3fc0d5c3d8156de331de89a49e0ce77c9281900390910190a45b50505050505050505050565b6000816040516020018082805190602001908083835b602083106141425780518252601f199092019160209182019101614123565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b602083106141b057805182526
01f199092019160209182019101614191565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012014905092915050565b015190565b60008060008060005b61420b8661363f565b1561427457836142285761422161326d8761313e565b9250614269565b83600114156142445761423d61326d8761313e565b9150614269565b8360021415614264576142596112848761313e565b905060019450614269565b614274565b836001019350614202565b846142ba576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b60006142d1826402540be40063ffffffff614d8016565b6001600160a01b0385166000908152600b602090815260408083208054600019019055600890915290205490915061430f908263ffffffff612ea416565b6001600160a01b0385166000908152600860209081526040808320939093558251630e4f7c3f60e31b81526004810185905292516110049363727be1f89360248083019493928390030190829087803b15801561436b57600080fd5b505af115801561437f573d6000803e3d6000fd5b505050506040513d602081101561439557600080fd5b50516143de576040805162461bcd60e51b81526020600482015260136024820152721dda5d1a191c985dc8189b988819985a5b1959606a1b604482015290519081900360640190fd5b6040516001907ff83de021914a4585482db5ca47d520a5657165b443fa2c7ef8ed4635f054da9b90600090a250505050505050565b60008060008060005b6144258661363f565b1561448e57836144425761443b61326d8761313e565b9250614483565b836001141561445e5761445761326d8761313e565b9150614483565b836002141561447e576144736112848761313e565b905060019450614483565b61448e565b83600101935061441c565b846144d4576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b6001600160a01b038084166000908152600c60209081526040808320805460001901905560078252808320938616835292905281812081905590516002917ff83de021914a4585482db5ca47d520a5657165b443fa2c7ef8ed4635f054da9b91a2505050505050565b6000806000806000805b6145508761363f565b156145d5578461456d5761456661326d8861313e565b93506145ca565b84600114156145895761458261326d8861313e565b9250614
5ca565b84600214156145a55761459e61326d8861313e565b91506145ca565b84600314156145c5576145ba6112848861313e565b9050600195506145ca565b6145d5565b846001019450614547565b8561461b576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b6001600160a01b038481166000908152600d602090815260408083208054600019019055600982528083208785168085528184528285209588168552948352818420849055825280832093835292905281812081905590516003917ff83de021914a4585482db5ca47d520a5657165b443fa2c7ef8ed4635f054da9b91a250505050505050565b6000818361472e5760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b838110156146f35781810151838201526020016146db565b50505050905090810190601f1680156147205780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b50600083858161473a57fe5b0495945050505050565b600081848411156147965760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156146f35781810151838201526020016146db565b505050900390565b6060815160011480156147d05750607f60f81b826000815181106147be57fe5b01602001516001600160f81b03191611155b156147dc575080611196565b612bf66147ee8351608060ff16614957565b836148da565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff1984166148385750601861485c565b6fffffffffffffffffffffffffffffffff1984166148585750601061485c565b5060005b60208110156148925781818151811061487157fe5b01602001516001600160f81b0319161561488a57614892565b60010161485c565b60008160200390506060816040519080825280601f01601f1916602001820160405280156148c7576020820181803683370190505b5080830196909652508452509192915050565b6060806040519050835180825260208201818101602087015b8183101561490b5780518352602092830192016148f3565b50855184518101855292509050808201602086015b81831015614938578051835260209283019201614920565b508651929092011591909101601f01601f191660405250905092915050565b60606801000000000000000083106149a757604080516
2461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b60408051600180825281830190925260609160208201818036833701905050905060378411614a015782840160f81b816000815181106149e357fe5b60200101906001600160f81b031916908160001a9053509050612bf6565b6060614a0c856147f4565b90508381510160370160f81b82600081518110614a2557fe5b60200101906001600160f81b031916908160001a905350614a4682826148da565b95945050505050565b8051600090614a6057506000611196565b6020820151805160001a9060c0821015614a7f57600092505050611196565b5060019392505050565b8051600090811a6080811015614aa3576000915050611196565b60b8811080614abe575060c08110801590614abe575060f881105b15614acd576001915050611196565b60c0811015614ae15760b519019050611196565b60f519019050611196565b80516000908190811a6080811015614b075760019150614c18565b60b8811015614b1c57607e1981019150614c18565b60c0811015614b9657600060b78203600186019550806020036101000a865104915060018101820193505080831015614b90576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b50614c18565b60f8811015614bab5760be1981019150614c18565b600060f78203600186019550806020036101000a865104915060018101820193505080831015614c16576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b8051600090601514614c3057600080fd5b612bf68261318c565b60006060614c52846402540be40063ffffffff612f0516565b60408051600480825260a0820190925291955060609190816020015b6060815260200190600190039081614c6e579050509050614c918760ff16612fac565b81600081518110614c9e57fe5b6020026020010181905250614cbb866001600160a01b0316612f89565b81600181518110614cc857fe5b6020026020010181905250614cdc85612fac565b81600281518110614ce957fe5b6020026020010181905250614d038463ffffffff16612fac565b81600381518110614d1057fe5b60200260200101819052506060614d2682612fbf565b94989497509395505050505050565b80614d3f57612e20565b5b60208110614d5f57825182526020928301929091019
0601f1901614d40565b915181516020939093036101000a6000190180199091169216919091179052565b600082614d8f57506000612bf6565b82820282848281614d9c57fe5b0414612efe5760405162461bcd60e51b8152600401808060200182810382526021815260200180614ec06021913960400191505060405180910390fd5b60405180606001604052806003906020820280368337509192915050565b6040518060400160405280614e0a614e17565b8152602001600081525090565b60405180604001604052806000815260200160008152509056fe7468652042534352656c61796572466565206d6f642074656e20646563696d616c73206d757374206265207a65726f7468652042534352656c61796572466565206d757374206265206c657373207468616e2072656c61796572466565746865206d696e44656c65676174696f6e206d7573742062652067726561746572207468616e2072656c61796572466565536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f777468652072656c61796572466565206d6f642074656e20646563696d616c73206d757374206265207a65726f746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e74726163747468652072656c61796572466565206d757374206265206c657373207468616e206d696e44656c65676174696f6e7468652072656c61796572466565206d757374206265206d6f7265207468616e2042534352656c61796572466565746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374a2646970667358221220066cc7c05d87e3cd20ecf3649deba7ac4ce8b389992969b6006793cbf330c73564736f6c63430006040033", + }, + }, + } + + gibbsUpgrade[rialtoNet] = &Upgrade{ + UpgradeName: "gibbs", + Configs: []*UpgradeConfig{ + { + ContractAddr: TokenHubContract, + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/9d45b31c12b2c04757284717f4351cb44e81a3a7", + Code: 
"60806040526004361061036f5760003560e01c80639a854bbd116101c6578063bd466461116100f7578063f014847211610095578063fc1a598f1161006f578063fc1a598f14610d58578063fc3e590814610a3b578063fd6a687914610d8b578063ff9c0027146106ec576103b7565b8063f014847214610d19578063f9a2bbc714610d2e578063fa9e915914610d43576103b7565b8063d9e6dae9116100d1578063d9e6dae91461066e578063dc927faf14610cda578063e1c7392a14610cef578063ebf71d5314610d04576103b7565b8063bd46646114610c0d578063c81b166214610c40578063c8509d8114610c55576103b7565b8063aa7415f511610164578063b99328c51161013e578063b99328c514610b77578063b9fd21e314610bb0578063ba35ead614610bc5578063bbface1f14610bda576103b7565b8063aa7415f514610a50578063ab51bb9614610a97578063ac43175114610aac576103b7565b8063a1a11bf5116101a0578063a1a11bf5146109fc578063a496fba214610a11578063a78abc1614610a26578063a7c9f02d14610a3b576103b7565b80639a854bbd146109995780639a99b4f0146109ae5780639dc09262146109e7576103b7565b806361368475116102a0578063727be1f81161023e578063831d65d111610218578063831d65d1146108c05780638b87b21f146105ed5780638eff336c1461094557806396713da914610984576103b7565b8063727be1f81461086c57806375d47a0a146108965780637942fd05146108ab576103b7565b80636e47b4821161027a5780636e47b4821461082d57806370fd5bad146106ec578063718a8aa81461084257806371d3086314610857576103b7565b8063613684751461066e57806366dea52a146106ec5780636e05652014610701576103b7565b806343a368b91161030d57806350432d32116102e757806350432d321461068357806351e806721461069857806359b92789146106ad5780635d499b1b146106d7576103b7565b806343a368b91461062d578063493279b1146106425780634bf6c8821461066e576103b7565b8063149d14d911610349578063149d14d9146105155780633d7132231461053c5780633dffc387146105ed57806343756e5c14610618576103b7565b80630bee7a67146103bc5780630e2374a5146103ea5780631182b8751461041b576103b7565b366103b75734156103b5576040805133815234602082015281517f6c98249d85d88c3753a04a22230f595e4dc8d3dc86c34af35deeeedc861b89db929181900390910190a15b005b600080fd5b3480156103c857600080fd5b506103d1610da0565b6040805163ffffffff9092168252519081900360200
190f35b3480156103f657600080fd5b506103ff610da5565b604080516001600160a01b039092168252519081900360200190f35b34801561042757600080fd5b506104a06004803603604081101561043e57600080fd5b60ff8235169190810190604081016020820135600160201b81111561046257600080fd5b82018360208201111561047457600080fd5b803590602001918460018302840111600160201b8311171561049557600080fd5b509092509050610dab565b6040805160208082528351818301528351919283929083019185019080838360005b838110156104da5781810151838201526020016104c2565b50505050905090810190601f1680156105075780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561052157600080fd5b5061052a610ed9565b60408051918252519081900360200190f35b34801561054857600080fd5b506103ff6004803603602081101561055f57600080fd5b810190602081018135600160201b81111561057957600080fd5b82018360208201111561058b57600080fd5b803590602001918460018302840111600160201b831117156105ac57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610edf945050505050565b3480156105f957600080fd5b50610602610f03565b6040805160ff9092168252519081900360200190f35b34801561062457600080fd5b506103ff610f08565b34801561063957600080fd5b5061052a610f0e565b34801561064e57600080fd5b50610657610f1a565b6040805161ffff9092168252519081900360200190f35b34801561067a57600080fd5b50610602610f20565b34801561068f57600080fd5b5061052a610f25565b3480156106a457600080fd5b506103ff610f30565b3480156106b957600080fd5b506103ff600480360360208110156106d057600080fd5b5035610f36565b3480156106e357600080fd5b5061052a610f51565b3480156106f857600080fd5b50610602610f5a565b6108196004803603608081101561071757600080fd5b810190602081018135600160201b81111561073157600080fd5b82018360208201111561074357600080fd5b803590602001918460208302840111600160201b8311171561076457600080fd5b919390929091602081019035600160201b81111561078157600080fd5b82018360208201111561079357600080fd5b803590602001918460208302840111600160201b831117156107b457600080fd5b919390929091602081019035600160201b8111156107d15760008
0fd5b8201836020820111156107e357600080fd5b803590602001918460208302840111600160201b8311171561080457600080fd5b91935091503567ffffffffffffffff16610f5f565b604080519115158252519081900360200190f35b34801561083957600080fd5b506103ff611434565b34801561084e57600080fd5b5061060261143a565b34801561086357600080fd5b5061052a61143f565b34801561087857600080fd5b506108196004803603602081101561088f57600080fd5b5035611445565b3480156108a257600080fd5b506103ff6114bf565b3480156108b757600080fd5b506106026114c5565b3480156108cc57600080fd5b506103b5600480360360408110156108e357600080fd5b60ff8235169190810190604081016020820135600160201b81111561090757600080fd5b82018360208201111561091957600080fd5b803590602001918460018302840111600160201b8311171561093a57600080fd5b5090925090506114ca565b34801561095157600080fd5b506103b56004803603606081101561096857600080fd5b508035906001600160a01b036020820135169060400135611613565b34801561099057600080fd5b50610602611699565b3480156109a557600080fd5b5061052a61169e565b3480156109ba57600080fd5b5061052a600480360360408110156109d157600080fd5b506001600160a01b0381351690602001356116aa565b3480156109f357600080fd5b506103ff6117e8565b348015610a0857600080fd5b506103ff6117ee565b348015610a1d57600080fd5b506106026117f4565b348015610a3257600080fd5b506108196117f9565b348015610a4757600080fd5b50610602611802565b61081960048036036080811015610a6657600080fd5b5080356001600160a01b03908116916020810135909116906040810135906060013567ffffffffffffffff16611807565b348015610aa357600080fd5b506103d16117f4565b348015610ab857600080fd5b506103b560048036036040811015610acf57600080fd5b810190602081018135600160201b811115610ae957600080fd5b820183602082011115610afb57600080fd5b803590602001918460018302840111600160201b83111715610b1c57600080fd5b919390929091602081019035600160201b811115610b3957600080fd5b820183602082011115610b4b57600080fd5b803590602001918460018302840111600160201b83111715610b6c57600080fd5b509092509050611ec8565b348015610b8357600080fd5b506103b560048036036040811015610b9a57600080fd5b50803590602001356001600160a01b0316612137565b348015610bbc5
7600080fd5b5061052a6121ad565b348015610bd157600080fd5b5061052a6121b7565b348015610be657600080fd5b5061052a60048036036020811015610bfd57600080fd5b50356001600160a01b03166121bd565b348015610c1957600080fd5b5061052a60048036036020811015610c3057600080fd5b50356001600160a01b03166121cf565b348015610c4c57600080fd5b506103ff6121ea565b348015610c6157600080fd5b506103b560048036036040811015610c7857600080fd5b60ff8235169190810190604081016020820135600160201b811115610c9c57600080fd5b820183602082011115610cae57600080fd5b803590602001918460018302840111600160201b83111715610ccf57600080fd5b5090925090506121f0565b348015610ce657600080fd5b506103ff6122c0565b348015610cfb57600080fd5b506103b56122c6565b348015610d1057600080fd5b50610602612366565b348015610d2557600080fd5b5061060261236b565b348015610d3a57600080fd5b506103ff612370565b348015610d4f57600080fd5b5061052a612376565b348015610d6457600080fd5b506104a060048036036020811015610d7b57600080fd5b50356001600160a01b031661237c565b348015610d9757600080fd5b506103ff6124a3565b606481565b61200181565b60005460609060ff16610df3576040805162461bcd60e51b81526020600482015260196024820152600080516020614906833981519152604482015290519081900360640190fd5b3361200014610e335760405162461bcd60e51b815260040180806020018281038252602f8152602001806148b4602f913960400191505060405180910390fd5b60ff841660021415610e8557610e7e83838080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506124a992505050565b9050610ed2565b6040805162461bcd60e51b815260206004820152601860248201527f756e7265636f676e697a65642073796e207061636b6167650000000000000000604482015290519081900360640190fd5b9392505050565b60015490565b6020818101516000908152600490915260409020546001600160a01b03165b919050565b600181565b61100181565b670de0b6b3a764000081565b6102ca81565b600881565b66071afd498d000081565b61200081565b6000908152600460205260409020546001600160a01b031690565b6402540be40081565b600281565b6000805460ff16610fa5576040805162461bcd60e51b81526020600482015260196024820152600080516020614906833981519152604482015290519081900
360640190fd5b868514610fe35760405162461bcd60e51b815260040180806020018281038252603b815260200180614879603b913960400191505060405180910390fd5b8683146110215760405162461bcd60e51b815260040180806020018281038252603f81526020018061474c603f913960400191505060405180910390fd5b426078018267ffffffffffffffff16101561106d5760405162461bcd60e51b815260040180806020018281038252602481526020018061463c6024913960400191505060405180910390fd5b6402540be4003406156110b15760405162461bcd60e51b815260040180806020018281038252604081526020018061497c6040913960400191505060405180910390fd5b604080518681526020808802820101909152859060009081906060908480156110e4578160200160208202803683370190505b50905060005b848110156111bf576402540be4008b8b8381811061110457fe5b905060200201358161111257fe5b061561114f5760405162461bcd60e51b815260040180806020018281038252603c81526020018061478b603c913960400191505060405180910390fd5b6111748b8b8381811061115e57fe5b90506020020135856125cd90919063ffffffff16565b93506111a06402540be4008c8c8481811061118b57fe5b9050602002013561262790919063ffffffff16565b8282815181106111ac57fe5b60209081029190910101526001016110ea565b506001546111e4906111d7908663ffffffff61266916565b849063ffffffff6125cd16565b3410156112225760405162461bcd60e51b81526004018080602001828103825260568152602001806149266056913960600191505060405180910390fd5b611232348463ffffffff6126c216565b915061123c614462565b6040518060c001604052806221272160e91b60001b815260200160006001600160a01b031681526020018381526020018e8e808060200260200160405190810160405280939291908181526020018383602002808284376000920191909152505050908252506040805160208c810282810182019093528c82529283019290918d918d91829185019084908082843760009201919091525050509082525067ffffffffffffffff8916602090910152905061200063f7a251d7600361130084612704565b611315876402540be40063ffffffff61262716565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b8381101561137357818101518382015260200161135b565b50505050905090810190601f1680156113a0578
0820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b1580156113c157600080fd5b505af11580156113d5573d6000803e3d6000fd5b505060408051600081523360208201528082018890526060810187905290517f74eab09b0e53aefc23f2e1b16da593f95c2dd49c6f5a23720463d10d9c330b2a9350908190036080019150a15060019c9b505050505050505050505050565b61100581565b601081565b60015481565b600033612001146114875760405162461bcd60e51b815260040180806020018281038252603381526020018061457c6033913960400191505060405180910390fd5b6040516120019083156108fc029084906000818181858888f193505050501580156114b6573d6000803e3d6000fd5b50600192915050565b61100881565b600b81565b60005460ff1661150f576040805162461bcd60e51b81526020600482015260196024820152600080516020614906833981519152604482015290519081900360640190fd5b336120001461154f5760405162461bcd60e51b815260040180806020018281038252602f8152602001806148b4602f913960400191505060405180910390fd5b60ff83166003141561159f5761159a82828080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506129bf92505050565b61160e565b7f41ce201247b6ceb957dcdb217d0b8acb50b9ea0e12af9af4f5e7f38902101605838383604051808460ff1660ff168152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f1916909201829003965090945050505050a15b505050565b33611008146116535760405162461bcd60e51b81526004018080602001828103825260238152602001806148e36023913960400191505060405180910390fd5b600083815260046020908152604080832080546001600160a01b039096166001600160a01b03199096168617905593825260038152838220949094556002909352912055565b600981565b677ce66c50e284000081565b6000805460ff166116f0576040805162461bcd60e51b81526020600482015260196024820152600080516020614906833981519152604482015290519081900360640190fd5b33611005146117305760405162461bcd60e51b815260040180806020018281038252602f8152602001806145af602f913960400191505060405180910390fd5b600047831061173f5747611741565b825b9050670de0b6b3a764000081111561175d5760009150506117e2565b80156117df576040516001600160a01b038
5169082156108fc029083906000818181858888f19350505050158015611799573d6000803e3d6000fd5b50604080516001600160a01b03861681526020810183905281517ff8b71c64315fc33b2ead2adfa487955065152a8ac33d9d5193aafd7f45dc15a0929181900390910190a15b90505b92915050565b61100781565b61100681565b600081565b60005460ff1681565b600381565b6000805460ff1661184d576040805162461bcd60e51b81526020600482015260196024820152600080516020614906833981519152604482015290519081900360640190fd5b426078018267ffffffffffffffff1610156118995760405162461bcd60e51b815260040180806020018281038252602481526020018061463c6024913960400191505060405180910390fd5b6402540be4003406156118dd5760405162461bcd60e51b815260040180806020018281038252604081526020018061497c6040913960400191505060405180910390fd5b600080806001600160a01b0388166119bc5760015461190390879063ffffffff6125cd16565b3410156119415760405162461bcd60e51b81526004018080602001828103825260618152602001806146c66061913960800191505060405180910390fd5b6402540be4008606156119855760405162461bcd60e51b815260040180806020018281038252603c81526020018061478b603c913960400191505060405180910390fd5b611995348763ffffffff6126c216565b90506119ac866402540be40063ffffffff61262716565b6221272160e91b93509150611c5f565b6001600160a01b038816600090815260036020526040902054925082611a135760405162461bcd60e51b81526004018080602001828103825260318152602001806146956031913960400191505060405180910390fd5b600154341015611a545760405162461bcd60e51b815260040180806020018281038252603f8152602001806147e8603f913960400191505060405180910390fd5b506001600160a01b0387166000908152600260205260409020543490600881111580611a9f5750600881118015611a9f5750611a9d876007198301600a0a63ffffffff612a1b16565b155b611ada5760405162461bcd60e51b815260040180806020018281038252603c81526020018061478b603c913960400191505060405180910390fd5b611ae48782612a5d565b9250611aef84612a9d565b15611b37576305f5e100831015611b375760405162461bcd60e51b815260040180806020018281038252603a8152602001806145de603a913960400191505060405180910390fd5b600881101580611b515750600881108015611b5157508683115b611b8c57604
05162461bcd60e51b81526004018080602001828103825260258152602001806147276025913960400191505060405180910390fd5b677ce66c50e2840000831115611bd35760405162461bcd60e51b81526004018080602001828103825260358152602001806146606035913960400191505060405180910390fd5b604080516323b872dd60e01b81523360048201523060248201526044810189905290516001600160a01b038b16916323b872dd9160648083019260209291908290030181600087803b158015611c2857600080fd5b505af1158015611c3c573d6000803e3d6000fd5b505050506040513d6020811015611c5257600080fd5b5051611c5d57600080fd5b505b611c67614462565b6040805160c0810182528581526001600160a01b038b166020820152815160018082528184018452919283019181602001602082028036833750505081526040805160018082528183019092526020928301929091908083019080368337505050815260408051600180825281830190925260209283019290919080830190803683370190505081526020018767ffffffffffffffff168152509050828160400151600081518110611d1557fe5b602002602001018181525050878160600151600081518110611d3357fe5b60200260200101906001600160a01b031690816001600160a01b031681525050338160800151600081518110611d6557fe5b6001600160a01b039092166020928302919091019091015261200063f7a251d76003611d9084612704565b611da5866402540be40063ffffffff61262716565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b83811015611e03578181015183820152602001611deb565b50505050905090810190601f168015611e305780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b158015611e5157600080fd5b505af1158015611e65573d6000803e3d6000fd5b5050604080516001600160a01b038d1681523360208201528082018b90526060810186905290517f74eab09b0e53aefc23f2e1b16da593f95c2dd49c6f5a23720463d10d9c330b2a9350908190036080019150a150600198975050505050505050565b3361100714611f085760405162461bcd60e51b815260040180806020018281038252602e815260200180614827602e913960400191505060405180910390fd5b60208114611f5d576040805162461bcd60e51b815260206004820152601b60248201527f65787065637465642076616c7565206c656e6
774682069732033320000000000604482015290519081900360640190fd5b606084848080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050604080516020601f8801819004810282018101909252868152939450606093925086915085908190840183828082843760009201919091525050505060208301519091506772656c617946656560c01b811415612065576020820151670de0b6b3a7640000811180159061200c57506402540be4008106155b61205d576040805162461bcd60e51b815260206004820152601960248201527f7468652072656c6179466565206f7574206f662072616e676500000000000000604482015290519081900360640190fd5b6001556120a2565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a878787876040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050505050565b33611008146121775760405162461bcd60e51b81526004018080602001828103825260238152602001806148e36023913960400191505060405180910390fd5b600091825260046020908152604080842080546001600160a01b03191690556001600160a01b0392909216835260039052812055565b6221272160e91b81565b61c35081565b60026020526000908152604090205481565b6001600160a01b031660009081526003602052604090205490565b61100281565b60005460ff16612235576040805162461bcd60e51b81526020600482015260196024820152600080516020614906833981519152604482015290519081900360640190fd5b33612000146122755760405162461bcd60e51b815260040180806020018281038252602f8152602001806148b4602f913960400191505060405180910390fd5b60ff83166003141561159f5761159a82828080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250612ba392505050565b61100381565b60005460ff161561231e576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b66071afd498d00006
0019081556000808052600260205260127fac33ff75c19e70fe83507db0d683fd3465c996598dc972688b7ace676c89077b55805460ff19169091179055565b600481565b600581565b61100081565b61271081565b6001600160a01b03811660009081526003602090815260409182902054825182815280840190935260609290918391906020820181803683375050506020810183905290506000805b60208160ff16101561240c57828160ff16815181106123e057fe5b01602001516001600160f81b031916156123ff57600190910190612404565b61240c565b6001016123c5565b5060608160ff166040519080825280601f01601f19166020018201604052801561243d576020820181803683370190505b50905060005b8260ff168160ff16101561249957838160ff168151811061246057fe5b602001015160f81c60f81b828260ff168151811061247a57fe5b60200101906001600160f81b031916908160001a905350600101612443565b5095945050505050565b61100481565b60606124b36144ae565b60006124be84612ca1565b9150915080612514576040805162461bcd60e51b815260206004820152601f60248201527f756e7265636f676e697a6564207472616e73666572496e207061636b61676500604482015290519081900360640190fd5b600061251f83612de0565b905063ffffffff8116156125b3576040808401516020808601516001600160a01b0316600090815260029091529182205461255a9190612a5d565b90506125646144e3565b60405180608001604052808660000151815260200183815260200186608001516001600160a01b031681526020018463ffffffff1681525090506125a78161312d565b95505050505050610efe565b50506040805160008152602081019091529150610efe9050565b6000828201838110156117df576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b60006117df83836040518060400160405280601a81526020017f536166654d6174683a206469766973696f6e206279207a65726f000000000000815250613209565b600082612678575060006117e2565b8282028284828161268557fe5b04146117df5760405162461bcd60e51b81526004018080602001828103825260218152602001806147c76021913960400191505060405180910390fd5b60006117df83836040518060400160405280601e81526020017f536166654d6174683a207375627472616374696f6e206f766572666c6f7700008152506132ab565b60408051600680825260e
08201909252606091829190816020015b606081526020019060019003908161271f575050835190915061274190613305565b8160008151811061274e57fe5b602002602001018190525061276f83602001516001600160a01b0316613318565b8160018151811061277c57fe5b6020026020010181905250600083604001515190506060816040519080825280602002602001820160405280156127c757816020015b60608152602001906001900390816127b25790505b50905060005b82811015612814576127f5866040015182815181106127e857fe5b6020026020010151613305565b82828151811061280157fe5b60209081029190910101526001016127cd565b5061281e8161333b565b8360028151811061282b57fe5b602002602001018190525060608260405190808252806020026020018201604052801561286c57816020015b60608152602001906001900390816128575790505b50905060005b838110156128c2576128a38760600151828151811061288d57fe5b60200260200101516001600160a01b0316613318565b8282815181106128af57fe5b6020908102919091010152600101612872565b506128cc8161333b565b846003815181106128d957fe5b602002602001018190525060608360405190808252806020026020018201604052801561291a57816020015b60608152602001906001900390816129055790505b50905060005b8481101561295a5761293b8860800151828151811061288d57fe5b82828151811061294757fe5b6020908102919091010152600101612920565b506129648161333b565b8560048151811061297157fe5b60200260200101819052506129938760a0015167ffffffffffffffff16613305565b856005815181106129a057fe5b60200260200101819052506129b48561333b565b979650505050505050565b6129c761450a565b60006129d2836133c5565b9150915080612a125760405162461bcd60e51b81526004018080602001828103825260248152602001806148556024913960400191505060405180910390fd5b61160e82613590565b60006117df83836040518060400160405280601881526020017f536166654d6174683a206d6f64756c6f206279207a65726f0000000000000000815250613a14565b60006008821115612a8657612a7f836007198401600a0a63ffffffff61262716565b90506117e2565b6117df836008849003600a0a63ffffffff61266916565b604080516020808252818301909252600091606091906020820181803683375050506020810184905290506000805b60208160ff161015612b1357828160ff1681518110612ae757fe5b01602001516001600160f81b0319161
5612b0657600190910190612b0b565b612b13565b600101612acc565b50600760ff82161015612b2b57600092505050610efe565b816005820360ff1681518110612b3d57fe5b6020910101516001600160f81b031916602d60f81b14612b6257600092505050610efe565b816001820360ff1681518110612b7457fe5b6020910101516001600160f81b031916604d60f81b14612b9957600092505050610efe565b5060019392505050565b612bab614462565b6000612bb683613a76565b9150915080612bf65760405162461bcd60e51b81526004018080602001828103825260248152602001806146186024913960400191505060405180910390fd5b612bfe61450a565b602080840180516001600160a01b0390811684526040808701518585015291511660009081526002909252812054905b846040015151811015612c7f57612c5c85604001518281518110612c4e57fe5b602002602001015183613cda565b85604001518281518110612c6c57fe5b6020908102919091010152600101612c2e565b506080840151604083015260056060830152612c9a82613590565b5050505050565b612ca96144ae565b6000612cb36144ae565b612cbb614541565b612ccc612cc786613d13565b613d38565b90506000805b612cdb83613d82565b15612dd35780612cfd57612cf6612cf184613da3565b613df1565b8452612dcb565b8060011415612d2a57612d17612d1284613da3565b613ea8565b6001600160a01b03166020850152612dcb565b8060021415612d4957612d3f612cf184613da3565b6040850152612dcb565b8060031415612d7157612d5e612d1284613da3565b6001600160a01b03166060850152612dcb565b8060041415612d9957612d86612d1284613da3565b6001600160a01b03166080850152612dcb565b8060051415612dc657612dae612cf184613da3565b67ffffffffffffffff1660a085015260019150612dcb565b612dd3565b600101612cd2565b5091935090915050915091565b60208101516000906001600160a01b0316612f17578160a0015167ffffffffffffffff16421115612e1357506001610efe565b8160400151471015612e2757506003610efe565b606082015160408084015190516000926001600160a01b0316916127109184818181858888f193505050503d8060008114612e7e576040519150601f19603f3d011682016040523d82523d6000602084013e612e83565b606091505b5050905080612e96575060049050610efe565b7f471eb9cc1ffe55ffadf15b32595415eb9d80f22e761d24bd6dffc607e1284d5983602001518460600151856040015160405180846001600160a01b03166001600160a01b0316815
2602001836001600160a01b03166001600160a01b03168152602001828152602001935050505060405180910390a15060009050610efe565b8160a0015167ffffffffffffffff16421115612f3557506001610efe565b81516020808401516001600160a01b031660009081526003909152604090205414612f6257506002610efe565b602080830151604080516370a0823160e01b815230600482015290516000936001600160a01b03909316926370a082319261c3509260248083019392829003018187803b158015612fb257600080fd5b5086fa158015612fc6573d6000803e3d6000fd5b50505050506040513d6020811015612fdd57600080fd5b50516040840151909150811015612ff8575060039050610efe565b600083602001516001600160a01b031663a9059cbb61c350866060015187604001516040518463ffffffff1660e01b815260040180836001600160a01b03166001600160a01b0316815260200182815260200192505050602060405180830381600088803b15801561306957600080fd5b5087f115801561307d573d6000803e3d6000fd5b50505050506040513d602081101561309457600080fd5b505190508015613121577f471eb9cc1ffe55ffadf15b32595415eb9d80f22e761d24bd6dffc607e1284d5984602001518560600151866040015160405180846001600160a01b03166001600160a01b03168152602001836001600160a01b03166001600160a01b03168152602001828152602001935050505060405180910390a15060009150610efe9050565b5060059150610efe9050565b60408051600480825260a08201909252606091829190816020015b6060815260200190600190039081613148575050835190915061316a90613305565b8160008151811061317757fe5b602002602001018190525061318f8360200151613305565b8160018151811061319c57fe5b60200260200101819052506131bd83604001516001600160a01b0316613318565b816002815181106131ca57fe5b60200260200101819052506131e8836060015163ffffffff16613305565b816003815181106131f557fe5b6020026020010181905250610ed28161333b565b600081836132955760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b8381101561325a578181015183820152602001613242565b50505050905090810190601f1680156132875780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b5060008385816132a157fe5b0495945050505050565b600081848411156132fd5760405162461bcd60e51b8152602060048
20181815283516024840152835190928392604490910191908501908083836000831561325a578181015183820152602001613242565b505050900390565b60606117e261331383613ec2565b613fa8565b60408051600560a21b8318601482015260348101909152606090610ed281613fa8565b606081516000141561335c5750604080516000815260208101909152610efe565b60608260008151811061336b57fe5b602002602001015190506000600190505b83518110156133ac576133a28285838151811061339557fe5b6020026020010151613ffa565b915060010161337c565b50610ed26133bf825160c060ff16614077565b82613ffa565b6133cd61450a565b60006133d761450a565b6133df614541565b6133eb612cc786613d13565b90506000805b6133fa83613d82565b15612dd3578061342057613410612d1284613da3565b6001600160a01b03168452613588565b80600114156134c157606061343c61343785613da3565b61416f565b90508051604051908082528060200260200182016040528015613469578160200160208202803683370190505b50602086015260005b81518110156134ba5761349782828151811061348a57fe5b6020026020010151613df1565b866020015182815181106134a757fe5b6020908102919091010152600101613472565b5050613588565b80600214156135635760606134d861343785613da3565b90508051604051908082528060200260200182016040528015613505578160200160208202803683370190505b50604086015260005b81518110156134ba5761353382828151811061352657fe5b6020026020010151613ea8565b8660400151828151811061354357fe5b6001600160a01b039092166020928302919091019091015260010161350e565b8060031415612dc657613578612cf184613da3565b63ffffffff166060850152600191505b6001016133f1565b80516001600160a01b03166137ba5760005b8160200151518110156137b4576000826040015182815181106135c157fe5b60200260200101516001600160a01b0316612710846020015184815181106135e557fe5b60209081029190910101516040516000818181858888f193505050503d806000811461362d576040519150601f19603f3d011682016040523d82523d6000602084013e613632565b606091505b50509050806136f5577f203f9f67a785f4f81be4d48b109aa0c498d1bc8097ecc2627063f480cc5fe73e83600001518460400151848151811061367157fe5b60200260200101518560200151858151811061368957fe5b6020026020010151866060015160405180856001600160a01b03166001600160a01b0316815
2602001846001600160a01b03166001600160a01b031681526020018381526020018263ffffffff1663ffffffff16815260200194505050505060405180910390a16137ab565b7fd468d4fa5e8fb4adc119b29a983fd0785e04af5cb8b7a3a69a47270c54b6901a83600001518460400151848151811061372b57fe5b60200260200101518560200151858151811061374357fe5b6020026020010151866060015160405180856001600160a01b03166001600160a01b03168152602001846001600160a01b03166001600160a01b031681526020018381526020018263ffffffff1663ffffffff16815260200194505050505060405180910390a15b506001016135a2565b50613a11565b60005b816020015151811015613a0f57600082600001516001600160a01b031663a9059cbb61c350856040015185815181106137f257fe5b60200260200101518660200151868151811061380a57fe5b60200260200101516040518463ffffffff1660e01b815260040180836001600160a01b03166001600160a01b0316815260200182815260200192505050602060405180830381600088803b15801561386157600080fd5b5087f1158015613875573d6000803e3d6000fd5b50505050506040513d602081101561388c57600080fd5b505190508015613950577fd468d4fa5e8fb4adc119b29a983fd0785e04af5cb8b7a3a69a47270c54b6901a8360000151846040015184815181106138cc57fe5b6020026020010151856020015185815181106138e457fe5b6020026020010151866060015160405180856001600160a01b03166001600160a01b03168152602001846001600160a01b03166001600160a01b031681526020018381526020018263ffffffff1663ffffffff16815260200194505050505060405180910390a1613a06565b7f203f9f67a785f4f81be4d48b109aa0c498d1bc8097ecc2627063f480cc5fe73e83600001518460400151848151811061398657fe5b60200260200101518560200151858151811061399e57fe5b6020026020010151866060015160405180856001600160a01b03166001600160a01b03168152602001846001600160a01b03166001600160a01b031681526020018381526020018263ffffffff1663ffffffff16815260200194505050505060405180910390a15b506001016137bd565b505b50565b60008183613a635760405162461bcd60e51b815260206004820181815283516024840152835190928392604490910191908501908083836000831561325a578181015183820152602001613242565b50828481613a6d57fe5b06949350505050565b613a7e614462565b6000613a88614462565b613a90614541565b613a9c612cc78
6613d13565b90506000805b613aab83613d82565b15612dd35780613ac857613ac1612cf184613da3565b8452613cd2565b8060011415613af057613add612d1284613da3565b6001600160a01b03166020850152613cd2565b8060021415613b7f576060613b0761343785613da3565b90508051604051908082528060200260200182016040528015613b34578160200160208202803683370190505b50604086015260005b8151811015613b7857613b5582828151811061348a57fe5b86604001518281518110613b6557fe5b6020908102919091010152600101613b3d565b5050613cd2565b8060031415613c14576060613b9661343785613da3565b90508051604051908082528060200260200182016040528015613bc3578160200160208202803683370190505b50606086015260005b8151811015613b7857613be482828151811061352657fe5b86606001518281518110613bf457fe5b6001600160a01b0390921660209283029190910190910152600101613bcc565b8060041415613ca9576060613c2b61343785613da3565b90508051604051908082528060200260200182016040528015613c58578160200160208202803683370190505b50608086015260005b8151811015613b7857613c7982828151811061352657fe5b86608001518281518110613c8957fe5b6001600160a01b0390921660209283029190910190910152600101613c61565b8060051415612dc657613cbe612cf184613da3565b67ffffffffffffffff1660a0850152600191505b600101613aa2565b60006008821115613cfc57612a7f836007198401600a0a63ffffffff61266916565b6117df836008849003600a0a63ffffffff61262716565b613d1b614561565b506040805180820190915281518152602082810190820152919050565b613d40614541565b613d4982614240565b613d5257600080fd5b6000613d618360200151614270565b60208085015160408051808201909152868152920190820152915050919050565b6000613d8c614561565b505080518051602091820151919092015191011190565b613dab614561565b613db482613d82565b613dbd57600080fd5b60208201516000613dcd826142d3565b80830160209586015260408051808201909152908152938401919091525090919050565b805160009015801590613e0657508151602110155b613e0f57600080fd5b6000613e1e8360200151614270565b90508083600001511015613e79576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b82516020808
5015183018051928490039291831015613e9f57826020036101000a820491505b50949350505050565b8051600090601514613eb957600080fd5b6117e282613df1565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff198416613f0657506018613f2a565b6fffffffffffffffffffffffffffffffff198416613f2657506010613f2a565b5060005b6020811015613f6057818181518110613f3f57fe5b01602001516001600160f81b03191615613f5857613f60565b600101613f2a565b60008160200390506060816040519080825280601f01601f191660200182016040528015613f95576020820181803683370190505b5080830196909652508452509192915050565b606081516001148015613fda5750607f60f81b82600081518110613fc857fe5b01602001516001600160f81b03191611155b15613fe6575080610efe565b6117e2613ff88351608060ff16614077565b835b6060806040519050835180825260208201818101602087015b8183101561402b578051835260209283019201614013565b50855184518101855292509050808201602086015b81831015614058578051835260209283019201614040565b508651929092011591909101601f01601f191660405250905092915050565b60606801000000000000000083106140c7576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b604080516001808252818301909252606091602082018180368337019050509050603784116141215782840160f81b8160008151811061410357fe5b60200101906001600160f81b031916908160001a90535090506117e2565b606061412c85613ec2565b90508381510160370160f81b8260008151811061414557fe5b60200101906001600160f81b031916908160001a9053506141668282613ffa565b95945050505050565b606061417a82614240565b61418357600080fd5b600061418e83614406565b90506060816040519080825280602002602001820160405280156141cc57816020015b6141b9614561565b8152602001906001900390816141b15790505b50905060006141de8560200151614270565b60208601510190506000805b84811015614235576141fb836142d3565b915060405180604001604052808381526020018481525084828151811061421e57fe5b6020908102919091010152918101916001016141ea565b509195945050505050565b805160009061425157506000610efe565b6020820151805160001a9060c0821015612b9957600
092505050610efe565b8051600090811a608081101561428a576000915050610efe565b60b88110806142a5575060c081108015906142a5575060f881105b156142b4576001915050610efe565b60c08110156142c85760b519019050610efe565b60f519019050610efe565b80516000908190811a60808110156142ee57600191506143ff565b60b881101561430357607e19810191506143ff565b60c081101561437d57600060b78203600186019550806020036101000a865104915060018101820193505080831015614377576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b506143ff565b60f88110156143925760be19810191506143ff565b600060f78203600186019550806020036101000a8651049150600181018201935050808310156143fd576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b805160009061441757506000610efe565b6000809050600061442b8460200151614270565b602085015185519181019250015b808210156144595761444a826142d3565b60019093019290910190614439565b50909392505050565b6040518060c001604052806000801916815260200160006001600160a01b03168152602001606081526020016060815260200160608152602001600067ffffffffffffffff1681525090565b6040805160c081018252600080825260208201819052918101829052606081018290526080810182905260a081019190915290565b60408051608081018252600080825260208201819052918101829052606081019190915290565b604051806080016040528060006001600160a01b031681526020016060815260200160608152602001600063ffffffff1681525090565b6040518060400160405280614554614561565b8152602001600081525090565b60405180604001604052806000815260200160008152509056fe6f6e6c79207374616b696e672073797374656d20636f6e74726163742063616e2063616c6c20746869732066756e6374696f6e746865206d6573736167652073656e646572206d75737420626520696e63656e746976697a6520636f6e7472616374466f72206d696e69546f6b656e2c20746865207472616e7366657220616d6f756e74206d757374206e6f74206265206c657373207468616e2031756e7265636f676e697a6564207472616e736665724f75742073796e207061636b61676565787069726554696d65206d757374206265207
4776f206d696e75746573206c61746572616d6f756e7420697320746f6f206c617267652c20657863656564206d6178696d756d206265703220746f6b656e20616d6f756e7474686520636f6e747261637420686173206e6f74206265656e20626f756e6420746f20616e79206265703220746f6b656e726563656976656420424e4220616d6f756e742073686f756c64206265206e6f206c657373207468616e207468652073756d206f66207472616e736665724f757420424e4220616d6f756e7420616e64206d696e696d756d2072656c6179466565616d6f756e7420697320746f6f206c617267652c2075696e74323536206f766572666c6f774c656e677468206f6620726563697069656e74416464727320646f65736e277420657175616c20746f206c656e677468206f6620726566756e644164647273696e76616c6964207472616e7366657220616d6f756e743a20707265636973696f6e206c6f737320696e20616d6f756e7420636f6e76657273696f6e536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f77726563656976656420424e4220616d6f756e742073686f756c64206265206e6f206c657373207468616e20746865206d696e696d756d2072656c6179466565746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e7472616374756e7265636f676e697a6564207472616e736665724f75742061636b207061636b6167654c656e677468206f6620726563697069656e74416464727320646f65736e277420657175616c20746f206c656e677468206f6620616d6f756e7473746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d73672073656e646572206d75737420626520746f6b656e4d616e6167657274686520636f6e7472616374206e6f7420696e69742079657400000000000000726563656976656420424e4220616d6f756e742073686f756c64206265206e6f206c657373207468616e207468652073756d206f66207472616e7366657220424e4220616d6f756e7420616e642072656c6179466565696e76616c696420726563656976656420424e4220616d6f756e743a20707265636973696f6e206c6f737320696e20616d6f756e7420636f6e76657273696f6ea26469706673582212208f0455128650f210f452673f65fe656f790624ecf0bc6bbea21225e02413b67564736f6c63430006040033", + }, + { + ContractAddr: StakingContract, + CommitUrl: 
"https://github.com/bnb-chain/bsc-genesis-contract/commit/9d45b31c12b2c04757284717f4351cb44e81a3a7", + Code: "60806040526004361061036f5760003560e01c806370fd5bad116101c6578063b88a802f116100f7578063d7ecfcb611610095578063f45fd80b1161006f578063f45fd80b14610bd0578063f9a2bbc714610c15578063fc3e590814610646578063fd6a687914610c2a57610376565b8063d7ecfcb6146107ee578063dc927faf14610ba6578063edc1a5b014610bbb57610376565b8063c2117d82116100d1578063c2117d8214610abc578063c81b166214610ad1578063c8509d8114610ae6578063d61b9b9314610b6b57610376565b8063b88a802f14610a57578063baaafd3b14610a6c578063bf8546ca14610a8157610376565b806396713da911610164578063a78abc161161013e578063a78abc1614610939578063ab51bb9614610962578063ac43175114610977578063b14315df14610a4257610376565b806396713da9146108fa5780639dc092621461090f578063a1a11bf51461092457610376565b806375d47a0a116101a057806375d47a0a1461084b5780637942fd0514610860578063831d65d11461087557806392b888a4146105f257610376565b806370fd5bad146107ee578063718a8aa81461080357806375aca5931461081857610376565b8063413d9c3a116102a05780635d17c8bd1161023e57806369b635b61161021857806369b635b61461075b5780636bd8f804146107705780636e47b482146107a65780636fb7f7eb146107bb57610376565b80635d17c8bd1461071c5780635d499b1b1461073157806362b171d21461074657610376565b80634bf6c8821161027a5780634bf6c882146106b15780634d99dd16146106c657806351e80672146106f2578063552aaf931461070757610376565b8063413d9c3a1461065b57806343756e5c14610670578063493279b11461068557610376565b8063151817e31161030d578063333ad3e7116102e7578063333ad3e71461061c57806334c43354146106315780633dffc387146105f25780633fdfa7e41461064657610376565b8063151817e3146105c757806317c9efb0146105f25780632fdeb1111461060757610376565b80630bee7a67116103495780630bee7a671461043b5780630e2374a5146104695780631182b8751461049a57806311fe9ec61461059457610376565b8063026e402b1461037b57806302985992146103a9578063047636d1146103d057610376565b3661037657005b600080fd5b6103a76004803603604081101561039157600080fd5b506001600160a01b038135169060200135610c3f565b005b3480156103b557
600080fd5b506103be611141565b60408051918252519081900360200190f35b3480156103dc57600080fd5b50610403600480360360208110156103f357600080fd5b50356001600160a01b0316611147565b6040518082606080838360005b83811015610428578181015183820152602001610410565b5050505090500191505060405180910390f35b34801561044757600080fd5b5061045061119b565b6040805163ffffffff9092168252519081900360200190f35b34801561047557600080fd5b5061047e6111a0565b604080516001600160a01b039092168252519081900360200190f35b3480156104a657600080fd5b5061051f600480360360408110156104bd57600080fd5b60ff8235169190810190604081016020820135600160201b8111156104e157600080fd5b8201836020820111156104f357600080fd5b803590602001918460018302840111600160201b8311171561051457600080fd5b5090925090506111a6565b6040805160208082528351818301528351919283929083019185019080838360005b83811015610559578181015183820152602001610541565b50505050905090810190601f1680156105865780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156105a057600080fd5b506103be600480360360208110156105b757600080fd5b50356001600160a01b031661135c565b3480156105d357600080fd5b506105dc611377565b6040805160ff9092168252519081900360200190f35b3480156105fe57600080fd5b506105dc61137c565b34801561061357600080fd5b506103be611381565b34801561062857600080fd5b50610450611387565b34801561063d57600080fd5b506103be61138c565b34801561065257600080fd5b506105dc611397565b34801561066757600080fd5b506103be61139c565b34801561067c57600080fd5b5061047e6113a3565b34801561069157600080fd5b5061069a6113a9565b6040805161ffff9092168252519081900360200190f35b3480156106bd57600080fd5b506105dc6113af565b6103a7600480360360408110156106dc57600080fd5b506001600160a01b0381351690602001356113b4565b3480156106fe57600080fd5b5061047e61199b565b34801561071357600080fd5b506105dc6119a1565b34801561072857600080fd5b506103be6119a6565b34801561073d57600080fd5b506103be6119ac565b34801561075257600080fd5b506103be6119b5565b34801561076757600080fd5b506103be611aed565b6103a76004803603606081101561078657600080fd5b506001600160a01b03813581169160208101
359091169060400135611af3565b3480156107b257600080fd5b5061047e612161565b3480156107c757600080fd5b506103be600480360360208110156107de57600080fd5b50356001600160a01b0316612167565b3480156107fa57600080fd5b506105dc612182565b34801561080f57600080fd5b506105dc612187565b34801561082457600080fd5b506103be6004803603602081101561083b57600080fd5b50356001600160a01b031661218c565b34801561085757600080fd5b5061047e6121a7565b34801561086c57600080fd5b506105dc6121ad565b34801561088157600080fd5b506103a76004803603604081101561089857600080fd5b60ff8235169190810190604081016020820135600160201b8111156108bc57600080fd5b8201836020820111156108ce57600080fd5b803590602001918460018302840111600160201b831117156108ef57600080fd5b5090925090506121b2565b34801561090657600080fd5b506105dc612474565b34801561091b57600080fd5b5061047e612479565b34801561093057600080fd5b5061047e61247f565b34801561094557600080fd5b5061094e612485565b604080519115158252519081900360200190f35b34801561096e57600080fd5b506104506119a1565b34801561098357600080fd5b506103a76004803603604081101561099a57600080fd5b810190602081018135600160201b8111156109b457600080fd5b8201836020820111156109c657600080fd5b803590602001918460018302840111600160201b831117156109e757600080fd5b919390929091602081019035600160201b811115610a0457600080fd5b820183602082011115610a1657600080fd5b803590602001918460018302840111600160201b83111715610a3757600080fd5b50909250905061248e565b348015610a4e57600080fd5b506105dc612a8b565b348015610a6357600080fd5b506103be612a90565b348015610a7857600080fd5b506103be612bc5565b348015610a8d57600080fd5b506103be60048036036040811015610aa457600080fd5b506001600160a01b0381358116916020013516612bd0565b348015610ac857600080fd5b506103be612bfd565b348015610add57600080fd5b5061047e612c03565b348015610af257600080fd5b506103a760048036036040811015610b0957600080fd5b60ff8235169190810190604081016020820135600160201b811115610b2d57600080fd5b820183602082011115610b3f57600080fd5b803590602001918460018302840111600160201b83111715610b6057600080fd5b509092509050612c09565b348015610b7757600080fd5b506103be6004803603
6040811015610b8e57600080fd5b506001600160a01b0381358116916020013516612e26565b348015610bb257600080fd5b5061047e612e51565b348015610bc757600080fd5b506103be612e57565b348015610bdc57600080fd5b506103be60048036036060811015610bf357600080fd5b506001600160a01b038135811691602081013582169160409091013516612e64565b348015610c2157600080fd5b5061047e612e99565b348015610c3657600080fd5b5061047e612e9f565b60105460ff1660021415610c8b576040805162461bcd60e51b815260206004820152600e60248201526d4e6f2072652d656e7472616e637960901b604482015290519081900360640190fd5b6010805460ff19166002179055806402540be4003406158015610cb357506402540be4008106155b610d04576040805162461bcd60e51b815260206004820152601c60248201527f707265636973696f6e206c6f737320696e20636f6e76657273696f6e00000000604482015290519081900360640190fd5b60005460ff16610d41576638d7ea4c6800006001908155662386f26fc1000060025568056bc75e2d631000006003556000805460ff191690911790555b600354821015610d98576040805162461bcd60e51b815260206004820152601760248201527f696e76616c69642064656c656761746520616d6f756e74000000000000000000604482015290519081900360640190fd5b600154610dac90839063ffffffff612ea516565b341015610df7576040805162461bcd60e51b81526020600482015260146024820152736e6f7420656e6f756768206d73672076616c756560601b604482015290519081900360640190fd5b60405133906108fc9060009081818181818888f19350505050610e55576040805162461bcd60e51b815260206004820152601160248201527034b73b30b634b2103232b632b3b0ba37b960791b604482015290519081900360640190fd5b6000610e6c836402540be40063ffffffff612f0616565b90506000610e80348563ffffffff612f4816565b90506000610e9960025483612f4890919063ffffffff16565b6040805160038082526080820190925291925060609190816020015b6060815260200190600190039081610eb5579050509050610ed533612f8a565b81600081518110610ee257fe5b6020026020010181905250610eff876001600160a01b0316612f8a565b81600181518110610f0c57fe5b6020026020010181905250610f2084612fad565b81600281518110610f2d57fe5b60200260200101819052506060610f4d6001610f4884612fc0565b61304a565b8051602080830191909120600f80546000908152600a8452604080
82209390935581546001908101909255338152600b909352912080549091019055905061200063f7a251d7601083610fab876402540be40063ffffffff612f0616565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b83811015611009578181015183820152602001610ff1565b50505050905090810190601f1680156110365780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b15801561105757600080fd5b505af115801561106b573d6000803e3d6000fd5b5061100492506108fc91506110889050898663ffffffff612ea516565b6040518115909202916000818181858888f193505050501580156110b0573d6000803e3d6000fd5b506002546040516110029180156108fc02916000818181858888f193505050501580156110e1573d6000803e3d6000fd5b50604080518881526020810185905281516001600160a01b038b169233927f5f32ed2794e2e72d19e3cb2320e8820a499c4204887372beba51f5e61c040867929081900390910190a350506010805460ff19166001179055505050505050565b60035481565b61114f614dda565b611157614dda565b6001600160a01b0383166000818152600b60209081526040808320548552838352600c82528083205485830152928252600d9052819020549082015290505b919050565b606481565b61200181565b606033612000146111e85760405162461bcd60e51b815260040180806020018281038252602f815260200180614f98602f913960400191505060405180910390fd5b60005460ff16611225576638d7ea4c6800006001908155662386f26fc1000060025568056bc75e2d631000006003556000805460ff191690911790555b61122d614df8565b61127461126f85858080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506130d092505050565b6130f5565b905060006112896112848361313f565b61318d565b90506000606060ff8316600414156112ae576112a484613244565b9092509050611305565b60ff8316600514156112c3576112a484613424565b6040805162461bcd60e51b8152602060048201526012602482015271756e6b6e6f776e206576656e74207479706560701b604482015290519081900360640190fd5b63ffffffff821615611351576040805163ffffffff84168152905160ff8516917f391d6e5ea6ab6c49b9a0abb1782cae5def8d711f973b00c729658c0b2a80b31b919081900360200190a2
5b979650505050505050565b6001600160a01b031660009081526006602052604090205490565b600581565b600181565b60015481565b606581565b662386f26fc1000081565b600381565b620a8c0081565b61100181565b6102ca81565b600881565b60105460ff1660021415611400576040805162461bcd60e51b815260206004820152600e60248201526d4e6f2072652d656e7472616e637960901b604482015290519081900360640190fd5b6010805460ff19166002179055806402540be400340615801561142857506402540be4008106155b611479576040805162461bcd60e51b815260206004820152601c60248201527f707265636973696f6e206c6f737320696e20636f6e76657273696f6e00000000604482015290519081900360640190fd5b60005460ff166114b6576638d7ea4c6800006001908155662386f26fc1000060025568056bc75e2d631000006003556000805460ff191690911790555b600154341015611504576040805162461bcd60e51b81526020600482015260146024820152736e6f7420656e6f7567682072656c61792066656560601b604482015290519081900360640190fd5b6003548210156115bf576002548211611557576040805162461bcd60e51b815260206004820152601060248201526f6e6f7420656e6f7567682066756e647360801b604482015290519081900360640190fd5b3360009081526005602090815260408083206001600160a01b038716845290915290205482146115bf576040805162461bcd60e51b815260206004820152600e60248201526d1a5b9d985b1a5908185b5bdd5b9d60921b604482015290519081900360640190fd5b3360009081526005602090815260408083206001600160a01b0387168452909152902054821115611628576040805162461bcd60e51b815260206004820152600e60248201526d1a5b9d985b1a5908185b5bdd5b9d60921b604482015290519081900360640190fd5b3360009081526007602090815260408083206001600160a01b03871684529091529020544210156116a0576040805162461bcd60e51b815260206004820152601a60248201527f70656e64696e6720756e64656c65676174696f6e206578697374000000000000604482015290519081900360640190fd5b60006116b7836402540be40063ffffffff612f0616565b60025490915034906000906116d390839063ffffffff612f4816565b6040805160038082526080820190925291925060609190816020015b60608152602001906001900390816116ef57905050905061170f33612f8a565b8160008151811061171c57fe5b6020026020010181905250611739876001600160a01b0316612f8a56
5b8160018151811061174657fe5b602002602001018190525061175a84612fad565b8160028151811061176757fe5b602002602001018190525060606117826002610f4884612fc0565b8051602080830191909120600f80546000908152600a845260408082209390935581546001908101909255338152600c90935291208054909101905590506117d342620a8c0063ffffffff612ea516565b3360009081526007602090815260408083206001600160a01b038d16845290915290205561200063f7a251d7601083611817876402540be40063ffffffff612f0616565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b8381101561187557818101518382015260200161185d565b50505050905090810190601f1680156118a25780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b1580156118c357600080fd5b505af11580156118d7573d6000803e3d6000fd5b5050604051611004925085156108fc02915085906000818181858888f1935050505015801561190a573d6000803e3d6000fd5b506002546040516110029180156108fc02916000818181858888f1935050505015801561193b573d6000803e3d6000fd5b50604080518881526020810185905281516001600160a01b038b169233927fdf0b6ac27f3f3bb31cee3dab0f4fe40cc19c6a3f8daaec52e06b261e58a12519929081900390910190a350506010805460ff19166001179055505050505050565b61200081565b600081565b60025481565b6402540be40081565b60105460009060ff1660021415611a04576040805162461bcd60e51b815260206004820152600e60248201526d4e6f2072652d656e7472616e637960901b604482015290519081900360640190fd5b506010805460ff191660021790553360009081526008602052604090205480611a6b576040805162461bcd60e51b81526020600482015260146024820152736e6f20756e64656c6567617465642066756e647360601b604482015290519081900360640190fd5b336000818152600860205260408082208290555183156108fc0291849190818181858888f19350505050158015611aa6573d6000803e3d6000fd5b5060408051828152905133917fc712d133b8d448221aaed2198ed1f0db6dfc860fb01bc3a630916fe6cbef946f919081900360200190a26010805460ff1916600117905590565b60035490565b60105460ff1660021415611b3f576040805162461bcd60e51b815260206004820152600e60248201526d4e6f20
72652d656e7472616e637960901b604482015290519081900360640190fd5b6010805460ff19166002179055806402540be4003406158015611b6757506402540be4008106155b611bb8576040805162461bcd60e51b815260206004820152601c60248201527f707265636973696f6e206c6f737320696e20636f6e76657273696f6e00000000604482015290519081900360640190fd5b60005460ff16611bf5576638d7ea4c6800006001908155662386f26fc1000060025568056bc75e2d631000006003556000805460ff191690911790555b826001600160a01b0316846001600160a01b03161415611c53576040805162461bcd60e51b815260206004820152601460248201527334b73b30b634b2103932b232b632b3b0ba34b7b760611b604482015290519081900360640190fd5b600154341015611ca1576040805162461bcd60e51b81526020600482015260146024820152736e6f7420656e6f7567682072656c61792066656560601b604482015290519081900360640190fd5b6003548210158015611cd657503360009081526005602090815260408083206001600160a01b03881684529091529020548211155b611d18576040805162461bcd60e51b815260206004820152600e60248201526d1a5b9d985b1a5908185b5bdd5b9d60921b604482015290519081900360640190fd5b3360009081526009602090815260408083206001600160a01b03888116855290835281842090871684529091529020544210801590611d8657503360009081526009602090815260408083206001600160a01b03878116855290835281842090881684529091529020544210155b611dd7576040805162461bcd60e51b815260206004820152601a60248201527f70656e64696e6720726564656c65676174696f6e206578697374000000000000604482015290519081900360640190fd5b6000611dee836402540be40063ffffffff612f0616565b6002549091503490600090611e0a90839063ffffffff612f4816565b60408051600480825260a0820190925291925060609190816020015b6060815260200190600190039081611e26579050509050611e4633612f8a565b81600081518110611e5357fe5b6020026020010181905250611e70886001600160a01b0316612f8a565b81600181518110611e7d57fe5b6020026020010181905250611e9a876001600160a01b0316612f8a565b81600281518110611ea757fe5b6020026020010181905250611ebb84612fad565b81600381518110611ec857fe5b60200260200101819052506060611ee36003610f4884612fc0565b8051602080830191909120600f80546000908152600a845260408082209390935581546001
908101909255338152600d9093529120805490910190559050611f3442620a8c0063ffffffff612ea516565b3360009081526009602090815260408083206001600160a01b038d81168552908352818420908e168452909152902055611f7742620a8c0063ffffffff612ea516565b3360009081526009602090815260408083206001600160a01b038e81168552908352818420908d16845290915290205561200063f7a251d7601083611fc7876402540be40063ffffffff612f0616565b6040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b8381101561202557818101518382015260200161200d565b50505050905090810190601f1680156120525780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b15801561207357600080fd5b505af1158015612087573d6000803e3d6000fd5b5050604051611004925085156108fc02915085906000818181858888f193505050501580156120ba573d6000803e3d6000fd5b506002546040516110029180156108fc02916000818181858888f193505050501580156120eb573d6000803e3d6000fd5b50876001600160a01b0316896001600160a01b0316336001600160a01b03167fdb0d03fdfcb145c486c442659e6a341a8828985505097cb5190afcf541e840158a87604051808381526020018281526020019250505060405180910390a450506010805460ff1916600117905550505050505050565b61100581565b6001600160a01b031660009081526004602052604090205490565b600281565b601081565b6001600160a01b031660009081526008602052604090205490565b61100881565b600b81565b33612000146121f25760405162461bcd60e51b815260040180806020018281038252602f815260200180614f98602f913960400191505060405180910390fd5b60005460ff1661222f576638d7ea4c6800006001908155662386f26fc1000060025568056bc75e2d631000006003556000805460ff191690911790555b612237614df8565b61227961126f84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506130d092505050565b905060008060606000805b61228d86613640565b156122f857806122aa576122a36112848761313f565b94506122f0565b80600114156122c6576122bf6112848761313f565b93506122f0565b80600214156122eb576122e06122db8761313f565b613661565b9250600191506122f0565b6122f8565b
600101612284565b8161233e576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b612347836136d1565b61238a576040805162461bcd60e51b815260206004820152600f60248201526e0eee4dedcce40e0c2c6d640d0c2e6d608b1b604482015290519081900360640190fd5b61239661126f846130d0565b955060006123a66112848861313f565b90506123b0614df8565b6123b988613640565b156123dc576123d561126f6123d06122db8b61313f565b6130d0565b905061241d565b6040805162461bcd60e51b8152602060048201526011602482015270656d7074792061636b207061636b61676560781b604482015290519081900360640190fd5b60ff82166001141561243957612434818888613725565b612467565b60ff82166002141561245057612434818888613adc565b60ff8216600314156112c357612434818888613da9565b5050505050505050505050565b600981565b61100781565b61100681565b60005460ff1681565b60005460ff166124e5576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e7472616374206e6f7420696e69742079657400000000000000604482015290519081900360640190fd5b33611007146125255760405162461bcd60e51b815260040180806020018281038252602e815260200180614f0e602e913960400191505060405180910390fd5b61258684848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600a81526972656c6179657246656560b01b6020820152915061410e9050565b156126ec57602081146125e0576040805162461bcd60e51b815260206004820152601d60248201527f6c656e677468206f662072656c61796572466565206d69736d61746368000000604482015290519081900360640190fd5b604080516020601f840181900481028201810190925282815260009161261e918585808385018382808284376000920191909152506141f592505050565b905060035481106126605760405162461bcd60e51b815260040180806020018281038252602e815260200180614f3c602e913960400191505060405180910390fd5b60025481116126a05760405162461bcd60e51b815260040180806020018281038252602e815260200180614f6a602e913960400191505060405180910390fd5b6402540be4008106156126e45760405162461bcd60e51b815260040180806020018281038252602c815260200180614ee2602c9139604001
91505060405180910390fd5b6001556129f9565b61275084848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600d81526c62534352656c6179657246656560981b6020820152915061410e9050565b1561287657602081146127aa576040805162461bcd60e51b815260206004820181905260248201527f6c656e677468206f662062534352656c61796572466565206d69736d61746368604482015290519081900360640190fd5b604080516020601f84018190048102820181019092528281526000916127e8918585808385018382808284376000920191909152506141f592505050565b9050600154811061282a5760405162461bcd60e51b815260040180806020018281038252602e815260200180614e62602e913960400191505060405180910390fd5b6402540be40081061561286e5760405162461bcd60e51b815260040180806020018281038252602f815260200180614e33602f913960400191505060405180910390fd5b6002556129f9565b6128da84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600d81526c36b4b72232b632b3b0ba34b7b760991b6020820152915061410e9050565b156129bc5760208114612934576040805162461bcd60e51b815260206004820181905260248201527f6c656e677468206f66206d696e44656c65676174696f6e206d69736d61746368604482015290519081900360640190fd5b604080516020601f8401819004810282018101909252828152600091612972918585808385018382808284376000920191909152506141f592505050565b905060015481116129b45760405162461bcd60e51b8152600401808060200182810382526031815260200180614e906031913960400191505060405180910390fd5b6003556129f9565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050565b600481565b60105460009060ff1660021415612adf576040805162461bcd60e51b815260206004820152600e60248201526d4e6f20
72652d656e7472616e637960901b604482015290519081900360640190fd5b506010805460ff191660021790553360009081526006602052604090205480612b43576040805162461bcd60e51b81526020600482015260116024820152701b9bc81c195b991a5b99c81c995dd85c99607a1b604482015290519081900360640190fd5b336000818152600660205260408082208290555183156108fc0291849190818181858888f19350505050158015612b7e573d6000803e3d6000fd5b5060408051828152905133917f83b78188b13346b2ffb484da70d42ee27de7fbf9f2bd8045269e10ed643ccd76919081900360200190a26010805460ff1916600117905590565b6638d7ea4c68000081565b6001600160a01b038083166000908152600760209081526040808320938516835292905220545b92915050565b60015490565b61100281565b3361200014612c495760405162461bcd60e51b815260040180806020018281038252602f815260200180614f98602f913960400191505060405180910390fd5b60005460ff16612c86576638d7ea4c6800006001908155662386f26fc1000060025568056bc75e2d631000006003556000805460ff191690911790555b612cc582828080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506136d192505050565b612d08576040805162461bcd60e51b815260206004820152600f60248201526e0eee4dedcce40e0c2c6d640d0c2e6d608b1b604482015290519081900360640190fd5b612d10614df8565b612d5261126f84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506130d092505050565b90506000612d626112848361313f565b9050612d6c614df8565b612d7583613640565b15612d9357612d8c61126f6123d06122db8661313f565b9050612dd9565b6040805162461bcd60e51b8152602060048201526016602482015275656d707479206661696c2061636b207061636b61676560501b604482015290519081900360640190fd5b60ff821660011415612df357612dee816141fa565b612e1d565b60ff821660021415612e0857612dee81614414565b60ff8216600314156112c357612dee8161453e565b5050505b505050565b6001600160a01b03918216600090815260056020908152604080832093909416825291909152205490565b61100381565b68056bc75e2d6310000081565b6001600160a01b0392831660009081526009602090815260408083209486168352938152838220929094168152925290205490565b61100081565b61100481565b600082
820183811015612eff576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b9392505050565b6000612eff83836040518060400160405280601a81526020017f536166654d6174683a206469766973696f6e206279207a65726f0000000000008152506146a3565b6000612eff83836040518060400160405280601e81526020017f536166654d6174683a207375627472616374696f6e206f766572666c6f770000815250614745565b60408051600560a21b8318601482015260348101909152606090612eff8161479f565b6060612bf7612fbb836147f5565b61479f565b6060815160001415612fe15750604080516000815260208101909152611196565b606082600081518110612ff057fe5b602002602001015190506000600190505b8351811015613031576130278285838151811061301a57fe5b60200260200101516148db565b9150600101613001565b50612eff613044825160c060ff16614958565b826148db565b6040805160028082526060828101909352829190816020015b60608152602001906001900390816130635790505090506130868460ff16612fad565b8160008151811061309357fe5b60200260200101819052506130a78361479f565b816001815181106130b457fe5b60200260200101819052506130c881612fc0565b949350505050565b6130d8614e18565b506040805180820190915281518152602082810190820152919050565b6130fd614df8565b61310682614a50565b61310f57600080fd5b600061311e8360200151614a8a565b60208085015160408051808201909152868152920190820152915050919050565b613147614e18565b61315082613640565b61315957600080fd5b6020820151600061316982614aed565b80830160209586015260408051808201909152908152938401919091525090919050565b8051600090158015906131a257508151602110155b6131ab57600080fd5b60006131ba8360200151614a8a565b90508083600001511015613215576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b82516020808501518301805192849003929183101561323b57826020036101000a820491505b50949350505050565b600060606000806000805b61325887613640565b156132aa578261327a5761327361326e8861313f565b614c20565b915061329f565b826001141561329a5761328f6112848861313f565b90
506001935061329f565b6132aa565b82600101925061324f565b836132f0576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b60006110046001600160a01b031663727be1f8836040518263ffffffff1660e01b815260040180828152602001915050602060405180830381600087803b15801561333a57600080fd5b505af115801561334e573d6000803e3d6000fd5b505050506040513d602081101561336457600080fd5b50519050806133895761337b600484846065614c3a565b96509650505050505061341f565b6001600160a01b0383166000908152600660205260409020546133b2908363ffffffff612ea516565b6001600160a01b038416600081815260066020908152604091829020939093558051858152905191927f7cc266c7b444f808013fa187f7b904d470a051a6564e78f482aa496581ba4bf892918290030190a260408051600080825260208201909252909750955050505050505b915091565b6000606060008060008060005b61343a88613640565b156134a357836134575761345061326e8961313f565b9250613498565b83600114156134735761346c61326e8961313f565b9150613498565b8360021415613493576134886112848961313f565b905060019450613498565b6134a3565b836001019350613431565b846134e9576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b60006110046001600160a01b031663727be1f8836040518263ffffffff1660e01b815260040180828152602001915050602060405180830381600087803b15801561353357600080fd5b505af1158015613547573d6000803e3d6000fd5b505050506040513d602081101561355d57600080fd5b505190508061358357613574600585846065614c3a565b9750975050505050505061341f565b6001600160a01b0380851660008181526007602090815260408083209488168352938152838220829055918152600890915220546135c7908363ffffffff612ea516565b6001600160a01b0380861660008181526008602090815260409182902094909455805186815290519287169391927f35a799836f74fac7eccf5c73902823b970543d2274d3b93d8da3d37a255772a2929181900390910190a3604080516000808252602082019092529098509650505050505050915091565b600061364a614e18565b505080518051602091820151919092015191011190565b805160609061366f57600080fd5b600061367e83602001
51614a8a565b83516040805191839003808352601f19601f82011683016020019091529192506060908280156136b5576020820181803683370190505b509050600081602001905061323b848760200151018285614d36565b8051602080830191909120600e546000908152600a909252604082205480821461370057600092505050611196565b5050600e80546000908152600a60205260408120558054600190810190915592915050565b60008060008060005b61373788613640565b156137a057836137545761374d61326e8961313f565b9250613795565b83600114156137705761376961326e8961313f565b9150613795565b8360021415613790576137856112848961313f565b905060019450613795565b6137a0565b83600101935061372e565b846137e6576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b60006137fd826402540be40063ffffffff614d8116565b6001600160a01b0385166000908152600b602052604090208054600019019055905060ff8816600114156139435760ff871615613870576040805162461bcd60e51b815260206004820152600c60248201526b77726f6e672073746174757360a01b604482015290519081900360640190fd5b6001600160a01b038416600090815260046020526040902054613899908263ffffffff612ea516565b6001600160a01b038086166000908152600460209081526040808320949094556005815283822092871682529190915220546138db908263ffffffff612ea516565b6001600160a01b038086166000818152600560209081526040808320948916808452948252918290209490945580518581529051929391927f9a57c81564ab02642f34fd87e41baa9b074c18342cec3b7268b62bf752018fd1929181900390910190a3613ad1565b60ff8816613a95576001600160a01b038416600090815260086020526040902054613974908263ffffffff612ea516565b6001600160a01b0385166000908152600860209081526040808320939093558251630e4f7c3f60e31b81526004810185905292516110049363727be1f89360248083019493928390030190829087803b1580156139d057600080fd5b505af11580156139e4573d6000803e3d6000fd5b505050506040513d60208110156139fa57600080fd5b5051613a43576040805162461bcd60e51b81526020600482015260136024820152721dda5d1a191c985dc8189b988819985a5b1959606a1b604482015290519081900360640190fd5b6040805182815260ff8916602082015281516001600160a01b03808716
93908816927fcbd481ae600289fad8c0484d07ce0ffe4f010d7c844ecfdeaf2a13fead52886e929081900390910190a3613ad1565b6040805162461bcd60e51b815260206004820152600c60248201526b77726f6e672073746174757360a01b604482015290519081900360640190fd5b505050505050505050565b60008060008060005b613aee88613640565b15613b575783613b0b57613b0461326e8961313f565b9250613b4c565b8360011415613b2757613b2061326e8961313f565b9150613b4c565b8360021415613b4757613b3c6112848961313f565b905060019450613b4c565b613b57565b836001019350613ae5565b84613b9d576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b6000613bb4826402540be40063ffffffff614d8116565b6001600160a01b0385166000908152600c602052604090208054600019019055905060ff881660011415613d335760ff871615613c27576040805162461bcd60e51b815260206004820152600c60248201526b77726f6e672073746174757360a01b604482015290519081900360640190fd5b6001600160a01b038416600090815260046020526040902054613c50908263ffffffff612f4816565b6001600160a01b03808616600090815260046020908152604080832094909455600581528382209287168252919091522054613c92908263ffffffff612f4816565b6001600160a01b03808616600090815260056020908152604080832093881683529290522055613ccb42620a8c0063ffffffff612ea516565b6001600160a01b038086166000818152600760209081526040808320948916808452948252918290209490945580518581529051929391927fd6f878a5bcbbe79a64e6418bb0d56aaa20b9a60587d45749819df88dfc7c3c44929181900390910190a3613ad1565b60ff8816613a95576001600160a01b03808516600081815260076020908152604080832094881680845294825280832092909255815185815260ff8c169181019190915281517f4417d10c1e33efa83a770b8d4f47176e78c08c1298d534901ad3b16bb585fa2e929181900390910190a3613ad1565b6000806000806000805b613dbc89613640565b15613e415784613dd957613dd261326e8a61313f565b9350613e36565b8460011415613df557613dee61326e8a61313f565b9250613e36565b8460021415613e1157613e0a61326e8a61313f565b9150613e36565b8460031415613e3157613e266112848a61313f565b905060019550613e36565b613e41565b846001019450613db3565b85613e87576040
805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b6000613e9e826402540be40063ffffffff614d8116565b6001600160a01b0386166000908152600d602052604090208054600019019055905060ff8916600114156140715760ff881615613f11576040805162461bcd60e51b815260206004820152600c60248201526b77726f6e672073746174757360a01b604482015290519081900360640190fd5b6001600160a01b03808616600090815260056020908152604080832093881683529290522054613f47908263ffffffff612f4816565b6001600160a01b03868116600090815260056020908152604080832089851684529091528082209390935590851681522054613f839082612ea5565b6001600160a01b03808716600090815260056020908152604080832093881683529290522055613fbc42620a8c0063ffffffff612ea516565b6001600160a01b0380871660009081526009602090815260408083208985168452825280832093881683529290522055613fff42620a8c0063ffffffff612ea516565b6001600160a01b038087166000818152600960209081526040808320898616808552908352818420958b1680855295835292819020959095558451868152945191947f78bffae3f8c6691ac7fc1a3bff800cb2d612f5ad9ae5b0444cfe2eb15c189e18929081900390910190a4614102565b60ff8916613a95576001600160a01b038581166000818152600960209081526040808320898616808552818452828520968a16808652968452828520859055908352818420818552835281842093909355805186815260ff8e169281019290925280519293927fb93bee5c59f85ede6b074a99f4ffcd3e3fc0d5c3d8156de331de89a49e0ce77c9281900390910190a45b50505050505050505050565b6000816040516020018082805190602001908083835b602083106141435780518252601f199092019160209182019101614124565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b602083106141b15780518252601f199092019160209182019101614192565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012014905092915050565b015190565b60008060008060005b61420c86613640565b1561427557836142295761422261326e8761313f565b925061426a565b83600114156142
455761423e61326e8761313f565b915061426a565b83600214156142655761425a6112848761313f565b90506001945061426a565b614275565b836001019350614203565b846142bb576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b60006142d2826402540be40063ffffffff614d8116565b6001600160a01b0385166000908152600b6020908152604080832080546000190190556008909152902054909150614310908263ffffffff612ea516565b6001600160a01b0385166000908152600860209081526040808320939093558251630e4f7c3f60e31b81526004810185905292516110049363727be1f89360248083019493928390030190829087803b15801561436c57600080fd5b505af1158015614380573d6000803e3d6000fd5b505050506040513d602081101561439657600080fd5b50516143df576040805162461bcd60e51b81526020600482015260136024820152721dda5d1a191c985dc8189b988819985a5b1959606a1b604482015290519081900360640190fd5b6040516001907ff83de021914a4585482db5ca47d520a5657165b443fa2c7ef8ed4635f054da9b90600090a250505050505050565b60008060008060005b61442686613640565b1561448f57836144435761443c61326e8761313f565b9250614484565b836001141561445f5761445861326e8761313f565b9150614484565b836002141561447f576144746112848761313f565b905060019450614484565b61448f565b83600101935061441d565b846144d5576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b604482015290519081900360640190fd5b6001600160a01b038084166000908152600c60209081526040808320805460001901905560078252808320938616835292905281812081905590516002917ff83de021914a4585482db5ca47d520a5657165b443fa2c7ef8ed4635f054da9b91a2505050505050565b6000806000806000805b61455187613640565b156145d6578461456e5761456761326e8861313f565b93506145cb565b846001141561458a5761458361326e8861313f565b92506145cb565b84600214156145a65761459f61326e8861313f565b91506145cb565b84600314156145c6576145bb6112848861313f565b9050600195506145cb565b6145d6565b846001019450614548565b8561461c576040805162461bcd60e51b81526020600482015260116024820152701c9b1c08191958dbd9194819985a5b1959607a1b60448201529051908190036064
0190fd5b6001600160a01b038481166000908152600d602090815260408083208054600019019055600982528083208785168085528184528285209588168552948352818420849055825280832093835292905281812081905590516003917ff83de021914a4585482db5ca47d520a5657165b443fa2c7ef8ed4635f054da9b91a250505050505050565b6000818361472f5760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b838110156146f45781810151838201526020016146dc565b50505050905090810190601f1680156147215780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b50600083858161473b57fe5b0495945050505050565b600081848411156147975760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156146f45781810151838201526020016146dc565b505050900390565b6060815160011480156147d15750607f60f81b826000815181106147bf57fe5b01602001516001600160f81b03191611155b156147dd575080611196565b612bf76147ef8351608060ff16614958565b836148db565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff1984166148395750601861485d565b6fffffffffffffffffffffffffffffffff1984166148595750601061485d565b5060005b60208110156148935781818151811061487257fe5b01602001516001600160f81b0319161561488b57614893565b60010161485d565b60008160200390506060816040519080825280601f01601f1916602001820160405280156148c8576020820181803683370190505b5080830196909652508452509192915050565b6060806040519050835180825260208201818101602087015b8183101561490c5780518352602092830192016148f4565b50855184518101855292509050808201602086015b81831015614939578051835260209283019201614921565b508651929092011591909101601f01601f191660405250905092915050565b60606801000000000000000083106149a8576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b60408051600180825281830190925260609160208201818036833701905050905060378411614a025782840160f81b816000815181106149e457fe5b60200101906001600160f81b031916908160001a9053509050612bf7
565b6060614a0d856147f5565b90508381510160370160f81b82600081518110614a2657fe5b60200101906001600160f81b031916908160001a905350614a4782826148db565b95945050505050565b8051600090614a6157506000611196565b6020820151805160001a9060c0821015614a8057600092505050611196565b5060019392505050565b8051600090811a6080811015614aa4576000915050611196565b60b8811080614abf575060c08110801590614abf575060f881105b15614ace576001915050611196565b60c0811015614ae25760b519019050611196565b60f519019050611196565b80516000908190811a6080811015614b085760019150614c19565b60b8811015614b1d57607e1981019150614c19565b60c0811015614b9757600060b78203600186019550806020036101000a865104915060018101820193505080831015614b91576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b50614c19565b60f8811015614bac5760be1981019150614c19565b600060f78203600186019550806020036101000a865104915060018101820193505080831015614c17576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b8051600090601514614c3157600080fd5b612bf78261318d565b60006060614c53846402540be40063ffffffff612f0616565b60408051600480825260a0820190925291955060609190816020015b6060815260200190600190039081614c6f579050509050614c928760ff16612fad565b81600081518110614c9f57fe5b6020026020010181905250614cbc866001600160a01b0316612f8a565b81600181518110614cc957fe5b6020026020010181905250614cdd85612fad565b81600281518110614cea57fe5b6020026020010181905250614d048463ffffffff16612fad565b81600381518110614d1157fe5b60200260200101819052506060614d2782612fc0565b94989497509395505050505050565b80614d4057612e21565b5b60208110614d60578251825260209283019290910190601f1901614d41565b915181516020939093036101000a6000190180199091169216919091179052565b600082614d9057506000612bf7565b82820282848281614d9d57fe5b0414612eff5760405162461bcd60e51b8152600401808060200182810382526021815260200180614ec16021913960400191505060405180910390fd5b6040518060600160405280600390
6020820280368337509192915050565b6040518060400160405280614e0b614e18565b8152602001600081525090565b60405180604001604052806000815260200160008152509056fe7468652042534352656c61796572466565206d6f642074656e20646563696d616c73206d757374206265207a65726f7468652042534352656c61796572466565206d757374206265206c657373207468616e2072656c61796572466565746865206d696e44656c65676174696f6e206d7573742062652067726561746572207468616e2072656c61796572466565536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f777468652072656c61796572466565206d6f642074656e20646563696d616c73206d757374206265207a65726f746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e74726163747468652072656c61796572466565206d757374206265206c657373207468616e206d696e44656c65676174696f6e7468652072656c61796572466565206d757374206265206d6f7265207468616e2042534352656c61796572466565746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374a26469706673582212202d43db3e2ebde4487e0172cc743868769dac674a6b5ac4e5b38d932f5fe77d2964736f6c63430006040033", + }, + }, + } + } func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.Int, statedb *state.IntraBlockState) { @@ -400,6 +434,10 @@ func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.I applySystemContractUpgrade(eulerUpgrade[network], blockNumber, statedb, logger) } + if config.IsOnGibbs(blockNumber) { + applySystemContractUpgrade(gibbsUpgrade[network], blockNumber, statedb, logger) + } + /* apply other upgrades */ @@ -426,6 +464,13 @@ func applySystemContractUpgrade(upgrade *Upgrade, blockNumber *big.Int, statedb if err != nil { panic(fmt.Errorf("failed to decode new contract code: %s", err.Error())) } + + prevContractCode := statedb.GetCode(cfg.ContractAddr) + if len(prevContractCode) == 0 && len(newContractCode) > 0 { + // system contracts defined after genesis need to be explicitly created + statedb.CreateAccount(cfg.ContractAddr, true) + } + 
statedb.SetCode(cfg.ContractAddr, newContractCode) if cfg.AfterUpgrade != nil { diff --git a/core/types/accounts/account.go b/core/types/accounts/account.go index 13f1fd19181..90b0b2194f1 100644 --- a/core/types/accounts/account.go +++ b/core/types/accounts/account.go @@ -618,3 +618,102 @@ func (a *Account) Equals(acc *Account) bool { a.Balance.Cmp(&acc.Balance) == 0 && a.Incarnation == acc.Incarnation } + +// Deserialise2 - method to deserialize accounts in Erigon22 history +func Deserialise2(a *Account, enc []byte) error { + a.Reset() + pos := 0 + nonceBytes := int(enc[pos]) + pos++ + if nonceBytes > 0 { + a.Nonce = bytesToUint64(enc[pos : pos+nonceBytes]) + pos += nonceBytes + } + balanceBytes := int(enc[pos]) + pos++ + if balanceBytes > 0 { + a.Balance.SetBytes(enc[pos : pos+balanceBytes]) + pos += balanceBytes + } + codeHashBytes := int(enc[pos]) + pos++ + if codeHashBytes > 0 { + copy(a.CodeHash[:], enc[pos:pos+codeHashBytes]) + pos += codeHashBytes + } + if pos >= len(enc) { + return fmt.Errorf("deserialse2: %d >= %d ", pos, len(enc)) + } + incBytes := int(enc[pos]) + pos++ + if incBytes > 0 { + a.Incarnation = bytesToUint64(enc[pos : pos+incBytes]) + } + return nil +} + +func Serialise2(a *Account) []byte { + var l int + l++ + if a.Nonce > 0 { + l += (bits.Len64(a.Nonce) + 7) / 8 + } + l++ + if !a.Balance.IsZero() { + l += a.Balance.ByteLen() + } + l++ + if !a.IsEmptyCodeHash() { + l += 32 + } + l++ + if a.Incarnation > 0 { + l += (bits.Len64(a.Incarnation) + 7) / 8 + } + value := make([]byte, l) + pos := 0 + if a.Nonce == 0 { + value[pos] = 0 + pos++ + } else { + nonceBytes := (bits.Len64(a.Nonce) + 7) / 8 + value[pos] = byte(nonceBytes) + var nonce = a.Nonce + for i := nonceBytes; i > 0; i-- { + value[pos+i] = byte(nonce) + nonce >>= 8 + } + pos += nonceBytes + 1 + } + if a.Balance.IsZero() { + value[pos] = 0 + pos++ + } else { + balanceBytes := a.Balance.ByteLen() + value[pos] = byte(balanceBytes) + pos++ + a.Balance.WriteToSlice(value[pos : 
pos+balanceBytes]) + pos += balanceBytes + } + if a.IsEmptyCodeHash() { + value[pos] = 0 + pos++ + } else { + value[pos] = 32 + pos++ + copy(value[pos:pos+32], a.CodeHash[:]) + pos += 32 + } + if a.Incarnation == 0 { + value[pos] = 0 + } else { + incBytes := (bits.Len64(a.Incarnation) + 7) / 8 + value[pos] = byte(incBytes) + var inc = a.Incarnation + for i := incBytes; i > 0; i-- { + value[pos+i] = byte(inc) + inc >>= 8 + } + } + return value +} diff --git a/core/types/blacklist.go b/core/types/blacklist.go new file mode 100644 index 00000000000..9428a763217 --- /dev/null +++ b/core/types/blacklist.go @@ -0,0 +1,11 @@ +package types + +import "github.com/ledgerwatch/erigon/common" + +// This is introduced because of the Tendermint IAVL Merkel Proof verification exploitation. +var NanoBlackList = []common.Address{ + common.HexToAddress("0x489A8756C18C0b8B24EC2a2b9FF3D4d447F79BEc"), + common.HexToAddress("0xFd6042Df3D74ce9959922FeC559d7995F3933c55"), + // Test Account + common.HexToAddress("0xdb789Eb5BDb4E559beD199B8b82dED94e1d056C9"), +} diff --git a/core/types/block.go b/core/types/block.go index ec90dbe236f..261c49dcbc7 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -18,6 +18,7 @@ package types import ( + "bytes" "encoding/binary" "errors" "fmt" @@ -28,7 +29,9 @@ import ( "sync/atomic" "time" + "github.com/gballet/go-verkle" rlp2 "github.com/ledgerwatch/erigon-lib/rlp" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/rlp" @@ -99,6 +102,10 @@ type Header struct { Eip1559 bool // to avoid relying on BaseFee != nil for that Seal []rlp.RawValue // AuRa POA network field WithSeal bool // to avoid relying on Seal != nil for that + // The verkle proof is ignored in legacy headers + Verkle bool + VerkleProof []byte `json:"verkleProof"` + VerkleKeyVals []verkle.KeyValuePair `json:"verkleKeyVals"` } func (h Header) EncodingSize() int { @@ -175,6 +182,30 @@ func (h Header) EncodingSize() int 
{ encodingSize += baseFeeLen } + if h.Verkle { + // Encoding of Verkle Proof + encodingSize++ + switch len(h.VerkleProof) { + case 0: + case 1: + if h.VerkleProof[0] >= 128 { + encodingSize++ + } + default: + if len(h.VerkleProof) >= 56 { + encodingSize += (bits.Len(uint(len(h.VerkleProof))) + 7) / 8 + } + encodingSize += len(h.VerkleProof) + } + encodingSize++ + + var tmpBuffer bytes.Buffer + if err := rlp.Encode(&tmpBuffer, h.VerkleKeyVals); err != nil { + panic(err) + } + encodingSize += tmpBuffer.Len() + } + return encodingSize } @@ -192,6 +223,29 @@ func (h Header) EncodeRLP(w io.Writer) error { } else { encodingSize += 33 /* MixDigest */ + 9 /* BlockNonce */ } + if h.Verkle { + // Encoding of Verkle Proof + encodingSize++ + switch len(h.VerkleProof) { + case 0: + case 1: + if h.VerkleProof[0] >= 128 { + encodingSize++ + } + default: + if len(h.VerkleProof) >= 56 { + encodingSize += (bits.Len(uint(len(h.VerkleProof))) + 7) / 8 + } + encodingSize += len(h.VerkleProof) + } + encodingSize++ + + var tmpBuffer bytes.Buffer + if err := rlp.Encode(&tmpBuffer, h.VerkleKeyVals); err != nil { + return nil + } + encodingSize += tmpBuffer.Len() + } encodingSize++ var diffBitLen, diffLen int @@ -424,6 +478,16 @@ func (h Header) EncodeRLP(w io.Writer) error { } } + if h.Verkle { + if err := EncodeString(h.VerkleProof, w, b[:]); err != nil { + return err + } + + if err := rlp.Encode(w, h.VerkleKeyVals); err != nil { + return nil + } + } + return nil } @@ -544,6 +608,17 @@ func (h *Header) DecodeRLP(s *rlp.Stream) error { h.Eip1559 = true h.BaseFee = new(big.Int).SetBytes(b) } + + if h.Verkle { + if h.VerkleProof, err = s.Bytes(); err != nil { + return fmt.Errorf("read VerkleProof: %w", err) + } + rawKv, err := s.Raw() + if err != nil { + return err + } + rlp.DecodeBytes(rawKv, h.VerkleKeyVals) + } if err := s.ListEnd(); err != nil { return fmt.Errorf("close header struct: %w", err) } @@ -681,16 +756,19 @@ func (b *Body) SendersFromTxs() []common.Address { } func (rb RawBody) 
EncodingSize() int { - payloadSize, _, _ := rb.payloadSize() + payloadSize, _, _, _ := rb.payloadSize() return payloadSize } -func (rb RawBody) payloadSize() (payloadSize int, txsLen, unclesLen int) { +func (rb RawBody) payloadSize() (payloadSize, txsLen, unclesLen int, transactionsSizes []int) { + transactionsSizes = make([]int, len(rb.Transactions)) + // size of Transactions payloadSize++ - for _, tx := range rb.Transactions { + for idx, tx := range rb.Transactions { txsLen++ var txLen = len(tx) + transactionsSizes[idx] = txLen if txLen >= 56 { txsLen += (bits.Len(uint(txLen)) + 7) / 8 } @@ -714,11 +792,11 @@ func (rb RawBody) payloadSize() (payloadSize int, txsLen, unclesLen int) { payloadSize += (bits.Len(uint(unclesLen)) + 7) / 8 } payloadSize += unclesLen - return payloadSize, txsLen, unclesLen + return payloadSize, txsLen, unclesLen, transactionsSizes } func (rb RawBody) EncodeRLP(w io.Writer) error { - payloadSize, txsLen, unclesLen := rb.payloadSize() + payloadSize, txsLen, unclesLen, txSizes := rb.payloadSize() var b [33]byte // prefix if err := EncodeStructSizePrefix(payloadSize, w, b[:]); err != nil { @@ -728,7 +806,10 @@ func (rb RawBody) EncodeRLP(w io.Writer) error { if err := EncodeStructSizePrefix(txsLen, w, b[:]); err != nil { return err } - for _, tx := range rb.Transactions { + for idx, tx := range rb.Transactions { + if err := EncodeStructSizePrefix(txSizes[idx], w, b[:]); err != nil { + return err + } if _, err := w.Write(tx); err != nil { return nil } @@ -756,7 +837,11 @@ func (rb *RawBody) DecodeRLP(s *rlp.Stream) error { } var tx []byte for tx, err = s.Raw(); err == nil; tx, err = s.Raw() { - rb.Transactions = append(rb.Transactions, tx) + _, txContent, _, err := rlp.Split(tx) + if err != nil { + return err + } + rb.Transactions = append(rb.Transactions, txContent) } if !errors.Is(err, rlp.EOL) { return err @@ -769,6 +854,7 @@ func (rb *RawBody) DecodeRLP(s *rlp.Stream) error { if _, err = s.List(); err != nil { return err } + for err == 
nil { var uncle Header if err = uncle.DecodeRLP(s); err != nil { diff --git a/core/types/block_test.go b/core/types/block_test.go index c72ace5fbba..a52c369bbc9 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -18,6 +18,7 @@ package types import ( "bytes" + "encoding/json" "math/big" "reflect" "testing" @@ -25,6 +26,7 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/common/u256" "github.com/ledgerwatch/erigon/crypto" @@ -268,3 +270,94 @@ func makeBenchBlock() *Block { } return NewBlock(header, txs, uncles, receipts) } + +func TestCanEncodeAndDecodeRawBody(t *testing.T) { + body := &RawBody{ + Uncles: []*Header{ + { + ParentHash: common.Hash{}, + UncleHash: common.Hash{}, + Coinbase: common.Address{}, + Root: common.Hash{}, + TxHash: common.Hash{}, + ReceiptHash: common.Hash{}, + Bloom: Bloom{}, + Difficulty: big.NewInt(100), + Number: big.NewInt(1000), + GasLimit: 50, + GasUsed: 60, + Time: 90, + Extra: []byte("testing"), + MixDigest: common.Hash{}, + Nonce: BlockNonce{}, + BaseFee: nil, + Eip1559: false, + Seal: nil, + WithSeal: false, + Verkle: false, + VerkleProof: nil, + VerkleKeyVals: nil, + }, + { + GasUsed: 108, + GasLimit: 100, + Difficulty: big.NewInt(99), + Number: big.NewInt(1000), + }, + }, + Transactions: [][]byte{ + { + 10, 20, 30, + }, + { + 40, 50, 60, + }, + }, + } + expectedJson, err := json.Marshal(body) + if err != nil { + t.Fatal(err) + } + writer := bytes.NewBuffer(nil) + err = body.EncodeRLP(writer) + if err != nil { + t.Fatal(err) + } + rlpBytes := common.CopyBytes(writer.Bytes()) + writer.Reset() + writer.WriteString(hexutil.Encode(rlpBytes)) + + var rawBody RawBody + fromHex := common.CopyBytes(common.FromHex(writer.String())) + bodyReader := bytes.NewReader(fromHex) + stream := rlp.NewStream(bodyReader, 0) + + err = rawBody.DecodeRLP(stream) + if err != nil { + 
t.Fatal(err) + } + + resultJson, err := json.Marshal(rawBody) + if err != nil { + t.Fatal(err) + } + + if len(rawBody.Transactions) != 2 { + t.Fatalf("expected there to be 1 transaction once decoded") + } + if rawBody.Transactions[0][0] != 10 { + t.Fatal("expected first element in transactions to be 10") + } + if rawBody.Transactions[1][2] != 60 { + t.Fatal("expected 2nd element in transactions to end in 60") + } + if rawBody.Uncles[0].GasLimit != 50 { + t.Fatal("expected gas limit of first uncle to be 50") + } + if rawBody.Uncles[1].GasLimit != 100 { + t.Fatal("expected gas limit of 2nd uncle to be 100") + } + if string(resultJson) != string(expectedJson) { + t.Fatalf("encoded and decoded json do not match, got\n%s\nwant\n%s", resultJson, expectedJson) + } +} diff --git a/core/types/gen_erigon_log_json.go b/core/types/gen_erigon_log_json.go new file mode 100644 index 00000000000..5523671f583 --- /dev/null +++ b/core/types/gen_erigon_log_json.go @@ -0,0 +1,102 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package types + +import ( + "encoding/json" + "errors" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" +) + +var _ = (*logMarshaling)(nil) + +// MarshalJSON marshals as JSON. 
+func (l ErigonLog) MarshalJSON() ([]byte, error) { + type ErigonLog struct { + Address common.Address `json:"address" gencodec:"required"` + Topics []common.Hash `json:"topics" gencodec:"required"` + Data hexutil.Bytes `json:"data" gencodec:"required"` + BlockNumber hexutil.Uint64 `json:"blockNumber"` + TxHash common.Hash `json:"transactionHash" gencodec:"required"` + TxIndex hexutil.Uint `json:"transactionIndex"` + BlockHash common.Hash `json:"blockHash"` + Index hexutil.Uint `json:"logIndex"` + Removed bool `json:"removed"` + Timestamp hexutil.Uint64 `json:"timestamp"` + } + + var enc ErigonLog + enc.Address = l.Address + enc.Topics = l.Topics + enc.Data = l.Data + enc.BlockNumber = hexutil.Uint64(l.BlockNumber) + enc.TxHash = l.TxHash + enc.TxIndex = hexutil.Uint(l.TxIndex) + enc.BlockHash = l.BlockHash + enc.Index = hexutil.Uint(l.Index) + enc.Removed = l.Removed + enc.Timestamp = hexutil.Uint64(l.Timestamp) + + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. 
+func (l *ErigonLog) UnmarshalJSON(input []byte) error { + type ErigonLog struct { + Address *common.Address `json:"address" gencodec:"required"` + Topics []common.Hash `json:"topics" gencodec:"required"` + Data *hexutil.Bytes `json:"data" gencodec:"required"` + BlockNumber *hexutil.Uint64 `json:"blockNumber"` + TxHash *common.Hash `json:"transactionHash" gencodec:"required"` + TxIndex *hexutil.Uint `json:"transactionIndex"` + BlockHash *common.Hash `json:"blockHash"` + Index *hexutil.Uint `json:"logIndex"` + Removed *bool `json:"removed"` + Timestamp *hexutil.Uint64 `json:"timestamp"` + } + + var dec ErigonLog + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.Address == nil { + return errors.New("missing required field 'address' for ErigonLog") + } + l.Address = *dec.Address + if dec.Topics == nil { + return errors.New("missing required field 'topics' for ErigonLog") + } + l.Topics = dec.Topics + if dec.Data == nil { + return errors.New("missing required field 'data' for ErigonLog") + } + l.Data = *dec.Data + if dec.BlockNumber != nil { + l.BlockNumber = uint64(*dec.BlockNumber) + } + + if dec.TxHash == nil { + return errors.New("missing required field 'transactionHash' for ErigonLog") + } + l.TxHash = *dec.TxHash + if dec.TxIndex != nil { + l.TxIndex = uint(*dec.TxIndex) + } + if dec.BlockHash != nil { + l.BlockHash = *dec.BlockHash + } + if dec.Index != nil { + l.Index = uint(*dec.Index) + } + if dec.Removed != nil { + l.Removed = *dec.Removed + } + + if dec.Timestamp != nil { + l.Timestamp = uint64(*dec.Timestamp) + } + + return nil +} diff --git a/core/types/gen_log_json.go b/core/types/gen_log_json.go index 7c0c23e5525..236e7aecc85 100644 --- a/core/types/gen_log_json.go +++ b/core/types/gen_log_json.go @@ -70,6 +70,7 @@ func (l *Log) UnmarshalJSON(input []byte) error { if dec.BlockNumber != nil { l.BlockNumber = uint64(*dec.BlockNumber) } + if dec.TxHash == nil { return errors.New("missing required field 'transactionHash' for 
Log") } diff --git a/core/types/legacy_tx.go b/core/types/legacy_tx.go index 01ba4d63d0d..12641e5c97b 100644 --- a/core/types/legacy_tx.go +++ b/core/types/legacy_tx.go @@ -394,7 +394,7 @@ func (tx LegacyTx) EncodeRLP(w io.Writer) error { // DecodeRLP decodes LegacyTx but with the list token already consumed and encodingSize being presented func (tx *LegacyTx) DecodeRLP(s *rlp.Stream, encodingSize uint64) error { var err error - s.NewList(uint64(encodingSize)) + s.NewList(encodingSize) if tx.Nonce, err = s.Uint(); err != nil { return fmt.Errorf("read Nonce: %w", err) } diff --git a/core/types/log.go b/core/types/log.go index 29ea8dafc46..42bbea78f96 100644 --- a/core/types/log.go +++ b/core/types/log.go @@ -41,6 +41,7 @@ type Log struct { // but not secured by consensus. // block in which the transaction was included BlockNumber uint64 `json:"blockNumber" codec:"-"` + // hash of the transaction TxHash common.Hash `json:"transactionHash" gencodec:"required" codec:"-"` // index of the transaction in the block @@ -55,6 +56,21 @@ type Log struct { Removed bool `json:"removed" codec:"-"` } +type ErigonLog struct { + Address common.Address `json:"address" gencodec:"required" codec:"1"` + Topics []common.Hash `json:"topics" gencodec:"required" codec:"2"` + Data []byte `json:"data" gencodec:"required" codec:"3"` + BlockNumber uint64 `json:"blockNumber" codec:"-"` + TxHash common.Hash `json:"transactionHash" gencodec:"required" codec:"-"` + TxIndex uint `json:"transactionIndex" codec:"-"` + BlockHash common.Hash `json:"blockHash" codec:"-"` + Index uint `json:"logIndex" codec:"-"` + Removed bool `json:"removed" codec:"-"` + Timestamp uint64 `json:"timestamp" codec:"-"` +} + +type ErigonLogs []*ErigonLog + type Logs []*Log type logMarshaling struct { diff --git a/core/types/log_test.go b/core/types/log_test.go index 5b948f55413..f16e9beadb7 100644 --- a/core/types/log_test.go +++ b/core/types/log_test.go @@ -33,7 +33,7 @@ var unmarshalLogTests = map[string]struct { wantError 
error }{ "ok": { - input: `{"address":"0xecf8f87f810ecf450940c9f60066b4a7a501d6a7","blockHash":"0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056","blockNumber":"0x1ecfa4","data":"0x000000000000000000000000000000000000000000000001a055690d9db80000","logIndex":"0x2","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x00000000000000000000000080b2c9d7cbbf30a1b0fc8983c647d754c6525615"],"transactionHash":"0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e","transactionIndex":"0x3"}`, + input: `{"address":"0xecf8f87f810ecf450940c9f60066b4a7a501d6a7","blockHash":"0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056","blockNumber":"0x1ecfa4","timestamp":"0x57a53d3a","data":"0x000000000000000000000000000000000000000000000001a055690d9db80000","logIndex":"0x2","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x00000000000000000000000080b2c9d7cbbf30a1b0fc8983c647d754c6525615"],"transactionHash":"0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e","transactionIndex":"0x3"}`, want: &Log{ Address: common.HexToAddress("0xecf8f87f810ecf450940c9f60066b4a7a501d6a7"), BlockHash: common.HexToHash("0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056"), @@ -48,8 +48,9 @@ var unmarshalLogTests = map[string]struct { }, }, }, + "empty data": { - input: `{"address":"0xecf8f87f810ecf450940c9f60066b4a7a501d6a7","blockHash":"0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056","blockNumber":"0x1ecfa4","data":"0x","logIndex":"0x2","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x00000000000000000000000080b2c9d7cbbf30a1b0fc8983c647d754c6525615"],"transactionHash":"0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e","transactionIndex":"0x3"}`, + input: 
`{"address":"0xecf8f87f810ecf450940c9f60066b4a7a501d6a7","blockHash":"0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056","blockNumber":"0x1ecfa4","timestamp":"0x57a53d3a","data":"0x","logIndex":"0x2","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x00000000000000000000000080b2c9d7cbbf30a1b0fc8983c647d754c6525615"],"transactionHash":"0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e","transactionIndex":"0x3"}`, want: &Log{ Address: common.HexToAddress("0xecf8f87f810ecf450940c9f60066b4a7a501d6a7"), BlockHash: common.HexToHash("0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056"), @@ -70,25 +71,27 @@ var unmarshalLogTests = map[string]struct { Address: common.HexToAddress("0xecf8f87f810ecf450940c9f60066b4a7a501d6a7"), BlockHash: common.Hash{}, BlockNumber: 0, - Data: []byte{}, - Index: 0, - TxIndex: 3, - TxHash: common.HexToHash("0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e"), + + Data: []byte{}, + Index: 0, + TxIndex: 3, + TxHash: common.HexToHash("0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e"), Topics: []common.Hash{ common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), }, }, }, "Removed: true": { - input: `{"address":"0xecf8f87f810ecf450940c9f60066b4a7a501d6a7","blockHash":"0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056","blockNumber":"0x1ecfa4","data":"0x","logIndex":"0x2","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"],"transactionHash":"0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e","transactionIndex":"0x3","removed":true}`, + input: 
`{"address":"0xecf8f87f810ecf450940c9f60066b4a7a501d6a7","blockHash":"0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056","blockNumber":"0x1ecfa4","timestamp":"0x57a53d3a","data":"0x","logIndex":"0x2","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"],"transactionHash":"0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e","transactionIndex":"0x3","removed":true}`, want: &Log{ Address: common.HexToAddress("0xecf8f87f810ecf450940c9f60066b4a7a501d6a7"), BlockHash: common.HexToHash("0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056"), BlockNumber: 2019236, - Data: []byte{}, - Index: 2, - TxIndex: 3, - TxHash: common.HexToHash("0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e"), + + Data: []byte{}, + Index: 2, + TxIndex: 3, + TxHash: common.HexToHash("0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e"), Topics: []common.Hash{ common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), }, @@ -96,7 +99,7 @@ var unmarshalLogTests = map[string]struct { }, }, "missing data": { - input: `{"address":"0xecf8f87f810ecf450940c9f60066b4a7a501d6a7","blockHash":"0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056","blockNumber":"0x1ecfa4","logIndex":"0x2","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x00000000000000000000000080b2c9d7cbbf30a1b0fc8983c647d754c6525615","0x000000000000000000000000f9dff387dcb5cc4cca5b91adb07a95f54e9f1bb6"],"transactionHash":"0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e","transactionIndex":"0x3"}`, + input: 
`{"address":"0xecf8f87f810ecf450940c9f60066b4a7a501d6a7","blockHash":"0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056","blockNumber":"0x1ecfa4","timestamp":"0x57a53d3a","logIndex":"0x2","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x00000000000000000000000080b2c9d7cbbf30a1b0fc8983c647d754c6525615","0x000000000000000000000000f9dff387dcb5cc4cca5b91adb07a95f54e9f1bb6"],"transactionHash":"0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e","transactionIndex":"0x3"}`, wantError: fmt.Errorf("missing required field 'data' for Log"), }, } diff --git a/core/vm/absint_cfg.go b/core/vm/absint_cfg.go index 65473813f65..8b9c8830083 100644 --- a/core/vm/absint_cfg.go +++ b/core/vm/absint_cfg.go @@ -131,7 +131,7 @@ func AbsValueDestringify(s string) AbsValue { return AbsValue{} } -////////////////////////////////////////////////// +// //////////////////////////////////////////////// type astack struct { values []AbsValue hash uint64 @@ -295,7 +295,7 @@ func (state *astate) Add(stack *astack) { ////////////////////////////////////////////////// -//-1 block id is invalid jump +// -1 block id is invalid jump type CfgProofState struct { Pc int Stacks [][]string @@ -361,15 +361,15 @@ func (proof *CfgProof) ToString() string { return string(proof.Serialize()) } -//block.{Entry|Exit}.Pc in code, block.{Succs|Preds} in some block.{Entry}.Pc -//Entry <= Exit -//No overlap of blocks -//Must have block starting at 0 with a empty state -//Succs,Preds consistency -//No duplicate succs -//No duplicate preds -//succs are sorted -//preds are sorted +// block.{Entry|Exit}.Pc in code, block.{Succs|Preds} in some block.{Entry}.Pc +// Entry <= Exit +// No overlap of blocks +// Must have block starting at 0 with a empty state +// Succs,Preds consistency +// No duplicate succs +// No duplicate preds +// succs are sorted +// preds are sorted func (proof *CfgProof) isValid() bool { return true } diff --git 
a/core/vm/absint_cfg_proof_check.go b/core/vm/absint_cfg_proof_check.go index e10375dd4d8..ac16f618d25 100644 --- a/core/vm/absint_cfg_proof_check.go +++ b/core/vm/absint_cfg_proof_check.go @@ -2,9 +2,10 @@ package vm import ( "errors" - "github.com/holiman/uint256" "log" "reflect" + + "github.com/holiman/uint256" ) type CfgOpSem struct { diff --git a/core/vm/absint_cfg_proof_gen.go b/core/vm/absint_cfg_proof_gen.go index 8a2ce1c5f83..216bed304c3 100644 --- a/core/vm/absint_cfg_proof_gen.go +++ b/core/vm/absint_cfg_proof_gen.go @@ -15,7 +15,7 @@ import ( "github.com/holiman/uint256" ) -////////////////////////////////////////////////// +// //////////////////////////////////////////////// type AbsValueKind int ////////////////////////// diff --git a/core/vm/access_list_tracer.go b/core/vm/access_list_tracer.go index f63d6336ed0..3388a128650 100644 --- a/core/vm/access_list_tracer.go +++ b/core/vm/access_list_tracer.go @@ -17,10 +17,11 @@ package vm import ( - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/types" "math/big" "time" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/types" ) // accessList is an accumulator for the set of accounts and storage slots an EVM diff --git a/core/vm/analysis_test.go b/core/vm/analysis_test.go index fa2d7cc8c6b..83793b1bcef 100644 --- a/core/vm/analysis_test.go +++ b/core/vm/analysis_test.go @@ -89,7 +89,7 @@ func BenchmarkJumpDest(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - contract := NewContract(contractRef, contractRef, nil, 0, false /* skipAnalysis */, false) + contract := NewContract(contractRef, contractRef, nil, 0, false /* skipAnalysis */) contract.Code = code contract.CodeHash = hash diff --git a/core/vm/contract.go b/core/vm/contract.go index 64bdfc14650..fe9153631a7 100644 --- a/core/vm/contract.go +++ b/core/vm/contract.go @@ -51,7 +51,6 @@ type Contract struct { jumpdests map[common.Hash][]uint64 // Aggregated result of JUMPDEST analysis. 
analysis []uint64 // Locally cached result of JUMPDEST analysis skipAnalysis bool - vmType VmType Code []byte CodeHash common.Hash @@ -63,7 +62,7 @@ type Contract struct { } // NewContract returns a new contract environment for the execution of EVM. -func NewContract(caller ContractRef, object ContractRef, value *uint256.Int, gas uint64, skipAnalysis bool, isTEVM bool) *Contract { +func NewContract(caller ContractRef, object ContractRef, value *uint256.Int, gas uint64, skipAnalysis bool) *Contract { c := &Contract{CallerAddress: caller.Address(), caller: caller, self: object} if parent, ok := caller.(*Contract); ok { @@ -81,11 +80,6 @@ func NewContract(caller ContractRef, object ContractRef, value *uint256.Int, gas c.skipAnalysis = skipAnalysis - c.vmType = EVMType - if isTEVM { - c.vmType = TEVMType - } - return c } diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 404ead71338..e0285cc7220 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -95,6 +95,21 @@ var PrecompiledContractsIstanbulForBSC = map[common.Address]PrecompiledContract{ common.BytesToAddress([]byte{101}): &iavlMerkleProofValidate{}, } +var PrecompiledContractsNano = map[common.Address]PrecompiledContract{ + common.BytesToAddress([]byte{1}): &ecrecover{}, + common.BytesToAddress([]byte{2}): &sha256hash{}, + common.BytesToAddress([]byte{3}): &ripemd160hash{}, + common.BytesToAddress([]byte{4}): &dataCopy{}, + common.BytesToAddress([]byte{5}): &bigModExp{}, + common.BytesToAddress([]byte{6}): &bn256AddIstanbul{}, + common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{}, + common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{}, + common.BytesToAddress([]byte{9}): &blake2F{}, + + common.BytesToAddress([]byte{100}): &tmHeaderValidateNano{}, + common.BytesToAddress([]byte{101}): &iavlMerkleProofValidateNano{}, +} + // PrecompiledContractsBerlin contains the default set of pre-compiled Ethereum // contracts used in the Berlin release. 
var PrecompiledContractsBerlin = map[common.Address]PrecompiledContract{ @@ -124,6 +139,7 @@ var PrecompiledContractsBLS = map[common.Address]PrecompiledContract{ } var ( + PrecompiledAddressesNano []common.Address PrecompiledAddressesBerlin []common.Address PrecompiledAddressesIstanbul []common.Address PrecompiledAddressesIstanbulForBSC []common.Address @@ -147,11 +163,16 @@ func init() { for k := range PrecompiledContractsBerlin { PrecompiledAddressesBerlin = append(PrecompiledAddressesBerlin, k) } + for k := range PrecompiledContractsNano { + PrecompiledAddressesNano = append(PrecompiledAddressesNano, k) + } } // ActivePrecompiles returns the precompiles enabled with the current configuration. func ActivePrecompiles(rules *params.Rules) []common.Address { switch { + case rules.IsNano: + return PrecompiledAddressesNano case rules.IsBerlin: return PrecompiledAddressesBerlin case rules.IsIstanbul: @@ -289,9 +310,10 @@ var ( // modexpMultComplexity implements bigModexp multComplexity formula, as defined in EIP-198 // // def mult_complexity(x): -// if x <= 64: return x ** 2 -// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072 -// else: return x ** 2 // 16 + 480 * x - 199680 +// +// if x <= 64: return x ** 2 +// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072 +// else: return x ** 2 // 16 + 480 * x - 199680 // // where is x is max(length_of_MODULUS, length_of_BASE) func modexpMultComplexity(x *big.Int) *big.Int { @@ -614,7 +636,7 @@ func (c *blake2F) Run(input []byte) ([]byte, error) { // Parse the input into the Blake2b call parameters var ( rounds = binary.BigEndian.Uint32(input[0:4]) - final = (input[212] == blake2FFinalBlockBytes) + final = input[212] == blake2FFinalBlockBytes h [8]uint64 m [16]uint64 diff --git a/core/vm/contracts_lightclient.go b/core/vm/contracts_lightclient.go index 409f116112e..1a7d1d6fc11 100644 --- a/core/vm/contracts_lightclient.go +++ b/core/vm/contracts_lightclient.go @@ -48,7 +48,7 @@ func (c *tmHeaderValidate) RequiredGas(input 
[]byte) uint64 { func (c *tmHeaderValidate) Run(input []byte) (result []byte, err error) { defer func() { if r := recover(); r != nil { - err = fmt.Errorf("internal error: %v\n", r) + err = fmt.Errorf("internal error: %v", r) } }() @@ -106,7 +106,7 @@ func (c *iavlMerkleProofValidate) RequiredGas(input []byte) uint64 { func (c *iavlMerkleProofValidate) Run(input []byte) (result []byte, err error) { defer func() { if r := recover(); r != nil { - err = fmt.Errorf("internal error: %v\n", r) + err = fmt.Errorf("internal error: %v", r) } }() @@ -133,3 +133,28 @@ func (c *iavlMerkleProofValidate) Run(input []byte) (result []byte, err error) { binary.BigEndian.PutUint64(result[merkleProofValidateResultLength-uint64TypeLength:], 0x01) return result, nil } + +// tmHeaderValidate implemented as a native contract. +type tmHeaderValidateNano struct{} + +func (c *tmHeaderValidateNano) RequiredGas(input []byte) uint64 { + return params.TendermintHeaderValidateGas +} + +func (c *tmHeaderValidateNano) Run(input []byte) (result []byte, err error) { + return nil, fmt.Errorf("suspend") +} + +// ------------------------------------------------------------------------------------------------------------------------------------------------ +type iavlMerkleProofValidateNano struct{} + +func (c *iavlMerkleProofValidateNano) RequiredGas(input []byte) uint64 { + return params.IAVLMerkleProofValidateGas +} + +// input: +// | payload length | payload | +// | 32 bytes | | +func (c *iavlMerkleProofValidateNano) Run(input []byte) (result []byte, err error) { + return nil, fmt.Errorf("suspend") +} diff --git a/core/vm/eips.go b/core/vm/eips.go index dfadd38aa0c..8f4d1838793 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -26,6 +26,7 @@ import ( ) var activators = map[int]func(*JumpTable){ + 3855: enable3855, 3529: enable3529, 3198: enable3198, 2929: enable2929, @@ -171,3 +172,20 @@ func opBaseFee(pc *uint64, interpreter *EVMInterpreter, callContext *ScopeContex 
callContext.Stack.Push(baseFee) return nil, nil } + +// enable3855 applies EIP-3855 (PUSH0 opcode) +func enable3855(jt *JumpTable) { + // New opcode + jt[PUSH0] = &operation{ + execute: opPush0, + constantGas: GasQuickStep, + minStack: minStack(0, 1), + maxStack: maxStack(0, 1), + } +} + +// opPush0 implements the PUSH0 opcode +func opPush0(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + scope.Stack.Push(new(uint256.Int)) + return nil, nil +} diff --git a/core/vm/evm.go b/core/vm/evm.go index ce9545b3929..ac97f52256b 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -17,7 +17,6 @@ package vm import ( - "errors" "math/big" "sync/atomic" "time" @@ -46,6 +45,8 @@ type ( func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) { var precompiles map[common.Address]PrecompiledContract switch { + case evm.chainRules.IsNano: + precompiles = PrecompiledContractsNano case evm.chainRules.IsBerlin: precompiles = PrecompiledContractsBerlin case evm.chainRules.IsIstanbul: @@ -65,34 +66,9 @@ func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) { // run runs the given contract and takes care of running precompiles with a fallback to the byte code interpreter. func run(evm *EVM, contract *Contract, input []byte, readOnly bool) ([]byte, error) { - callback, err := selectInterpreter(evm, contract) - if err != nil { - return nil, err - } - - defer callback() - return evm.interpreter.Run(contract, input, readOnly) } -func selectInterpreter(evm *EVM, contract *Contract) (func(), error) { - interpreter := evm.interpreter - callback := func() { - evm.interpreter = interpreter - } - - switch contract.vmType { - case EVMType: - evm.interpreter = evm.interpreters[EVMType] - case TEVMType: - evm.interpreter = evm.interpreters[TEVMType] - default: - return nil, errors.New("no compatible interpreter") - } - - return callback, nil -} - // BlockContext provides the EVM with auxiliary information. 
Once provided // it shouldn't be modified. type BlockContext struct { @@ -103,8 +79,6 @@ type BlockContext struct { Transfer TransferFunc // GetHash returns the hash corresponding to n GetHash GetHashFunc - // ContractHasTEVM returns true if the contract has TEVM code - ContractHasTEVM func(codeHash common.Hash) (bool, error) // Block information Coinbase common.Address // Provides information for COINBASE @@ -153,8 +127,7 @@ type EVM struct { config Config // global (to this context) ethereum virtual machine // used throughout the execution of the tx. - interpreters []Interpreter - interpreter Interpreter + interpreter Interpreter // abort is used to abort the EVM calling operations // NOTE: must be set atomically abort int32 @@ -176,12 +149,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, state IntraBlockState, chain chainRules: chainConfig.Rules(blockCtx.BlockNumber), } - evmInterp := NewEVMInterpreter(evm, vmConfig) - evm.interpreters = []Interpreter{ - EVMType: evmInterp, - TEVMType: NewTEVMInterpreterByVM(evmInterp.VM), - } - evm.interpreter = evm.interpreters[EVMType] + evm.interpreter = NewEVMInterpreter(evm, vmConfig) return evm } @@ -265,15 +233,10 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // The depth-check is already done, and precompiles handled above codehash := evm.intraBlockState.GetCodeHash(addrCopy) - var contractHasTEVM bool - contractHasTEVM, err = evm.context.ContractHasTEVM(codehash) - - if err == nil { - contract := NewContract(caller, AccountRef(addrCopy), value, gas, evm.config.SkipAnalysis, contractHasTEVM) - contract.SetCallCode(&addrCopy, codehash, code) - ret, err = run(evm, contract, input, false) - gas = contract.Gas - } + contract := NewContract(caller, AccountRef(addrCopy), value, gas, evm.config.SkipAnalysis) + contract.SetCallCode(&addrCopy, codehash, code) + ret, err = run(evm, contract, input, false) + gas = contract.Gas } } // When an error was returned by the EVM or when setting 
the creation code @@ -336,13 +299,11 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, addrCopy := addr // Initialise a new contract and set the code that is to be used by the EVM. // The contract is a scoped environment for this execution context only. - var isTEVM bool codeHash := evm.intraBlockState.GetCodeHash(addrCopy) - isTEVM, err = evm.context.ContractHasTEVM(codeHash) if err == nil { - contract := NewContract(caller, AccountRef(caller.Address()), value, gas, evm.config.SkipAnalysis, isTEVM) + contract := NewContract(caller, AccountRef(caller.Address()), value, gas, evm.config.SkipAnalysis) contract.SetCallCode(&addrCopy, codeHash, code) ret, err = run(evm, contract, input, false) gas = contract.Gas @@ -390,12 +351,10 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by } else { addrCopy := addr // Initialise a new contract and make initialise the delegate values - var isTEVM bool codeHash := evm.intraBlockState.GetCodeHash(addrCopy) - isTEVM, err = evm.context.ContractHasTEVM(codeHash) if err == nil { - contract := NewContract(caller, AccountRef(caller.Address()), nil, gas, evm.config.SkipAnalysis, isTEVM).AsDelegate() + contract := NewContract(caller, AccountRef(caller.Address()), nil, gas, evm.config.SkipAnalysis).AsDelegate() contract.SetCallCode(&addrCopy, codeHash, code) ret, err = run(evm, contract, input, false) gas = contract.Gas @@ -456,12 +415,10 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte addrCopy := addr // Initialise a new contract and set the code that is to be used by the EVM. // The contract is a scoped environment for this execution context only. 
- var isTEVM bool codeHash := evm.intraBlockState.GetCodeHash(addrCopy) - isTEVM, err = evm.context.ContractHasTEVM(codeHash) if err == nil { - contract := NewContract(caller, AccountRef(addrCopy), new(uint256.Int), gas, evm.config.SkipAnalysis, isTEVM) + contract := NewContract(caller, AccountRef(addrCopy), new(uint256.Int), gas, evm.config.SkipAnalysis) contract.SetCallCode(&addrCopy, codeHash, code) // When an error was returned by the EVM or when setting the creation code // above we revert to the snapshot and consume any gas remaining. Additionally @@ -503,7 +460,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, if !evm.context.CanTransfer(evm.intraBlockState, caller.Address(), value) { return nil, common.Address{}, gas, ErrInsufficientBalance } - if evm.config.Debug || evm.config.EnableTEMV { + if evm.config.Debug { evm.config.Tracer.CaptureStart(evm, evm.depth, caller.Address(), address, false /* precompile */, true /* create */, calltype, codeAndHash.code, gas, value.ToBig(), nil) defer func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters evm.config.Tracer.CaptureEnd(evm.depth, ret, startGas, gas, time.Since(startTime), err) @@ -535,7 +492,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, // Initialise a new contract and set the code that is to be used by the EVM. // The contract is a scoped environment for this execution context only. 
- contract := NewContract(caller, AccountRef(address), value, gas, evm.config.SkipAnalysis, false) + contract := NewContract(caller, AccountRef(address), value, gas, evm.config.SkipAnalysis) contract.SetCodeOptionalHash(&address, codeAndHash) if evm.config.NoRecursion && evm.depth > 0 { @@ -599,7 +556,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *uint2 // DESCRIBED: docs/programmers_guide/guide.md#nonce func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *uint256.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) { codeAndHash := &codeAndHash{code: code} - contractAddr = crypto.CreateAddress2(caller.Address(), common.Hash(salt.Bytes32()), codeAndHash.Hash().Bytes()) + contractAddr = crypto.CreateAddress2(caller.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes()) return evm.create(caller, codeAndHash, gas, endowment, contractAddr, CREATE2T) } diff --git a/core/vm/evm_test.go b/core/vm/evm_test.go index 9e02dd1784d..39aac4b3c17 100644 --- a/core/vm/evm_test.go +++ b/core/vm/evm_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/params" "github.com/holiman/uint256" @@ -13,9 +12,7 @@ import ( func TestInterpreterReadonly(t *testing.T) { rapid.Check(t, func(t *rapid.T) { - env := NewEVM(BlockContext{ - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, - }, TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{EnableTEMV: true}) + env := NewEVM(BlockContext{}, TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{}) isEVMSliceTest := rapid.SliceOfN(rapid.Bool(), 1, -1).Draw(t, "tevm").([]bool) readOnlySliceTest := rapid.SliceOfN(rapid.Bool(), len(isEVMSliceTest), len(isEVMSliceTest)).Draw(t, "readonly").([]bool) @@ -26,8 +23,7 @@ func TestInterpreterReadonly(t *testing.T) { *currentIdx = -1 evmInterpreter := &testVM{ - readonlyGetSetter: 
env.interpreters[EVMType].(*EVMInterpreter), - isTEMV: false, + readonlyGetSetter: env.interpreter.(*EVMInterpreter), recordedReadOnlies: &readOnlies, recordedIsEVMCalled: &isEVMCalled, @@ -37,21 +33,8 @@ func TestInterpreterReadonly(t *testing.T) { readOnlySliceTest: readOnlySliceTest, currentIdx: currentIdx, } - tevmInterpreter := &testVM{ - readonlyGetSetter: env.interpreters[TEVMType].(*TEVMInterpreter), - isTEMV: true, - recordedReadOnlies: &readOnlies, - recordedIsEVMCalled: &isEVMCalled, - - env: env, - isEVMSliceTest: isEVMSliceTest, - readOnlySliceTest: readOnlySliceTest, - currentIdx: currentIdx, - } - - env.interpreters[EVMType] = evmInterpreter - env.interpreters[TEVMType] = tevmInterpreter + env.interpreter = evmInterpreter dummyContract := NewContract( &dummyContractRef{}, @@ -59,7 +42,6 @@ func TestInterpreterReadonly(t *testing.T) { new(uint256.Int), 0, false, - false, ) newTestSequential(env, currentIdx, readOnlySliceTest, isEVMSliceTest).Run(dummyContract, nil, false) @@ -82,10 +64,6 @@ func TestInterpreterReadonly(t *testing.T) { } for i, readOnly := range readOnlies { - if isEVMCalled[i] != isEVMSliceTest[i] { - t.Fatalf("wrong VM was called in %d index, got EVM %t, expected EVM %t", - i, isEVMCalled[i], isEVMSliceTest[i]) - } if readOnly.outer != readOnlySliceTest[i] { t.Fatalf("outer readOnly appeared in %d index, got readOnly %t, expected %t", @@ -289,9 +267,7 @@ func TestReadonlyBasicCases(t *testing.T) { t.Run(testcase.testName+evmsTestcase.suffix, func(t *testing.T) { readonlySliceTest := testcase.readonlySliceTest - env := NewEVM(BlockContext{ - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, - }, TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{EnableTEMV: true}) + env := NewEVM(BlockContext{}, TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{}) readonliesGot := make([]*readOnlyState, len(testcase.readonlySliceTest)) isEVMGot := make([]bool, len(evmsTestcase.emvs)) @@ -300,8 +276,7 @@ func 
TestReadonlyBasicCases(t *testing.T) { *currentIdx = -1 evmInterpreter := &testVM{ - readonlyGetSetter: env.interpreters[EVMType].(*EVMInterpreter), - isTEMV: false, + readonlyGetSetter: env.interpreter.(*EVMInterpreter), recordedReadOnlies: &readonliesGot, recordedIsEVMCalled: &isEVMGot, @@ -311,21 +286,8 @@ func TestReadonlyBasicCases(t *testing.T) { readOnlySliceTest: testcase.readonlySliceTest, currentIdx: currentIdx, } - tevmInterpreter := &testVM{ - readonlyGetSetter: env.interpreters[TEVMType].(*TEVMInterpreter), - isTEMV: true, - recordedReadOnlies: &readonliesGot, - recordedIsEVMCalled: &isEVMGot, - - env: env, - isEVMSliceTest: evmsTestcase.emvs, - readOnlySliceTest: testcase.readonlySliceTest, - currentIdx: currentIdx, - } - - env.interpreters[EVMType] = evmInterpreter - env.interpreters[TEVMType] = tevmInterpreter + env.interpreter = evmInterpreter dummyContract := NewContract( &dummyContractRef{}, @@ -333,7 +295,6 @@ func TestReadonlyBasicCases(t *testing.T) { new(uint256.Int), 0, false, - false, ) newTestSequential(env, currentIdx, readonlySliceTest, evmsTestcase.emvs).Run(dummyContract, nil, false) @@ -354,11 +315,6 @@ func TestReadonlyBasicCases(t *testing.T) { var firstReadOnly int for callIndex, readOnly := range readonliesGot { - if isEVMGot[callIndex] != evmsTestcase.emvs[callIndex] { - t.Fatalf("wrong VM was called in %d index, got EVM %t, expected EVM %t. Test EVMs %v; test readonly %v", - callIndex, isEVMGot[callIndex], evmsTestcase.emvs[callIndex], evmsTestcase.emvs, readonlySliceTest) - } - if readOnly.outer != readonlySliceTest[callIndex] { t.Fatalf("outer readOnly appeared in %d index, got readOnly %t, expected %t. 
Test EVMs %v; test readonly %v", callIndex, readOnly.outer, readonlySliceTest[callIndex], evmsTestcase.emvs, readonlySliceTest) @@ -431,7 +387,6 @@ func (st *testSequential) Run(_ *Contract, _ []byte, _ bool) ([]byte, error) { new(uint256.Int), 0, false, - !st.isEVMCalled[*st.currentIdx], ) return run(st.env, nextContract, nil, st.readOnlys[*st.currentIdx]) diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index fcbe14b47c3..1208c39a237 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -166,19 +166,19 @@ func gasSStore(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, me return params.NetSstoreDirtyGas, nil } -// 0. If *gasleft* is less than or equal to 2300, fail the current call. -// 1. If current value equals new value (this is a no-op), SLOAD_GAS is deducted. -// 2. If current value does not equal new value: -// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context): +// 0. If *gasleft* is less than or equal to 2300, fail the current call. +// 1. If current value equals new value (this is a no-op), SLOAD_GAS is deducted. +// 2. If current value does not equal new value: +// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context): // 2.1.1. If original value is 0, SSTORE_SET_GAS (20K) gas is deducted. // 2.1.2. Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter. -// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: +// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: // 2.2.1. If original value is not 0: -// 2.2.1.1. If current value is 0 (also means that new value is not 0), subtract SSTORE_CLEARS_SCHEDULE gas from refund counter. -// 2.2.1.2. 
If new value is 0 (also means that current value is not 0), add SSTORE_CLEARS_SCHEDULE gas to refund counter. +// 2.2.1.1. If current value is 0 (also means that new value is not 0), subtract SSTORE_CLEARS_SCHEDULE gas from refund counter. +// 2.2.1.2. If new value is 0 (also means that current value is not 0), add SSTORE_CLEARS_SCHEDULE gas to refund counter. // 2.2.2. If original value equals new value (this storage slot is reset): -// 2.2.2.1. If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter. -// 2.2.2.2. Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter. +// 2.2.2.1. If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter. +// 2.2.2.2. Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter. func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) { // If we fail the minimum gas availability invariant, fail (0) if contract.Gas <= params.SstoreSentryGasEIP2200 { diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go index 8d6eb2bc955..266746334a5 100644 --- a/core/vm/gas_table_test.go +++ b/core/vm/gas_table_test.go @@ -96,9 +96,8 @@ func TestEIP2200(t *testing.T) { _ = s.CommitBlock(params.AllEthashProtocolChanges.Rules(0), state.NewPlainStateWriter(tx, tx, 0)) vmctx := BlockContext{ - CanTransfer: func(IntraBlockState, common.Address, *uint256.Int) bool { return true }, - Transfer: func(IntraBlockState, common.Address, common.Address, *uint256.Int, bool) {}, - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, + CanTransfer: func(IntraBlockState, common.Address, *uint256.Int) bool { return true }, + Transfer: func(IntraBlockState, common.Address, common.Address, *uint256.Int, bool) {}, } vmenv := NewEVM(vmctx, TxContext{}, s, params.AllEthashProtocolChanges, Config{ExtraEips: []int{2200}}) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 33585151542..c0d352c5b2c 100644 --- 
a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -371,7 +371,7 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.Peek() - slot.SetUint64(uint64(interpreter.evm.IntraBlockState().GetCodeSize(common.Address(slot.Bytes20())))) + slot.SetUint64(uint64(interpreter.evm.IntraBlockState().GetCodeSize(slot.Bytes20()))) return nil, nil } @@ -415,16 +415,21 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) // opExtCodeHash returns the code hash of a specified account. // There are several cases when the function is called, while we can relay everything // to `state.GetCodeHash` function to ensure the correctness. -// (1) Caller tries to get the code hash of a normal contract account, state +// +// (1) Caller tries to get the code hash of a normal contract account, state +// // should return the relative code hash and set it as the result. // -// (2) Caller tries to get the code hash of a non-existent account, state should +// (2) Caller tries to get the code hash of a non-existent account, state should +// // return common.Hash{} and zero will be set as the result. // -// (3) Caller tries to get the code hash for an account without contract code, +// (3) Caller tries to get the code hash for an account without contract code, +// // state should return emptyCodeHash(0xc5d246...) as the result. // -// (4) Caller tries to get the code hash of a precompiled account, the result +// (4) Caller tries to get the code hash of a precompiled account, the result +// // should be zero or emptyCodeHash. 
// // It is worth noting that in order to avoid unnecessary create and clean, @@ -433,10 +438,12 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) // If the precompile account is not transferred any amount on a private or // customized chain, the return value will be zero. // -// (5) Caller tries to get the code hash for an account which is marked as suicided +// (5) Caller tries to get the code hash for an account which is marked as suicided +// // in the current transaction, the code hash of this account should be returned. // -// (6) Caller tries to get the code hash for an account which is marked as deleted, +// (6) Caller tries to get the code hash for an account which is marked as deleted, +// // this account should be regarded as a non-existent account and zero should be returned. func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.Peek() @@ -862,7 +869,7 @@ func makeLog(size int) executionFunc { mStart, mSize := stack.Pop(), stack.Pop() for i := 0; i < size; i++ { addr := stack.Pop() - topics[i] = common.Hash(addr.Bytes32()) + topics[i] = addr.Bytes32() } d := scope.Memory.GetCopy(mStart.Uint64(), mSize.Uint64()) diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index f8d42aa41af..d794cee40d9 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -95,9 +95,7 @@ func init() { func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFunc, name string) { var ( - env = NewEVM(BlockContext{ - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, - }, TxContext{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = stack.New() pc = uint64(0) evmInterpreter = env.interpreter.(*EVMInterpreter) @@ -196,9 +194,7 @@ func TestSAR(t *testing.T) { func TestAddMod(t *testing.T) { var ( - env = 
NewEVM(BlockContext{ - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, - }, TxContext{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = stack.New() evmInterpreter = NewEVMInterpreter(env, env.Config()) pc = uint64(0) @@ -285,9 +281,7 @@ func TestJsonTestcases(t *testing.T) { func opBenchmark(bench *testing.B, op executionFunc, args ...string) { var ( - env = NewEVM(BlockContext{ - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, - }, TxContext{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = stack.New() evmInterpreter = NewEVMInterpreter(env, env.Config()) ) @@ -521,9 +515,7 @@ func BenchmarkOpIsZero(b *testing.B) { func TestOpMstore(t *testing.T) { var ( - env = NewEVM(BlockContext{ - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, - }, TxContext{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = stack.New() mem = NewMemory() evmInterpreter = NewEVMInterpreter(env, env.Config()) @@ -547,9 +539,7 @@ func TestOpMstore(t *testing.T) { func BenchmarkOpMstore(bench *testing.B) { var ( - env = NewEVM(BlockContext{ - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, - }, TxContext{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = stack.New() mem = NewMemory() evmInterpreter = NewEVMInterpreter(env, env.Config()) @@ -570,9 +560,7 @@ func BenchmarkOpMstore(bench *testing.B) { func BenchmarkOpSHA3(bench *testing.B) { var ( - env = NewEVM(BlockContext{ - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, - }, TxContext{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, 
Config{}) stack = stack.New() mem = NewMemory() evmInterpreter = NewEVMInterpreter(env, env.Config()) diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 01914913238..9db3dad3655 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -36,7 +36,6 @@ type Config struct { TraceJumpDest bool // Print transaction hashes where jumpdest analysis was useful NoReceipts bool // Do not calculate receipts ReadOnly bool // Do no perform any block finalisation - EnableTEMV bool // true if execution with TEVM enable flag ExtraEips []int // Additional EIPS that are to be enabled } @@ -73,7 +72,8 @@ type EVMInterpreter struct { jt *JumpTable // EVM instruction table } -//structcheck doesn't see embedding +// structcheck doesn't see embedding +// //nolint:structcheck type VM struct { evm *EVM @@ -90,6 +90,10 @@ type VM struct { func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { var jt *JumpTable switch { + case evm.ChainRules().IsCancun: + jt = &cancunInstructionSet + case evm.ChainRules().IsShanghai: + jt = &shanghaiInstructionSet case evm.ChainRules().IsLondon: jt = &londonInstructionSet case evm.ChainRules().IsBerlin: @@ -131,6 +135,10 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { func NewEVMInterpreterByVM(vm *VM) *EVMInterpreter { var jt *JumpTable switch { + case vm.evm.ChainRules().IsCancun: + jt = &cancunInstructionSet + case vm.evm.ChainRules().IsShanghai: + jt = &shanghaiInstructionSet case vm.evm.ChainRules().IsLondon: jt = &londonInstructionSet case vm.evm.ChainRules().IsBerlin: diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 4f4d514e3ac..aba43fe3da7 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -65,22 +65,40 @@ var ( istanbulInstructionSet = newIstanbulInstructionSet() berlinInstructionSet = newBerlinInstructionSet() londonInstructionSet = newLondonInstructionSet() + shanghaiInstructionSet = newShanghaiInstructionSet() + cancunInstructionSet = newCancunInstructionSet() ) // 
JumpTable contains the EVM opcodes supported at a given fork. type JumpTable [256]*operation +// newCancunInstructionSet returns the frontier, homestead, byzantium, +// constantinople, istanbul, petersburg, berlin, london, paris, shanghai, +// and cancun instructions. +func newCancunInstructionSet() JumpTable { + instructionSet := newShanghaiInstructionSet() + return instructionSet +} + +// newShanghaiInstructionSet returns the frontier, homestead, byzantium, +// constantinople, istanbul, petersburg, berlin, london, paris, and shanghai instructions. +func newShanghaiInstructionSet() JumpTable { + instructionSet := newLondonInstructionSet() + enable3855(&instructionSet) // PUSH0 instruction https://eips.ethereum.org/EIPS/eip-3855 + return instructionSet +} + // newLondonInstructionSet returns the frontier, homestead, byzantium, -// contantinople, istanbul, petersburg, berlin, and london instructions. +// constantinople, istanbul, petersburg, berlin, and london instructions. func newLondonInstructionSet() JumpTable { instructionSet := newBerlinInstructionSet() - enable3529(&instructionSet) // EIP-3529: Reduction in refunds https://eips.ethereum.org/EIPS/eip-3529 + enable3529(&instructionSet) // Reduction in refunds https://eips.ethereum.org/EIPS/eip-3529 enable3198(&instructionSet) // Base fee opcode https://eips.ethereum.org/EIPS/eip-3198 return instructionSet } // newBerlinInstructionSet returns the frontier, homestead, byzantium, -// contantinople, istanbul, petersburg and berlin instructions. +// constantinople, istanbul, petersburg and berlin instructions. func newBerlinInstructionSet() JumpTable { instructionSet := newIstanbulInstructionSet() enable2929(&instructionSet) // Access lists for trie accesses https://eips.ethereum.org/EIPS/eip-2929 @@ -88,7 +106,7 @@ func newBerlinInstructionSet() JumpTable { } // newIstanbulInstructionSet returns the frontier, homestead, byzantium, -// contantinople, istanbul and petersburg instructions. 
+// constantinople, istanbul and petersburg instructions. func newIstanbulInstructionSet() JumpTable { instructionSet := newConstantinopleInstructionSet() @@ -100,7 +118,7 @@ func newIstanbulInstructionSet() JumpTable { } // newConstantinopleInstructionSet returns the frontier, homestead, -// byzantium and contantinople instructions. +// byzantium and constantinople instructions. func newConstantinopleInstructionSet() JumpTable { instructionSet := newByzantiumInstructionSet() instructionSet[SHL] = &operation{ diff --git a/core/vm/lightclient/iavl/doc.go b/core/vm/lightclient/iavl/doc.go index 7751bccadd2..d3ccfb80176 100644 --- a/core/vm/lightclient/iavl/doc.go +++ b/core/vm/lightclient/iavl/doc.go @@ -1,49 +1,47 @@ // Package iavl implements a versioned, snapshottable (immutable) AVL+ tree // for persisting key-value pairs. // -// // Basic usage of MutableTree. // -// import "github.com/tendermint/iavl" -// import "github.com/tendermint/tendermint/libs/db" -// ... +// import "github.com/tendermint/iavl" +// import "github.com/tendermint/tendermint/libs/db" +// ... 
// -// tree := iavl.NewMutableTree(db.NewMemDB(), 128) +// tree := iavl.NewMutableTree(db.NewMemDB(), 128) // -// tree.IsEmpty() // true +// tree.IsEmpty() // true // -// tree.Set([]byte("alice"), []byte("abc")) -// tree.SaveVersion(1) +// tree.Set([]byte("alice"), []byte("abc")) +// tree.SaveVersion(1) // -// tree.Set([]byte("alice"), []byte("xyz")) -// tree.Set([]byte("bob"), []byte("xyz")) -// tree.SaveVersion(2) +// tree.Set([]byte("alice"), []byte("xyz")) +// tree.Set([]byte("bob"), []byte("xyz")) +// tree.SaveVersion(2) // -// tree.LatestVersion() // 2 +// tree.LatestVersion() // 2 // -// tree.GetVersioned([]byte("alice"), 1) // "abc" -// tree.GetVersioned([]byte("alice"), 2) // "xyz" +// tree.GetVersioned([]byte("alice"), 1) // "abc" +// tree.GetVersioned([]byte("alice"), 2) // "xyz" // // Proof of existence: // -// root := tree.Hash() -// val, proof, err := tree.GetVersionedWithProof([]byte("bob"), 2) // "xyz", RangeProof, nil -// proof.Verify([]byte("bob"), val, root) // nil +// root := tree.Hash() +// val, proof, err := tree.GetVersionedWithProof([]byte("bob"), 2) // "xyz", RangeProof, nil +// proof.Verify([]byte("bob"), val, root) // nil // // Proof of absence: // -// _, proof, err = tree.GetVersionedWithProof([]byte("tom"), 2) // nil, RangeProof, nil -// proof.Verify([]byte("tom"), nil, root) // nil +// _, proof, err = tree.GetVersionedWithProof([]byte("tom"), 2) // nil, RangeProof, nil +// proof.Verify([]byte("tom"), nil, root) // nil // // Now we delete an old version: // -// tree.DeleteVersion(1) -// tree.VersionExists(1) // false -// tree.Get([]byte("alice")) // "xyz" -// tree.GetVersioned([]byte("alice"), 1) // nil +// tree.DeleteVersion(1) +// tree.VersionExists(1) // false +// tree.Get([]byte("alice")) // "xyz" +// tree.GetVersioned([]byte("alice"), 1) // nil // // Can't create a proof of absence for a version we no longer have: // -// _, proof, err = tree.GetVersionedWithProof([]byte("tom"), 1) // nil, nil, error -// +// _, proof, err = 
tree.GetVersionedWithProof([]byte("tom"), 1) // nil, nil, error package iavl diff --git a/core/vm/lightclient/iavl/key_format.go b/core/vm/lightclient/iavl/key_format.go index b63f9ea7b38..e54d9fd8c83 100644 --- a/core/vm/lightclient/iavl/key_format.go +++ b/core/vm/lightclient/iavl/key_format.go @@ -18,20 +18,20 @@ type KeyFormat struct { // For example, to store keys that could index some objects by a version number and their SHA256 hash using the form: // 'c' then you would define the KeyFormat with: // -// var keyFormat = NewKeyFormat('c', 8, 32) +// var keyFormat = NewKeyFormat('c', 8, 32) // // Then you can create a key with: // -// func ObjectKey(version uint64, objectBytes []byte) []byte { -// hasher := sha256.New() -// hasher.Sum(nil) -// return keyFormat.Key(version, hasher.Sum(nil)) -// } +// func ObjectKey(version uint64, objectBytes []byte) []byte { +// hasher := sha256.New() +// hasher.Sum(nil) +// return keyFormat.Key(version, hasher.Sum(nil)) +// } func NewKeyFormat(prefix byte, layout ...int) *KeyFormat { // For prefix byte length := 1 for _, l := range layout { - length += int(l) + length += l } return &KeyFormat{ prefix: prefix, diff --git a/core/vm/lightclient/iavl/proof_iavl_absence.go b/core/vm/lightclient/iavl/proof_iavl_absence.go index 88a6587aee3..b35f0beeed7 100644 --- a/core/vm/lightclient/iavl/proof_iavl_absence.go +++ b/core/vm/lightclient/iavl/proof_iavl_absence.go @@ -75,7 +75,7 @@ func (op IAVLAbsenceOp) Run(args [][]byte) ([][]byte, error) { // XXX What is the encoding for keys? // We should decode the key depending on whether it's a string or hex, // maybe based on quotes and 0x prefix? 
- err = op.Proof.VerifyAbsence([]byte(op.key)) + err = op.Proof.VerifyAbsence(op.key) if err != nil { return nil, cmn.ErrorWrap(err, "verifying absence") } diff --git a/core/vm/lightclient/iavl/proof_iavl_value.go b/core/vm/lightclient/iavl/proof_iavl_value.go index df3c905ef8d..ca2261da1ec 100644 --- a/core/vm/lightclient/iavl/proof_iavl_value.go +++ b/core/vm/lightclient/iavl/proof_iavl_value.go @@ -74,7 +74,7 @@ func (op IAVLValueOp) Run(args [][]byte) ([][]byte, error) { // XXX What is the encoding for keys? // We should decode the key depending on whether it's a string or hex, // maybe based on quotes and 0x prefix? - err = op.Proof.VerifyItem([]byte(op.key), value) + err = op.Proof.VerifyItem(op.key, value) if err != nil { return nil, cmn.ErrorWrap(err, "verifying value") } diff --git a/core/vm/lightclient/iavl/proof_path.go b/core/vm/lightclient/iavl/proof_path.go index 5b2609654bb..6388d393667 100644 --- a/core/vm/lightclient/iavl/proof_path.go +++ b/core/vm/lightclient/iavl/proof_path.go @@ -125,9 +125,10 @@ func (pl PathToLeaf) dropRoot() PathToLeaf { if pl.isEmpty() { return pl } - return PathToLeaf(pl[:len(pl)-1]) + return pl[:len(pl)-1] } +// TODO: (leonard) unused linter complains these are unused methods func (pl PathToLeaf) hasCommonRoot(pl2 PathToLeaf) bool { if pl.isEmpty() || pl2.isEmpty() { return false diff --git a/core/vm/lightclient/iavl/proof_range.go b/core/vm/lightclient/iavl/proof_range.go index caca3fa293a..2b1f7a881da 100644 --- a/core/vm/lightclient/iavl/proof_range.go +++ b/core/vm/lightclient/iavl/proof_range.go @@ -147,13 +147,6 @@ func (proof *RangeProof) VerifyAbsence(key []byte) error { return nil // proof ok } else if cmp == 0 { return cmn.NewError("absence disproved via item #%v", i) - } else { - if i == len(proof.Leaves)-1 { - // If last item, check whether - // it's the last item in teh tree. 
- - } - continue } } diff --git a/core/vm/logger.go b/core/vm/logger.go index b9d511b8858..0f97ad96d44 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -222,7 +222,7 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui value uint256.Int ) env.IntraBlockState().GetState(contract.Address(), &address, &value) - l.storage[contract.Address()][address] = common.Hash(value.Bytes32()) + l.storage[contract.Address()][address] = value.Bytes32() } // capture SSTORE opcodes and record the written entry in the local storage. if op == SSTORE && stack.Len() >= 2 { diff --git a/core/vm/logger_test.go b/core/vm/logger_test.go index c5a3d554034..5364cf0eb58 100644 --- a/core/vm/logger_test.go +++ b/core/vm/logger_test.go @@ -29,13 +29,11 @@ import ( func TestStoreCapture(t *testing.T) { var ( - env = NewEVM(BlockContext{ - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, - }, TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{}) logger = NewStructLogger(nil) mem = NewMemory() stack = stack.New() - contract = NewContract(&dummyContractRef{}, &dummyContractRef{}, new(uint256.Int), 0, false /* skipAnalysis */, false) + contract = NewContract(&dummyContractRef{}, &dummyContractRef{}, new(uint256.Int), 0, false /* skipAnalysis */) ) stack.Push(uint256.NewInt(1)) stack.Push(uint256.NewInt(0)) diff --git a/core/vm/mock_vm.go b/core/vm/mock_vm.go index 6228a307ec4..cda6c6d8442 100644 --- a/core/vm/mock_vm.go +++ b/core/vm/mock_vm.go @@ -17,7 +17,6 @@ type readonlyGetSetter interface { type testVM struct { readonlyGetSetter - isTEMV bool recordedReadOnlies *[]*readOnlyState recordedIsEVMCalled *[]bool @@ -45,7 +44,7 @@ func (evm *testVM) Run(_ *Contract, _ []byte, readOnly bool) (ret []byte, err er currentReadOnly.in = evm.getReadonly() (*evm.recordedReadOnlies)[currentIndex] = currentReadOnly - 
(*evm.recordedIsEVMCalled)[currentIndex] = !evm.isTEMV + (*evm.recordedIsEVMCalled)[currentIndex] = true *evm.currentIdx++ @@ -56,7 +55,6 @@ func (evm *testVM) Run(_ *Contract, _ []byte, readOnly bool) (ret []byte, err er new(uint256.Int), 0, false, - !evm.isEVMSliceTest[*evm.currentIdx], ), nil, evm.readOnlySliceTest[*evm.currentIdx]) return res, err } diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go index 286307ae91a..8bdfda0a3f3 100644 --- a/core/vm/opcodes.go +++ b/core/vm/opcodes.go @@ -120,6 +120,7 @@ const ( MSIZE OpCode = 0x59 GAS OpCode = 0x5a JUMPDEST OpCode = 0x5b + PUSH0 OpCode = 0x5f ) // 0x60 range. @@ -298,6 +299,7 @@ var opCodeToString = map[OpCode]string{ MSIZE: "MSIZE", GAS: "GAS", JUMPDEST: "JUMPDEST", + PUSH0: "PUSH0", // 0x60 range - push. PUSH1: "PUSH1", @@ -464,6 +466,7 @@ var stringToOp = map[string]OpCode{ "MSIZE": MSIZE, "GAS": GAS, "JUMPDEST": JUMPDEST, + "PUSH0": PUSH0, "PUSH1": PUSH1, "PUSH2": PUSH2, "PUSH3": PUSH3, diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go index bf514f74e03..a13e8f08007 100644 --- a/core/vm/runtime/env.go +++ b/core/vm/runtime/env.go @@ -19,7 +19,6 @@ package runtime import ( "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/ethdb" ) func NewEnv(cfg *Config) *vm.EVM { @@ -29,16 +28,15 @@ func NewEnv(cfg *Config) *vm.EVM { } blockContext := vm.BlockContext{ - CanTransfer: core.CanTransfer, - Transfer: core.Transfer, - GetHash: cfg.GetHashFn, - ContractHasTEVM: ethdb.GetHasTEVM(cfg.kv), - Coinbase: cfg.Coinbase, - BlockNumber: cfg.BlockNumber.Uint64(), - Time: cfg.Time.Uint64(), - Difficulty: cfg.Difficulty, - GasLimit: cfg.GasLimit, - BaseFee: cfg.BaseFee, + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + GetHash: cfg.GetHashFn, + Coinbase: cfg.Coinbase, + BlockNumber: cfg.BlockNumber.Uint64(), + Time: cfg.Time.Uint64(), + Difficulty: cfg.Difficulty, + GasLimit: cfg.GasLimit, + BaseFee: cfg.BaseFee, } return 
vm.NewEVM(blockContext, txContext, cfg.State, cfg.ChainConfig, cfg.EVMConfig) diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index 3d6a8f217ff..9310c4b71b6 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -76,6 +76,8 @@ func setDefaults(cfg *Config) { LondonBlock: new(big.Int), ArrowGlacierBlock: new(big.Int), GrayGlacierBlock: new(big.Int), + ShanghaiBlock: new(big.Int), + CancunBlock: new(big.Int), } } diff --git a/core/vm/stack/stack.go b/core/vm/stack/stack.go index 9f8122f9b78..9b6e291201e 100644 --- a/core/vm/stack/stack.go +++ b/core/vm/stack/stack.go @@ -21,6 +21,7 @@ import ( "sync" "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" ) var stackPool = sync.Pool{ @@ -37,7 +38,11 @@ type Stack struct { } func New() *Stack { - return stackPool.Get().(*Stack) + stack, ok := stackPool.Get().(*Stack) + if !ok { + log.Error("Type assertion failure", "err", "cannot get Stack pointer from stackPool") + } + return stack } func (st *Stack) Push(d *uint256.Int) { @@ -120,7 +125,11 @@ type ReturnStack struct { } func NewReturnStack() *ReturnStack { - return rStackPool.Get().(*ReturnStack) + rStack, ok := rStackPool.Get().(*ReturnStack) + if !ok { + log.Error("Type assertion failure", "err", "cannot get ReturnStack pointer from rStackPool") + } + return rStack } func (st *ReturnStack) Push(d uint32) { diff --git a/core/vm/tevm_interpreter.go b/core/vm/tevm_interpreter.go deleted file mode 100644 index 4053ce944b1..00000000000 --- a/core/vm/tevm_interpreter.go +++ /dev/null @@ -1,24 +0,0 @@ -package vm - -// todo: TBD actual TEVM interpreter - -// TEVMInterpreter represents an TEVM interpreter -type TEVMInterpreter struct { - *EVMInterpreter -} - -type VmType int8 - -const ( - EVMType VmType = 0 - TEVMType VmType = 1 -) - -// NewTEVMInterpreter returns a new instance of the Interpreter. 
-func NewTEVMInterpreter(evm *EVM, cfg Config) *TEVMInterpreter { - return &TEVMInterpreter{NewEVMInterpreter(evm, cfg)} -} - -func NewTEVMInterpreterByVM(vm *VM) *TEVMInterpreter { - return &TEVMInterpreter{NewEVMInterpreterByVM(vm)} -} diff --git a/crypto/blake2b/blake2b_test.go b/crypto/blake2b/blake2b_test.go index 9d24444a27b..14f2e3bb73d 100644 --- a/crypto/blake2b/blake2b_test.go +++ b/crypto/blake2b/blake2b_test.go @@ -9,6 +9,7 @@ import ( "encoding" "encoding/hex" "fmt" + "github.com/ledgerwatch/log/v3" "hash" "io" "testing" @@ -83,7 +84,11 @@ func TestMarshal(t *testing.T) { if err != nil { t.Fatalf("size=%d, len(input)=%d: could not marshal: %v", size, i, err) } - err = h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(halfstate) + binUnmarshaler, ok := h2.(encoding.BinaryUnmarshaler) + if !ok { + log.Warn("Failed to type convert h2 to encoding.BinaryUnmarshaler") + } + err = binUnmarshaler.UnmarshalBinary(halfstate) if err != nil { t.Fatalf("size=%d, len(input)=%d: could not unmarshal: %v", size, i, err) } diff --git a/crypto/crypto.go b/crypto/crypto.go index 59adaa2370d..1f993eb5b93 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -39,7 +39,7 @@ import ( "github.com/ledgerwatch/erigon/rlp" ) -//SignatureLength indicates the byte length required to carry a signature with recovery id. +// SignatureLength indicates the byte length required to carry a signature with recovery id. const SignatureLength = 64 + 1 // 64 bytes ECDSA signature + 1 byte recovery id // RecoveryIDOffset points to the byte offset within the signature that contains the recovery id. 
diff --git a/docker-compose.yml b/docker-compose.yml index 1d5567d8817..7c59558c1e1 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -34,6 +34,8 @@ services: erigon ${ERIGON_FLAGS-} --private.api.addr=0.0.0.0:9090 --sentry.api.addr=sentry:9091 --downloader.api.addr=downloader:9093 --txpool.disable --metrics --metrics.addr=0.0.0.0 --metrics.port=6060 --pprof --pprof.addr=0.0.0.0 --pprof.port=6061 + --authrpc.jwtsecret=/home/erigon/.local/share/erigon/jwt.hex --datadir=/home/erigon/.local/share/erigon + ports: [ "8551:8551" ] volumes: # It's ok to mount sub-dirs of "datadir" to different drives - ${XDG_DATA_HOME:-~/.local/share}/erigon:/home/erigon/.local/share/erigon @@ -42,31 +44,30 @@ services: sentry: <<: *default-erigon-service - command: sentry ${SENTRY_FLAGS-} --sentry.api.addr=0.0.0.0:9091 + command: sentry ${SENTRY_FLAGS-} --sentry.api.addr=0.0.0.0:9091 --datadir=/home/erigon/.local/share/erigon ports: [ "30303:30303/tcp", "30303:30303/udp" ] downloader: <<: *default-erigon-service - command: downloader ${DOWNLOADER_FLAGS-} --downloader.api.addr=0.0.0.0:9093 + command: downloader ${DOWNLOADER_FLAGS-} --downloader.api.addr=0.0.0.0:9093 --datadir=/home/erigon/.local/share/erigon ports: [ "42069:42069/tcp", "42069:42069/udp" ] txpool: <<: *default-erigon-service - command: txpool ${TXPOOL_FLAGS-} --private.api.addr=erigon:9090 --txpool.api.addr=0.0.0.0:9094 + command: txpool ${TXPOOL_FLAGS-} --private.api.addr=erigon:9090 --txpool.api.addr=0.0.0.0:9094 --sentry.api.addr=sentry:9091 --datadir=/home/erigon/.local/share/erigon rpcdaemon: <<: *default-erigon-service command: | rpcdaemon ${RPCDAEMON_FLAGS-} --http.addr=0.0.0.0 --http.vhosts=* --http.corsdomain=* --ws - --private.api.addr=erigon:9090 --txpool.api.addr=txpool:9094 - --authrpc.jwtsecret=/home/erigon/.local/share/erigon/jwt.hex - ports: [ "8545:8545" ] # "8551:8551" + --private.api.addr=erigon:9090 --txpool.api.addr=txpool:9094 --datadir=/home/erigon/.local/share/erigon + ports: [ "8545:8545" 
] prometheus: - image: prom/prometheus:v2.37.0 + image: prom/prometheus:v2.37.1 user: ${DOCKER_UID:-1000}:${DOCKER_GID:-1000} # Uses erigon user from Dockerfile command: --log.level=warn --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=150d --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles ports: [ "9090:9090" ] @@ -76,7 +77,7 @@ services: restart: unless-stopped grafana: - image: grafana/grafana:9.0.3 + image: grafana/grafana:9.1.6 user: "472:0" # required for grafana version >= 7.3 ports: [ "3000:3000" ] volumes: diff --git a/eth/backend.go b/eth/backend.go index cdb2b078089..eecf472b748 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -31,6 +31,7 @@ import ( "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/direct" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" @@ -41,11 +42,17 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" + libstate "github.com/ledgerwatch/erigon-lib/state" txpool2 "github.com/ledgerwatch/erigon-lib/txpool" "github.com/ledgerwatch/erigon-lib/txpool/txpooluitl" types2 "github.com/ledgerwatch/erigon-lib/types" "github.com/ledgerwatch/erigon/cmd/downloader/downloader" + "github.com/ledgerwatch/erigon/cmd/downloader/downloader/downloadercfg" "github.com/ledgerwatch/erigon/cmd/downloader/downloadergrpc" + "github.com/ledgerwatch/erigon/cmd/lightclient/clparams" + "github.com/ledgerwatch/erigon/cmd/lightclient/lightclient" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel" + "github.com/ledgerwatch/erigon/cmd/lightclient/sentinel/service" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli" 
"github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands" "github.com/ledgerwatch/erigon/cmd/sentry/sentry" @@ -67,10 +74,12 @@ import ( "github.com/ledgerwatch/erigon/eth/ethutils" "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/ethstats" "github.com/ledgerwatch/erigon/node" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" @@ -80,6 +89,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" stages2 "github.com/ledgerwatch/erigon/turbo/stages" + "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/log/v3" "golang.org/x/exp/slices" "google.golang.org/grpc" @@ -124,7 +134,8 @@ type Ethereum struct { downloaderClient proto_downloader.DownloaderClient - notifications *stagedsync.Notifications + notifications *shards.Notifications + unsubscribeEthstat func() waitForStageLoopStop chan struct{} waitForMiningStop chan struct{} @@ -138,6 +149,8 @@ type Ethereum struct { notifyMiningAboutNewTxs chan struct{} forkValidator *engineapi.ForkValidator downloader *downloader.Downloader + + agg *libstate.Aggregator22 } // New creates a new Ethereum object (including the @@ -148,7 +161,8 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice) } - tmpdir := stack.Config().Dirs.Tmp + dirs := stack.Config().Dirs + tmpdir := dirs.Tmp if err := RemoveContents(tmpdir); err != nil { // clean it on startup return nil, fmt.Errorf("clean tmp dir: %s, %w", tmpdir, err) } @@ -159,28 +173,36 @@ func New(stack *node.Node, config *ethconfig.Config, logger 
log.Logger) (*Ethere return nil, err } + if config.Genesis != nil && config.Genesis.Config != nil { + types.SetHeaderSealFlag(config.Genesis.Config.IsHeaderWithSeal()) + } + var currentBlock *types.Block + // Check if we have an already initialized chain and fall back to // that if so. Otherwise we need to generate a new genesis spec. - if err := chainKv.View(context.Background(), func(tx kv.Tx) error { + var chainConfig *params.ChainConfig + var genesis *types.Block + if err := chainKv.Update(context.Background(), func(tx kv.RwTx) error { h, err := rawdb.ReadCanonicalHash(tx, 0) if err != nil { panic(err) } - if h != (common.Hash{}) { - config.Genesis = nil // fallback to db content + genesisSpec := config.Genesis + if h != (common.Hash{}) { // fallback to db content + genesisSpec = nil + } + var genesisErr error + chainConfig, genesis, genesisErr = core.WriteGenesisBlock(tx, genesisSpec, config.OverrideMergeNetsplitBlock, config.OverrideTerminalTotalDifficulty) + if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { + return genesisErr } + currentBlock = rawdb.ReadCurrentBlock(tx) return nil }); err != nil { panic(err) } - - chainConfig, genesis, genesisErr := core.CommitGenesisBlockWithOverride(chainKv, config.Genesis, config.OverrideMergeNetsplitBlock, config.OverrideTerminalTotalDifficulty) - if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { - return nil, genesisErr - } - config.Snapshot.Enabled = ethconfig.UseSnapshotsByChainName(chainConfig.ChainName) && config.Sync.UseSnapshots types.SetHeaderSealFlag(chainConfig.IsHeaderWithSeal()) @@ -204,13 +226,19 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere if err != nil { return err } + + config.HistoryV3, err = rawdb.HistoryV3.WriteOnce(tx, config.HistoryV3) + if err != nil { + return err + } + // if we are in the incorrect syncmode then we change it to the appropriate one if !isCorrectSync { log.Warn("Incorrect snapshot 
enablement", "got", config.Sync.UseSnapshots, "change_to", useSnapshots) config.Sync.UseSnapshots = useSnapshots config.Snapshot.Enabled = ethconfig.UseSnapshotsByChainName(chainConfig.ChainName) && useSnapshots } - log.Info("Effective", "prune_flags", config.Prune.String(), "snapshot_flags", config.Snapshot.String()) + log.Info("Effective", "prune_flags", config.Prune.String(), "snapshot_flags", config.Snapshot.String(), "history.v3", config.HistoryV3) return nil }); err != nil { @@ -218,10 +246,8 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } ctx, ctxCancel := context.WithCancel(context.Background()) - log.Info("Using snapshots", "on", config.Snapshot.Enabled) // kv_remote architecture does blocks on stream.Send - means current architecture require unlimited amount of txs to provide good throughput - //limiter := make(chan struct{}, kv.ReadersLimit) backend := &Ethereum{ sentryCtx: ctx, sentryCancel: ctxCancel, @@ -234,17 +260,18 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere genesisHash: genesis.Hash(), waitForStageLoopStop: make(chan struct{}), waitForMiningStop: make(chan struct{}), - notifications: &stagedsync.Notifications{ - Events: privateapi.NewEvents(), - Accumulator: shards.NewAccumulator(chainConfig), + notifications: &shards.Notifications{ + Events: shards.NewEvents(), + Accumulator: shards.NewAccumulator(), }, } - blockReader, allSnapshots, err := backend.setUpBlockReader(ctx, config.Snapshot.Enabled, config) + blockReader, allSnapshots, agg, err := backend.setUpBlockReader(ctx, config.Dirs, config.Snapshot, config.Downloader) if err != nil { return nil, err } + backend.agg = agg - kvRPC := remotedbserver.NewKvServer(ctx, chainKv, allSnapshots) + kvRPC := remotedbserver.NewKvServer(ctx, chainKv, allSnapshots, agg) backend.notifications.StateChangesConsumer = kvRPC backend.gasPrice, _ = uint256.FromBig(config.Miner.GasPrice) @@ -301,6 +328,33 @@ func New(stack *node.Node, 
config *ethconfig.Config, logger log.Logger) (*Ethere }() } + inMemoryExecution := func(batch kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, + notifications *shards.Notifications) error { + // Needs its own notifications to not update RPC daemon and txpool about pending blocks + stateSync, err := stages2.NewInMemoryExecution(backend.sentryCtx, backend.chainDB, config, backend.sentriesClient, dirs, notifications, allSnapshots, backend.agg) + if err != nil { + return err + } + // We start the mining step + if err := stages2.StateStep(ctx, batch, stateSync, header, body, unwindPoint, headersChain, bodiesChain, true /* quiet */); err != nil { + log.Warn("Could not validate block", "err", err) + return err + } + progress, err := stages.GetStageProgress(batch, stages.IntermediateHashes) + if err != nil { + return err + } + if progress < header.Number.Uint64() { + return fmt.Errorf("unsuccessful execution, progress %d < expected %d", progress, header.Number.Uint64()) + } + return nil + } + currentBlockNumber := uint64(0) + if currentBlock != nil { + currentBlockNumber = currentBlock.NumberU64() + } + + log.Info("Initialising Ethereum protocol", "network", config.NetworkID) var consensusConfig interface{} if chainConfig.Clique != nil { @@ -315,9 +369,8 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } else { consensusConfig = &config.Ethash } - - log.Info("Initialising Ethereum protocol", "network", config.NetworkID) - backend.engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallURL, config.WithoutHeimdall, stack.DataDir(), allSnapshots) + backend.engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallURL, config.WithoutHeimdall, stack.DataDir(), allSnapshots, false /* 
readonly */, backend.chainDB) + backend.forkValidator = engineapi.NewForkValidator(currentBlockNumber, inMemoryExecution) backend.sentriesClient, err = sentry.NewMultiClient( chainKv, @@ -330,6 +383,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere config.Sync, blockReader, stack.Config().SentryLogPeerInfo, + backend.forkValidator, ) if err != nil { return nil, err @@ -366,9 +420,9 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere mining := stagedsync.New( stagedsync.MiningStages(backend.sentryCtx, stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, nil, tmpdir), - stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil), - stagedsync.StageHashStateCfg(backend.chainDB, tmpdir), - stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil), + stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil, 0), + stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3, backend.agg), + stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil, config.HistoryV3, backend.agg), stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miner, backend.miningSealingQuit), ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder) @@ -384,9 +438,9 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere proposingSync := stagedsync.New( stagedsync.MiningStages(backend.sentryCtx, stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miningStatePos, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, param, tmpdir), - stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, 
*backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt), - stagedsync.StageHashStateCfg(backend.chainDB, tmpdir), - stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil), + stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId), + stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3, backend.agg), + stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil, config.HistoryV3, backend.agg), stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miningStatePos, backend.miningSealingQuit), ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder) // We start the mining step @@ -397,24 +451,37 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere return block, nil } - inMemoryExecution := func(batch kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody) error { - stateSync, err := stages2.NewInMemoryExecution(backend.sentryCtx, backend.log, backend.chainDB, *config, backend.sentriesClient, tmpdir, backend.notifications, allSnapshots) - if err != nil { - return err - } - // We start the mining step - if err := stages2.StateStep(ctx, batch, stateSync, blockReader, header, body, unwindPoint, headersChain, bodiesChain); err != nil { - log.Warn("Could not validate block", "err", err) - return err - } - return nil - } - // Initialize ethbackend ethBackendRPC := privateapi.NewEthBackendServer(ctx, backend, backend.chainDB, backend.notifications.Events, blockReader, chainConfig, assembleBlockPOS, backend.sentriesClient.Hd, config.Miner.EnabledPOS) miningRPC = privateapi.NewMiningServer(ctx, backend, ethashApi) + if config.CL { + // Chains supported are Sepolia, Mainnet and Goerli + if config.NetworkID == 1 || config.NetworkID 
== 5 || config.NetworkID == 11155111 { + genesisCfg, networkCfg, beaconCfg := clparams.GetConfigsByNetwork(clparams.NetworkType(config.NetworkID)) + if err != nil { + return nil, err + } + client, err := service.StartSentinelService(&sentinel.SentinelConfig{ + IpAddr: "127.0.0.1", + Port: 4000, + TCPPort: 4001, + GenesisConfig: genesisCfg, + NetworkConfig: networkCfg, + BeaconConfig: beaconCfg, + }, &service.ServerConfig{Network: "tcp", Addr: "localhost:7777"}) + if err != nil { + return nil, err + } + + lc := lightclient.NewLightClient(ethBackendRPC, client) + go lc.Start(ctx) + } else { + log.Warn("Cannot run lightclient on a non-supported chain. only goerli, sepolia and mainnet are allowed") + } + } + if stack.Config().PrivateApiAddr != "" { var creds credentials.TransportCredentials if stack.Config().TLSConnection { @@ -483,6 +550,13 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere if err := miningRPC.(*privateapi.MiningServer).BroadcastMinedBlock(b); err != nil { log.Error("txpool rpc mined block broadcast", "err", err) } + log.Trace("BroadcastMinedBlock successful", "number", b.Number(), "GasUsed", b.GasUsed(), "txn count", b.Transactions().Len()) + backend.sentriesClient.PropagateNewBlockHashes(ctx, []headerdownload.Announce{ + { + Number: b.NumberU64(), + Hash: b.Hash(), + }, + }) if err := backend.sentriesClient.Hd.AddMinedHeader(b.Header()); err != nil { log.Error("add mined block to header downloader", "err", err) } @@ -504,12 +578,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere return nil, err } - var headCh chan *types.Block - if config.Ethstats != "" { - headCh = make(chan *types.Block, 1) - } - backend.forkValidator = engineapi.NewForkValidator(currentBlock.NumberU64(), inMemoryExecution) - backend.stagedSync, err = stages2.NewStagedSync(backend.sentryCtx, backend.log, backend.chainDB, stack.Config().P2P, *config, backend.sentriesClient, tmpdir, backend.notifications, 
backend.downloaderClient, allSnapshots, headCh, backend.forkValidator) + backend.stagedSync, err = stages2.NewStagedSync(backend.sentryCtx, backend.chainDB, stack.Config().P2P, config, backend.sentriesClient, backend.notifications, backend.downloaderClient, allSnapshots, backend.agg, backend.forkValidator) if err != nil { return nil, err } @@ -540,35 +609,31 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } //eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams) if config.Ethstats != "" { + var headCh chan [][]byte + headCh, backend.unsubscribeEthstat = backend.notifications.Events.AddHeaderSubscription() if err := ethstats.New(stack, backend.sentryServers, chainKv, backend.engine, config.Ethstats, backend.networkID, ctx.Done(), headCh); err != nil { return nil, err } } // start HTTP API httpRpcCfg := stack.Config().Http - if httpRpcCfg.Enabled { - ethRpcClient, txPoolRpcClient, miningRpcClient, starkNetRpcClient, stateCache, ff, err := cli.EmbeddedServices( - ctx, chainKv, httpRpcCfg.StateCache, blockReader, allSnapshots, - ethBackendRPC, - backend.txPool2GrpcServer, - miningRPC, - ) - if err != nil { - return nil, err - } + ethRpcClient, txPoolRpcClient, miningRpcClient, stateCache, ff, err := cli.EmbeddedServices(ctx, chainKv, httpRpcCfg.StateCache, blockReader, allSnapshots, backend.agg, ethBackendRPC, backend.txPool2GrpcServer, miningRPC) + if err != nil { + return nil, err + } - var borDb kv.RoDB - if casted, ok := backend.engine.(*bor.Bor); ok { - borDb = casted.DB - } - apiList := commands.APIList(chainKv, borDb, ethRpcClient, txPoolRpcClient, miningRpcClient, starkNetRpcClient, ff, stateCache, blockReader, httpRpcCfg) - go func() { - if err := cli.StartRpcServer(ctx, httpRpcCfg, apiList); err != nil { - log.Error(err.Error()) - return - } - }() + var borDb kv.RoDB + if casted, ok := backend.engine.(*bor.Bor); ok { + borDb = casted.DB } + apiList := commands.APIList(chainKv, borDb, ethRpcClient, 
txPoolRpcClient, miningRpcClient, ff, stateCache, blockReader, backend.agg, httpRpcCfg) + authApiList := commands.AuthAPIList(chainKv, ethRpcClient, txPoolRpcClient, miningRpcClient, ff, stateCache, blockReader, backend.agg, httpRpcCfg) + go func() { + if err := cli.StartRpcServer(ctx, httpRpcCfg, apiList, authApiList); err != nil { + log.Error(err.Error()) + return + } + }() // Register the backend on the node stack.RegisterLifecycle(backend) @@ -786,43 +851,49 @@ func (s *Ethereum) NodesInfo(limit int) (*remote.NodesInfoReply, error) { } // sets up blockReader and client downloader -func (s *Ethereum) setUpBlockReader(ctx context.Context, isSnapshotEnabled bool, cfg *ethconfig.Config) (services.FullBlockReader, *snapshotsync.RoSnapshots, error) { - if !isSnapshotEnabled { +func (s *Ethereum) setUpBlockReader(ctx context.Context, dirs datadir.Dirs, snConfig ethconfig.Snapshot, downloaderCfg *downloadercfg.Cfg) (services.FullBlockReader, *snapshotsync.RoSnapshots, *libstate.Aggregator22, error) { + if !snConfig.Enabled { blockReader := snapshotsync.NewBlockReader() - return blockReader, nil, nil - } - - allSnapshots := snapshotsync.NewRoSnapshots(cfg.Snapshot, cfg.Dirs.Snap) - _, err := snapshotsync.EnforceSnapshotsInvariant(s.chainDB, cfg.Dirs.Snap, allSnapshots, s.notifications.Events) - if err != nil { - return nil, nil, err + return blockReader, nil, nil, nil } + allSnapshots := snapshotsync.NewRoSnapshots(snConfig, dirs.Snap) + var err error blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) - if !cfg.Snapshot.NoDownloader { - if cfg.Snapshot.DownloaderAddr != "" { + if !snConfig.NoDownloader { + if snConfig.DownloaderAddr != "" { // connect to external Downloader - s.downloaderClient, err = downloadergrpc.NewClient(ctx, cfg.Snapshot.DownloaderAddr) + s.downloaderClient, err = downloadergrpc.NewClient(ctx, snConfig.DownloaderAddr) } else { // start embedded Downloader - s.downloader, err = downloader.New(cfg.Downloader) + s.downloader, err = 
downloader.New(downloaderCfg) if err != nil { - return nil, nil, err + return nil, nil, nil, err } go downloader.MainLoop(ctx, s.downloader, true) bittorrentServer, err := downloader.NewGrpcServer(s.downloader) if err != nil { - return nil, nil, fmt.Errorf("new server: %w", err) + return nil, nil, nil, fmt.Errorf("new server: %w", err) } s.downloaderClient = direct.NewDownloaderClient(bittorrentServer) } if err != nil { - return nil, nil, err + return nil, nil, nil, err } } - return blockReader, allSnapshots, nil + + dir.MustExist(dirs.SnapHistory) + agg, err := libstate.NewAggregator22(dirs.SnapHistory, ethconfig.HistoryV3AggregationStep) + if err != nil { + return nil, nil, nil, err + } + if err = agg.ReopenFiles(); err != nil { + return nil, nil, nil, err + } + + return blockReader, allSnapshots, agg, nil } func (s *Ethereum) Peers(ctx context.Context) (*remote.PeersReply, error) { @@ -840,7 +911,7 @@ func (s *Ethereum) Peers(ctx context.Context) (*remote.PeersReply, error) { // Protocols returns all the currently configured // network protocols to start. 
func (s *Ethereum) Protocols() []p2p.Protocol { - var protocols []p2p.Protocol + protocols := make([]p2p.Protocol, 0, len(s.sentryServers)) for i := range s.sentryServers { protocols = append(protocols, s.sentryServers[i].Protocol) } @@ -853,7 +924,7 @@ func (s *Ethereum) Start() error { s.sentriesClient.StartStreamLoops(s.sentryCtx) time.Sleep(10 * time.Millisecond) // just to reduce logs order confusion - go stages2.StageLoop(s.sentryCtx, s.chainDB, s.stagedSync, s.sentriesClient.Hd, s.notifications, s.sentriesClient.UpdateHead, s.waitForStageLoopStop, s.config.Sync.LoopThrottle) + go stages2.StageLoop(s.sentryCtx, s.chainConfig, s.chainDB, s.stagedSync, s.sentriesClient.Hd, s.notifications, s.sentriesClient.UpdateHead, s.waitForStageLoopStop, s.config.Sync.LoopThrottle) return nil } @@ -863,6 +934,9 @@ func (s *Ethereum) Start() error { func (s *Ethereum) Stop() error { // Stop all the peer-related stuff first. s.sentryCancel() + if s.unsubscribeEthstat != nil { + s.unsubscribeEthstat() + } if s.downloader != nil { s.downloader.Close() } @@ -892,6 +966,9 @@ func (s *Ethereum) Stop() error { if s.txPool2DB != nil { s.txPool2DB.Close() } + if s.agg != nil { + s.agg.Close() + } return nil } @@ -899,11 +976,15 @@ func (s *Ethereum) ChainDB() kv.RwDB { return s.chainDB } +func (s *Ethereum) ChainConfig() *params.ChainConfig { + return s.chainConfig +} + func (s *Ethereum) StagedSync() *stagedsync.Sync { return s.stagedSync } -func (s *Ethereum) Notifications() *stagedsync.Notifications { +func (s *Ethereum) Notifications() *shards.Notifications { return s.notifications } diff --git a/eth/calltracer/calltracer.go b/eth/calltracer/calltracer.go index 2c697a3f2ab..35fac03f0eb 100644 --- a/eth/calltracer/calltracer.go +++ b/eth/calltracer/calltracer.go @@ -11,21 +11,17 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/crypto" - 
"github.com/ledgerwatch/log/v3" ) type CallTracer struct { - froms map[common.Address]struct{} - tos map[common.Address]bool // address -> isCreated - hasTEVM func(contractHash common.Hash) (bool, error) + froms map[common.Address]struct{} + tos map[common.Address]bool // address -> isCreated } -func NewCallTracer(hasTEVM func(contractHash common.Hash) (bool, error)) *CallTracer { +func NewCallTracer() *CallTracer { return &CallTracer{ - froms: make(map[common.Address]struct{}), - tos: make(map[common.Address]bool), - hasTEVM: hasTEVM, + froms: make(map[common.Address]struct{}), + tos: make(map[common.Address]bool), } } @@ -38,15 +34,8 @@ func (ct *CallTracer) CaptureStart(evm *vm.EVM, depth int, from common.Address, } if !created && create { - if len(code) > 0 && ct.hasTEVM != nil { - has, err := ct.hasTEVM(common.BytesToHash(crypto.Keccak256(code))) - if !has { - ct.tos[to] = true - } - - if err != nil { - log.Warn("while CaptureStart", "err", err) - } + if len(code) > 0 { + ct.tos[to] = true } } } @@ -87,7 +76,6 @@ func (ct *CallTracer) WriteToDb(tx kv.StatelessWriteTx, block *types.Block, vmCo var blockNumEnc [8]byte binary.BigEndian.PutUint64(blockNumEnc[:], block.Number().Uint64()) var prev common.Address - var created bool for j, addr := range list { if j > 0 && prev == addr { continue @@ -100,12 +88,6 @@ func (ct *CallTracer) WriteToDb(tx kv.StatelessWriteTx, block *types.Block, vmCo if _, ok := ct.tos[addr]; ok { v[length.Addr] |= 2 } - // TEVM marking still untranslated contracts - if vmConfig.EnableTEMV { - if created = ct.tos[addr]; created { - v[length.Addr] |= 4 - } - } if j == 0 { if err := tx.Append(kv.CallTraceSet, blockNumEnc[:], v[:]); err != nil { return err diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index dc54c9b31cb..01fbe11008b 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -39,6 +39,9 @@ import ( "github.com/ledgerwatch/erigon/params/networkname" ) +// AggregationStep number of transactions in 
smallest static file +const HistoryV3AggregationStep = 3_125_000 // 100M / 32 + // FullNodeGPO contains default gasprice oracle settings for full node. var FullNodeGPO = gasprice.Config{ Blocks: 20, @@ -64,6 +67,7 @@ var LightClientGPO = gasprice.Config{ var Defaults = Config{ Sync: Sync{ UseSnapshots: false, + ExecWorkerCount: 1, BlockDownloaderWindow: 32768, BodyDownloadTimeoutSeconds: 30, }, @@ -215,11 +219,12 @@ type Config struct { StateStream bool - MemoryOverlay bool - // Enable WatchTheBurn stage EnabledIssuance bool + // New DB and Snapshots format of history allows: parallel blocks execution, get state as of given transaction without executing whole block.", + HistoryV3 bool + // URL to connect to Heimdall node HeimdallURL string @@ -227,6 +232,8 @@ type Config struct { WithoutHeimdall bool // Ethstats service Ethstats string + // ConsenSUS layer + CL bool // FORK_NEXT_VALUE (see EIP-3675) block override OverrideMergeNetsplitBlock *big.Int `toml:",omitempty"` @@ -237,14 +244,15 @@ type Config struct { type Sync struct { UseSnapshots bool // LoopThrottle sets a minimum time between staged loop iterations - LoopThrottle time.Duration + LoopThrottle time.Duration + ExecWorkerCount int BlockDownloaderWindow int BodyDownloadTimeoutSeconds int // TODO: change to duration } // Chains where snapshots are enabled by default -var ChainsWithSnapshots map[string]struct{} = map[string]struct{}{ +var ChainsWithSnapshots = map[string]struct{}{ networkname.MainnetChainName: {}, networkname.BSCChainName: {}, networkname.GoerliChainName: {}, diff --git a/eth/ethconfig/erigon3_test_disable.go b/eth/ethconfig/erigon3_test_disable.go new file mode 100644 index 00000000000..dff3df5b023 --- /dev/null +++ b/eth/ethconfig/erigon3_test_disable.go @@ -0,0 +1,5 @@ +//go:build !erigon3 + +package ethconfig + +const EnableHistoryV3InTest = false diff --git a/eth/ethconfig/erigon3_test_enable.go b/eth/ethconfig/erigon3_test_enable.go new file mode 100644 index 00000000000..a69e3557849 
--- /dev/null +++ b/eth/ethconfig/erigon3_test_enable.go @@ -0,0 +1,5 @@ +//go:build erigon3 + +package ethconfig + +const EnableHistoryV3InTest = true diff --git a/eth/ethconfig/estimate/esitmated_ram.go b/eth/ethconfig/estimate/esitmated_ram.go new file mode 100644 index 00000000000..97621e6baa1 --- /dev/null +++ b/eth/ethconfig/estimate/esitmated_ram.go @@ -0,0 +1,24 @@ +package estimate + +import ( + "runtime" + + "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/common/cmp" + "github.com/pbnjay/memory" +) + +type estimatedRamPerWorker datasize.ByteSize + +// Workers - return max workers amount based on total Memory/CPU's and estimated RAM per worker +func (r estimatedRamPerWorker) Workers() int { + maxWorkersForGivenMemory := memory.TotalMemory() / uint64(r) + maxWorkersForGivenCPU := runtime.NumCPU() - 1 // reserve 1 cpu for "work-producer thread", also IO software on machine in cloud-providers using 1 CPU + return cmp.InRange(1, maxWorkersForGivenCPU, int(maxWorkersForGivenMemory)) +} + +const ( + IndexSnapshot = estimatedRamPerWorker(2 * datasize.MB) //elias-fano index building is single-threaded + CompressSnapshot = estimatedRamPerWorker(1 * datasize.GB) //1-file-compression is multi-threaded + ReconstituteState = estimatedRamPerWorker(4 * datasize.GB) //state-reconstitution is multi-threaded +) diff --git a/eth/ethconsensusconfig/config.go b/eth/ethconsensusconfig/config.go index fadd2ab887a..2d52c260050 100644 --- a/eth/ethconsensusconfig/config.go +++ b/eth/ethconsensusconfig/config.go @@ -1,6 +1,7 @@ package ethconsensusconfig import ( + "github.com/ledgerwatch/erigon-lib/kv" "path/filepath" "github.com/davecgh/go-spew/spew" @@ -18,7 +19,7 @@ import ( "github.com/ledgerwatch/log/v3" ) -func CreateConsensusEngine(chainConfig *params.ChainConfig, logger log.Logger, config interface{}, notify []string, noverify bool, HeimdallURL string, WithoutHeimdall bool, datadir string, snapshots *snapshotsync.RoSnapshots) consensus.Engine { +func 
CreateConsensusEngine(chainConfig *params.ChainConfig, logger log.Logger, config interface{}, notify []string, noverify bool, HeimdallURL string, WithoutHeimdall bool, datadir string, snapshots *snapshotsync.RoSnapshots, readonly bool, chainDb ...kv.RwDB) consensus.Engine { var eng consensus.Engine switch consensusCfg := config.(type) { @@ -45,24 +46,24 @@ func CreateConsensusEngine(chainConfig *params.ChainConfig, logger log.Logger, c } case *params.ConsensusSnapshotConfig: if chainConfig.Clique != nil { - eng = clique.New(chainConfig, consensusCfg, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory)) + eng = clique.New(chainConfig, consensusCfg, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory, readonly)) } case *params.AuRaConfig: if chainConfig.Aura != nil { var err error - eng, err = aura.NewAuRa(chainConfig.Aura, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory), chainConfig.Aura.Etherbase, consensusconfig.GetConfigByChain(chainConfig.ChainName)) + eng, err = aura.NewAuRa(chainConfig.Aura, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory, readonly), chainConfig.Aura.Etherbase, consensusconfig.GetConfigByChain(chainConfig.ChainName)) if err != nil { panic(err) } } case *params.ParliaConfig: if chainConfig.Parlia != nil { - eng = parlia.New(chainConfig, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory), snapshots) + eng = parlia.New(chainConfig, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory, readonly), snapshots, chainDb[0]) } case *params.BorConfig: if chainConfig.Bor != nil { borDbPath := filepath.Join(datadir, "bor") // bor consensus path: datadir/bor - eng = bor.New(chainConfig, db.OpenDatabase(borDbPath, logger, false), HeimdallURL, WithoutHeimdall) + eng = bor.New(chainConfig, db.OpenDatabase(borDbPath, logger, false, readonly), HeimdallURL, WithoutHeimdall) } } diff --git a/eth/filters/filter.go b/eth/filters/filter.go deleted file mode 100644 
index df78dffeb29..00000000000 --- a/eth/filters/filter.go +++ /dev/null @@ -1,346 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package filters - -import ( - "context" - "errors" - "math/big" - - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/bloombits" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/event" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/log/v3" -) - -type Backend interface { - HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) - HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error) - GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) - GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) - - SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription - SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription - SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription - SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription - SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription - - BloomStatus() (uint64, uint64) - ServiceFilter(ctx 
context.Context, session *bloombits.MatcherSession) -} - -// Filter can be used to retrieve and filter logs. -type Filter struct { - backend Backend - - addresses []common.Address - topics [][]common.Hash - - block common.Hash // Block hash if filtering a single block - begin, end int64 // Range interval if filtering multiple blocks - - matcher *bloombits.Matcher -} - -// NewRangeFilter creates a new filter which uses a bloom filter on blocks to -// figure out whether a particular block is interesting or not. -func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter { - log.Error("Log filter not used in Erigon, please see implementation of eth_getLogs in RPCDaemon for more details") - // Flatten the address and topic filter clauses into a single bloombits filter - // system. Since the bloombits are not positional, nil topics are permitted, - // which get flattened into a nil byte slice. - var filters [][][]byte - if len(addresses) > 0 { - filter := make([][]byte, len(addresses)) - for i, address := range addresses { - filter[i] = address.Bytes() - } - filters = append(filters, filter) - } - for _, topicList := range topics { - filter := make([][]byte, len(topicList)) - for i, topic := range topicList { - filter[i] = topic.Bytes() - } - filters = append(filters, filter) - } - size, _ := backend.BloomStatus() - - // Create a generic filter and convert it into a range filter - filter := newFilter(backend, addresses, topics) - - filter.matcher = bloombits.NewMatcher(size, filters) - filter.begin = begin - filter.end = end - - return filter -} - -// NewBlockFilter creates a new filter which directly inspects the contents of -// a block to figure out whether it is interesting or not. 
-func NewBlockFilter(backend Backend, block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter { - // Create a generic filter and convert it into a block filter - filter := newFilter(backend, addresses, topics) - filter.block = block - return filter -} - -// newFilter creates a generic filter that can either filter based on a block hash, -// or based on range queries. The search criteria needs to be explicitly set. -func newFilter(backend Backend, addresses []common.Address, topics [][]common.Hash) *Filter { - return &Filter{ - backend: backend, - addresses: addresses, - topics: topics, - } -} - -// Logs searches the blockchain for matching log entries, returning all from the -// first block that contains matches, updating the start of the filter accordingly. -func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { - // If we're doing singleton block filtering, execute and return - if f.block != (common.Hash{}) { - header, err := f.backend.HeaderByHash(ctx, f.block) - if err != nil { - return nil, err - } - if header == nil { - return nil, errors.New("unknown block") - } - return f.blockLogs(ctx, header) - } - // Figure out the limits of the filter range - header, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) - if header == nil { - return nil, nil - } - head := header.Number.Uint64() - - if f.begin == -1 { - f.begin = int64(head) - } - end := uint64(f.end) - if f.end == -1 { - end = head - } - // Gather all indexed logs, and finish with non indexed ones - var ( - logs []*types.Log - err error - ) - size, sections := f.backend.BloomStatus() - if indexed := sections * size; indexed > uint64(f.begin) { - if indexed > end { - logs, err = f.indexedLogs(ctx, end) - } else { - logs, err = f.indexedLogs(ctx, indexed-1) - } - if err != nil { - return logs, err - } - } - rest, err := f.unindexedLogs(ctx, end) - logs = append(logs, rest...) 
- return logs, err -} - -// indexedLogs returns the logs matching the filter criteria based on the bloom -// bits indexed available locally or via the network. -func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) { - // Create a matcher session and request servicing from the backend - matches := make(chan uint64, 64) - - session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches) - if err != nil { - return nil, err - } - defer session.Close() - - f.backend.ServiceFilter(ctx, session) - - // Iterate over the matches until exhausted or context closed - var logs []*types.Log - - for { - select { - case number, ok := <-matches: - // Abort if all matches have been fulfilled - if !ok { - err := session.Error() - if err == nil { - f.begin = int64(end) + 1 - } - return logs, err - } - f.begin = int64(number) + 1 - - // Retrieve the suggested block and pull any truly matching logs - header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(number)) - if header == nil || err != nil { - return logs, err - } - found, err := f.checkMatches(ctx, header) - if err != nil { - return logs, err - } - logs = append(logs, found...) - - case <-ctx.Done(): - return logs, ctx.Err() - } - } -} - -// unindexedLogs returns the logs matching the filter criteria based on raw block -// iteration and bloom matching. -func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) { - var logs []*types.Log - - for ; f.begin <= int64(end); f.begin++ { - header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin)) - if header == nil || err != nil { - return logs, err - } - found, err := f.blockLogs(ctx, header) - if err != nil { - return logs, err - } - logs = append(logs, found...) - } - return logs, nil -} - -// blockLogs returns the logs matching the filter criteria within a single block. 
-func (f *Filter) blockLogs(ctx context.Context, header *types.Header) (logs []*types.Log, err error) { - if bloomFilter(header.Bloom, f.addresses, f.topics) { - found, err := f.checkMatches(ctx, header) - if err != nil { - return logs, err - } - logs = append(logs, found...) - } - return logs, nil -} - -// checkMatches checks if the receipts belonging to the given header contain any log events that -// match the filter criteria. This function is called when the bloom filter signals a potential match. -func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs []*types.Log, err error) { - // Get the logs of the block - logsList, err := f.backend.GetLogs(ctx, header.Hash()) - if err != nil { - return nil, err - } - var unfiltered []*types.Log - for _, logs := range logsList { - unfiltered = append(unfiltered, logs...) - } - logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics) - if len(logs) > 0 { - // We have matching logs, check if we need to resolve full logs via the light client - if logs[0].TxHash == (common.Hash{}) { - receipts, err := f.backend.GetReceipts(ctx, header.Hash()) - if err != nil { - return nil, err - } - unfiltered = unfiltered[:0] - for _, receipt := range receipts { - unfiltered = append(unfiltered, receipt.Logs...) - } - logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics) - } - return logs, nil - } - return nil, nil -} - -func includes(addresses []common.Address, a common.Address) bool { - for _, addr := range addresses { - if addr == a { - return true - } - } - - return false -} - -// filterLogs creates a slice of logs matching the given criteria. 
-func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log { - var ret []*types.Log -Logs: - for _, log := range logs { - if fromBlock != nil && fromBlock.Int64() >= 0 && uint64(fromBlock.Int64()) > log.BlockNumber { - continue - } - if toBlock != nil && toBlock.Int64() >= 0 && uint64(toBlock.Int64()) < log.BlockNumber { - continue - } - - if len(addresses) > 0 && !includes(addresses, log.Address) { - continue - } - // If the to filtered topics is greater than the amount of topics in logs, skip. - if len(topics) > len(log.Topics) { - continue - } - for i, sub := range topics { - match := len(sub) == 0 // empty rule set == wildcard - for _, topic := range sub { - if log.Topics[i] == topic { - match = true - break - } - } - if !match { - continue Logs - } - } - ret = append(ret, log) - } - return ret -} - -func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]common.Hash) bool { - if len(addresses) > 0 { - var included bool - for _, addr := range addresses { - if types.BloomLookup(bloom, addr) { - included = true - break - } - } - if !included { - return false - } - } - - for _, sub := range topics { - included := len(sub) == 0 // empty rule set == wildcard - for _, topic := range sub { - if types.BloomLookup(bloom, topic) { - included = true - break - } - } - if !included { - return false - } - } - return true -} diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go deleted file mode 100644 index d0d65925fdd..00000000000 --- a/eth/filters/filter_system.go +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package filters implements an ethereum filtering system for block, -// transactions and log events. -package filters - -import ( - "fmt" - "sync" - "time" - - ethereum "github.com/ledgerwatch/erigon" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/debug" - "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/event" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/log/v3" -) - -// Type determines the kind of filter and is used to put the filter in to -// the correct bucket when added. -type Type byte - -const ( - // UnknownSubscription indicates an unknown subscription type - UnknownSubscription Type = iota - // LogsSubscription queries for new or removed (chain reorg) logs - LogsSubscription - // PendingLogsSubscription queries for logs in pending blocks - PendingLogsSubscription - // MinedAndPendingLogsSubscription queries for logs in mined and pending blocks. - MinedAndPendingLogsSubscription - // PendingTransactionsSubscription queries tx hashes for pending - // transactions entering the pending state - PendingTransactionsSubscription - // BlocksSubscription queries hashes for blocks that are imported - BlocksSubscription - // LastSubscription keeps track of the last index - LastIndexSubscription -) - -const ( - // txChanSize is the size of channel listening to NewTxsEvent. - // The number is referenced from the size of tx pool. - txChanSize = 4096 - // rmLogsChanSize is the size of channel listening to RemovedLogsEvent. 
- rmLogsChanSize = 10 - // logsChanSize is the size of channel listening to LogsEvent. - logsChanSize = 10 - // chainEvChanSize is the size of channel listening to ChainEvent. - chainEvChanSize = 10 -) - -type subscription struct { - id rpc.ID - typ Type - created time.Time - logsCrit ethereum.FilterQuery - logs chan []*types.Log - hashes chan []common.Hash - headers chan *types.Header - installed chan struct{} // closed when the filter is installed - err chan error // closed when the filter is uninstalled -} - -// EventSystem creates subscriptions, processes events and broadcasts them to the -// subscription which match the subscription criteria. -type EventSystem struct { - backend Backend - - // Subscriptions - txsSub event.Subscription // Subscription for new transaction event - logsSub event.Subscription // Subscription for new log event - rmLogsSub event.Subscription // Subscription for removed log event - chainSub event.Subscription // Subscription for new chain event - - // Channels - install chan *subscription // install filter for event notification - uninstall chan *subscription // remove filter for event notification - txsCh chan core.NewTxsEvent // Channel to receive new transactions event - logsCh chan []*types.Log // Channel to receive new log event - pendingLogsCh chan []*types.Log // Channel to receive new log event - rmLogsCh chan core.RemovedLogsEvent // Channel to receive removed log event - chainCh chan core.ChainEvent // Channel to receive new chain event -} - -// NewEventSystem creates a new manager that listens for event on the given mux, -// parses and filters them. It uses the all map to retrieve filter changes. The -// work loop holds its own index that is used to forward events to filters. -// -// The returned manager has a loop that needs to be stopped with the Stop function -// or by stopping the given mux. 
-func NewEventSystem(backend Backend) *EventSystem { - m := &EventSystem{ - backend: backend, - install: make(chan *subscription), - uninstall: make(chan *subscription), - txsCh: make(chan core.NewTxsEvent, txChanSize), - logsCh: make(chan []*types.Log, logsChanSize), - rmLogsCh: make(chan core.RemovedLogsEvent, rmLogsChanSize), - pendingLogsCh: make(chan []*types.Log, logsChanSize), - chainCh: make(chan core.ChainEvent, chainEvChanSize), - } - - // Subscribe events - m.txsSub = m.backend.SubscribeNewTxsEvent(m.txsCh) - m.logsSub = m.backend.SubscribeLogsEvent(m.logsCh) - m.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh) - m.chainSub = m.backend.SubscribeChainEvent(m.chainCh) - //m.pendingLogsSub = m.backend.SubscribePendingLogsEvent(m.pendingLogsCh) - - // Make sure none of the subscriptions are empty - //if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil || m.pendingLogsSub == nil { - if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil { - log.Crit("Subscribe for event system failed") - } - - go m.eventLoop() - return m -} - -// Subscription is created when the client registers itself for a particular event. -type Subscription struct { - ID rpc.ID - f *subscription - es *EventSystem - unsubOnce sync.Once -} - -// Err returns a channel that is closed when unsubscribed. -func (sub *Subscription) Err() <-chan error { - return sub.f.err -} - -// Unsubscribe uninstalls the subscription from the event broadcast loop. -func (sub *Subscription) Unsubscribe() { - sub.unsubOnce.Do(func() { - uninstallLoop: - for { - // write uninstall request and consume logs/hashes. This prevents - // the eventLoop broadcast method to deadlock when writing to the - // filter event channel while the subscription loop is waiting for - // this method to return (and thus not reading these events). 
- select { - case sub.es.uninstall <- sub.f: - break uninstallLoop - case <-sub.f.logs: - case <-sub.f.hashes: - case <-sub.f.headers: - } - } - - // wait for filter to be uninstalled in work loop before returning - // this ensures that the manager won't use the event channel which - // will probably be closed by the client asap after this method returns. - <-sub.Err() - }) -} - -// subscribe installs the subscription in the event broadcast loop. -func (es *EventSystem) subscribe(sub *subscription) *Subscription { - es.install <- sub - <-sub.installed - return &Subscription{ID: sub.id, f: sub, es: es} -} - -// SubscribeLogs creates a subscription that will write all logs matching the -// given criteria to the given logs channel. Default value for the from and to -// block is "latest". If the fromBlock > toBlock an error is returned. -func (es *EventSystem) SubscribeLogs(crit ethereum.FilterQuery, logs chan []*types.Log) (*Subscription, error) { - var from, to rpc.BlockNumber - if crit.FromBlock == nil { - from = rpc.LatestBlockNumber - } else { - from = rpc.BlockNumber(crit.FromBlock.Int64()) - } - if crit.ToBlock == nil { - to = rpc.LatestBlockNumber - } else { - to = rpc.BlockNumber(crit.ToBlock.Int64()) - } - - // only interested in pending logs - if from == rpc.PendingBlockNumber && to == rpc.PendingBlockNumber { - return es.subscribePendingLogs(crit, logs), nil - } - // only interested in new mined logs - if from == rpc.LatestBlockNumber && to == rpc.LatestBlockNumber { - return es.subscribeLogs(crit, logs), nil - } - // only interested in mined logs within a specific block range - if from >= 0 && to >= 0 && to >= from { - return es.subscribeLogs(crit, logs), nil - } - // interested in mined logs from a specific block number, new logs and pending logs - if from >= rpc.LatestBlockNumber && to == rpc.PendingBlockNumber { - return es.subscribeMinedPendingLogs(crit, logs), nil - } - // interested in logs from a specific block number to new mined blocks - if from 
>= 0 && to == rpc.LatestBlockNumber { - return es.subscribeLogs(crit, logs), nil - } - return nil, fmt.Errorf("invalid from and to block combination: from > to") -} - -// subscribeMinedPendingLogs creates a subscription that returned mined and -// pending logs that match the given criteria. -func (es *EventSystem) subscribeMinedPendingLogs(crit ethereum.FilterQuery, logs chan []*types.Log) *Subscription { - sub := &subscription{ - id: rpc.NewID(), - typ: MinedAndPendingLogsSubscription, - logsCrit: crit, - created: time.Now(), - logs: logs, - hashes: make(chan []common.Hash), - headers: make(chan *types.Header), - installed: make(chan struct{}), - err: make(chan error), - } - return es.subscribe(sub) -} - -// subscribeLogs creates a subscription that will write all logs matching the -// given criteria to the given logs channel. -func (es *EventSystem) subscribeLogs(crit ethereum.FilterQuery, logs chan []*types.Log) *Subscription { - sub := &subscription{ - id: rpc.NewID(), - typ: LogsSubscription, - logsCrit: crit, - created: time.Now(), - logs: logs, - hashes: make(chan []common.Hash), - headers: make(chan *types.Header), - installed: make(chan struct{}), - err: make(chan error), - } - return es.subscribe(sub) -} - -// subscribePendingLogs creates a subscription that writes transaction hashes for -// transactions that enter the transaction pool. -func (es *EventSystem) subscribePendingLogs(crit ethereum.FilterQuery, logs chan []*types.Log) *Subscription { - sub := &subscription{ - id: rpc.NewID(), - typ: PendingLogsSubscription, - logsCrit: crit, - created: time.Now(), - logs: logs, - hashes: make(chan []common.Hash), - headers: make(chan *types.Header), - installed: make(chan struct{}), - err: make(chan error), - } - return es.subscribe(sub) -} - -// SubscribeNewHeads creates a subscription that writes the header of a block that is -// imported in the chain. 
-func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscription { - sub := &subscription{ - id: rpc.NewID(), - typ: BlocksSubscription, - created: time.Now(), - logs: make(chan []*types.Log), - hashes: make(chan []common.Hash), - headers: headers, - installed: make(chan struct{}), - err: make(chan error), - } - return es.subscribe(sub) -} - -// SubscribePendingTxs creates a subscription that writes transaction hashes for -// transactions that enter the transaction pool. -func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscription { - sub := &subscription{ - id: rpc.NewID(), - typ: PendingTransactionsSubscription, - created: time.Now(), - logs: make(chan []*types.Log), - hashes: hashes, - headers: make(chan *types.Header), - installed: make(chan struct{}), - err: make(chan error), - } - return es.subscribe(sub) -} - -type filterIndex map[Type]map[rpc.ID]*subscription - -func (es *EventSystem) handleLogs(filters filterIndex, ev []*types.Log) { - if len(ev) == 0 { - return - } - for _, f := range filters[LogsSubscription] { - matchedLogs := filterLogs(ev, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics) - if len(matchedLogs) > 0 { - f.logs <- matchedLogs - } - } -} - -func (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) { - if len(ev) == 0 { - return - } - for _, f := range filters[PendingLogsSubscription] { - matchedLogs := filterLogs(ev, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics) - if len(matchedLogs) > 0 { - f.logs <- matchedLogs - } - } -} - -func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) { - for _, f := range filters[LogsSubscription] { - matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics) - if len(matchedLogs) > 0 { - f.logs <- matchedLogs - } - } -} - -func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent) 
{ - hashes := make([]common.Hash, 0, len(ev.Txs)) - for _, tx := range ev.Txs { - hashes = append(hashes, tx.Hash()) - } - for _, f := range filters[PendingTransactionsSubscription] { - f.hashes <- hashes - } -} - -func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) { - for _, f := range filters[BlocksSubscription] { - f.headers <- ev.Block.Header() - } -} - -// eventLoop (un)installs filters and processes mux events. -func (es *EventSystem) eventLoop() { - // Ensure all subscriptions get cleaned up - defer func() { - es.txsSub.Unsubscribe() - es.logsSub.Unsubscribe() - es.rmLogsSub.Unsubscribe() - //es.pendingLogsSub.Unsubscribe() - es.chainSub.Unsubscribe() - debug.LogPanic() - }() - - index := make(filterIndex) - for i := UnknownSubscription; i < LastIndexSubscription; i++ { - index[i] = make(map[rpc.ID]*subscription) - } - - for { - select { - case ev := <-es.txsCh: - es.handleTxsEvent(index, ev) - case ev := <-es.logsCh: - es.handleLogs(index, ev) - case ev := <-es.rmLogsCh: - es.handleRemovedLogs(index, ev) - case ev := <-es.pendingLogsCh: - es.handlePendingLogs(index, ev) - case ev := <-es.chainCh: - es.handleChainEvent(index, ev) - - case f := <-es.install: - if f.typ == MinedAndPendingLogsSubscription { - // the type are logs and pending logs subscriptions - index[LogsSubscription][f.id] = f - index[PendingLogsSubscription][f.id] = f - } else { - index[f.typ][f.id] = f - } - close(f.installed) - - case f := <-es.uninstall: - if f.typ == MinedAndPendingLogsSubscription { - // the type are logs and pending logs subscriptions - delete(index[LogsSubscription], f.id) - delete(index[PendingLogsSubscription], f.id) - } else { - delete(index[f.typ], f.id) - } - close(f.err) - - // System stopped - case <-es.txsSub.Err(): - return - case <-es.logsSub.Err(): - return - case <-es.rmLogsSub.Err(): - return - case <-es.chainSub.Err(): - return - } - } -} diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 
673f53bcec5..1013485d365 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -83,7 +83,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) { if bf.baseFee = bf.header.BaseFee; bf.baseFee == nil { bf.baseFee = new(big.Int) } - if chainconfig.IsLondon(uint64(bf.blockNumber + 1)) { + if chainconfig.IsLondon(bf.blockNumber + 1) { bf.nextBaseFee = misc.CalcBaseFee(chainconfig, bf.header) } else { bf.nextBaseFee = new(big.Int) @@ -194,10 +194,11 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, lastBlock rpc.Block // actually processed range is returned to avoid ambiguity when parts of the requested range // are not available or when the head has changed during processing this request. // Three arrays are returned based on the processed blocks: -// - reward: the requested percentiles of effective priority fees per gas of transactions in each -// block, sorted in ascending order and weighted by gas used. -// - baseFee: base fee per gas in the given block -// - gasUsedRatio: gasUsed/gasLimit in the given block +// - reward: the requested percentiles of effective priority fees per gas of transactions in each +// block, sorted in ascending order and weighted by gas used. +// - baseFee: base fee per gas in the given block +// - gasUsedRatio: gasUsed/gasLimit in the given block +// // Note: baseFee includes the next block after the newest of the returned range, because this // value can be derived from the newest block. 
func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) { diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index 50557984de5..60ec3678de5 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -198,7 +198,10 @@ func (t transactionsByGasPrice) Less(i, j int) bool { func (t *transactionsByGasPrice) Push(x interface{}) { // Push and Pop use pointer receivers because they modify the slice's length, // not just its contents. - l := x.(types.Transaction) + l, ok := x.(types.Transaction) + if !ok { + log.Error("Type assertion failure", "err", "cannot get types.Transaction from interface") + } t.txs = append(t.txs, l) } @@ -224,10 +227,15 @@ func (gpo *Oracle) getBlockPrices(ctx context.Context, blockNum uint64, limit in return err } block, err := gpo.backend.BlockByNumber(ctx, rpc.BlockNumber(blockNum)) - if block == nil { + if err != nil { log.Error("gasprice.go: getBlockPrices", "err", err) return err } + + if block == nil { + return nil + } + blockTxs := block.Transactions() plainTxs := make([]types.Transaction, len(blockTxs)) copy(plainTxs, blockTxs) diff --git a/eth/protocols/eth/discovery.go b/eth/protocols/eth/discovery.go index 4bd009da7e4..7613a28d255 100644 --- a/eth/protocols/eth/discovery.go +++ b/eth/protocols/eth/discovery.go @@ -18,6 +18,7 @@ package eth import ( "fmt" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/forkid" "github.com/ledgerwatch/erigon/p2p/enr" diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index 034ddaa66af..85c2108cf30 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -118,10 +118,14 @@ func TestGetBlockReceipts(t *testing.T) { expect, err := rlp.EncodeToBytes(eth.ReceiptsRLPPacket66{RequestId: 1, ReceiptsRLPPacket: receipts}) require.NoError(t, err) - m.ReceiveWg.Wait() - 
sent := m.SentMessage(0) - require.Equal(t, eth.ToProto[m.SentryClient.Protocol()][eth.ReceiptsMsg], sent.Id) - require.Equal(t, expect, sent.Data) + if m.HistoryV3 { + // GetReceiptsMsg disabled for historyV3 + } else { + m.ReceiveWg.Wait() + sent := m.SentMessage(0) + require.Equal(t, eth.ToProto[m.SentryClient.Protocol()][eth.ReceiptsMsg], sent.Id) + require.Equal(t, expect, sent.Data) + } } // newTestBackend creates a chain with a number of explicitly defined blocks and diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index ee5e2b3e74d..4b30b0462d7 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -85,7 +85,7 @@ func AnswerGetBlockHeadersQuery(db kv.Tx, query *GetBlockHeadersPacket, blockRea unknown = true } else { query.Origin.Hash, query.Origin.Number = rawdb.ReadAncestor(db, query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical, blockReader) - unknown = (query.Origin.Hash == common.Hash{}) + unknown = query.Origin.Hash == common.Hash{} } case hashMode && !query.Reverse: // Hash based traversal towards the leaf block @@ -133,10 +133,9 @@ func AnswerGetBlockHeadersQuery(db kv.Tx, query *GetBlockHeadersPacket, blockRea func AnswerGetBlockBodiesQuery(db kv.Tx, query GetBlockBodiesPacket) []rlp.RawValue { //nolint:unparam // Gather blocks until the fetch or network limits is reached - var ( - bytes int - bodies []rlp.RawValue - ) + var bytes int + bodies := make([]rlp.RawValue, 0, len(query)) + for lookups, hash := range query { if bytes >= softResponseLimit || len(bodies) >= MaxBodiesServe || lookups >= 2*MaxBodiesServe { diff --git a/eth/stagedsync/README.md b/eth/stagedsync/README.md index 79d216cdf0e..2cf35b1d493 100644 --- a/eth/stagedsync/README.md +++ b/eth/stagedsync/README.md @@ -91,7 +91,7 @@ At that stage, we download bodies for block headers that we already downloaded. That is the most intensive stage for the network connection, the vast majority of data is downloaded here. 
-### Stage 6: [Recover Senders Stage](/eth/stagedsync/stage_senders.go) +### Stage 5: [Recover Senders Stage](/eth/stagedsync/stage_senders.go) This stage recovers and stores senders for each transaction in each downloaded block. @@ -99,7 +99,7 @@ This is also a CPU intensive stage and also benefits from multi-core CPUs. This stage doesn't use any network connection. -### Stage 7: [Execute Blocks Stage](/eth/stagedsync/stage_execute.go) +### Stage 6: [Execute Blocks Stage](/eth/stagedsync/stage_execute.go) During this stage, we execute block-by-block everything that we downloaded before. @@ -113,11 +113,11 @@ This stage is disk intensive. This stage can spawn unwinds if the block execution fails. -### Stage 8: [Transpile marked VM contracts to TEVM](/eth/stagedsync/stage_tevm.go) +### Stage 7: [Transpile marked VM contracts to TEVM](/eth/stagedsync/stage_tevm.go) [TODO] -### Stage 10: [Generate Hashed State Stage](/eth/stagedsync/stage_hashstate.go) +### Stage 8: [Generate Hashed State Stage](/eth/stagedsync/stage_hashstate.go) Erigon during execution uses Plain state storage. @@ -129,7 +129,7 @@ If the hashed state is not empty, then we are looking at the History ChangeSets This stage doesn't use a network connection. -### Stage 11: [Compute State Root Stage](/eth/stagedsync/stage_interhashes.go) +### Stage 9: [Compute State Root Stage](/eth/stagedsync/stage_interhashes.go) This stage build the Merkle trie and checks the root hash for the current state. @@ -143,11 +143,11 @@ If the root hash doesn't match, it initiates an unwind one block backwards. This stage doesn't use a network connection. 
-### Stage 12: [Generate call traces index](/eth/stagedsync/stage_call_traces.go) +### Stage 10: [Generate call traces index](/eth/stagedsync/stage_call_traces.go) [TODO] -### Stages 13, 14, 15, 16: Generate Indexes Stages [8, 9](/eth/stagedsync/stage_indexes.go), [10](/eth/stagedsync/stage_log_index.go), and [11](/eth/stagedsync/stage_txlookup.go) +### Stages [11, 12](/eth/stagedsync/stage_indexes.go), [13](/eth/stagedsync/stage_log_index.go), and [14](/eth/stagedsync/stage_txlookup.go): Generate Indexes There are 4 indexes that are generated during sync. @@ -171,7 +171,7 @@ This index sets up a link from the [TODO] to [TODO]. This index sets up a link from the transaction hash to the block number. -### Stage 17: [Transaction Pool Stage](/eth/stagedsync/stage_txpool.go) +### Stage 15: [Transaction Pool Stage](/eth/stagedsync/stage_txpool.go) During this stage we start the transaction pool or update its state. For instance, we remove the transactions from the blocks we have downloaded from the pool. @@ -179,6 +179,6 @@ On unwinds, we add the transactions from the blocks we unwind, back to the pool. This stage doesn't use a network connection. -### Stage 18: Finish +### Stage 16: Finish This stage sets the current block number that is then used by [RPC calls](../../cmd/rpcdaemon/Readme.md), such as [`eth_blockNumber`](../../README.md). 
diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index df37c6210e8..cdaca2edb43 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -8,12 +8,28 @@ import ( "github.com/ledgerwatch/erigon/ethdb/prune" ) -func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumulativeIndex CumulativeIndexCfg, blockHashCfg BlockHashesCfg, bodies BodiesCfg, issuance IssuanceCfg, senders SendersCfg, exec ExecuteBlockCfg, trans TranspileCfg, hashState HashStateCfg, trieCfg TrieCfg, history HistoryCfg, logIndex LogIndexCfg, callTraces CallTracesCfg, txLookup TxLookupCfg, finish FinishCfg, test bool) []*Stage { +func DefaultStages(ctx context.Context, sm prune.Mode, snapshots SnapshotsCfg, headers HeadersCfg, cumulativeIndex CumulativeIndexCfg, blockHashCfg BlockHashesCfg, bodies BodiesCfg, issuance IssuanceCfg, senders SendersCfg, exec ExecuteBlockCfg, hashState HashStateCfg, trieCfg TrieCfg, history HistoryCfg, logIndex LogIndexCfg, callTraces CallTracesCfg, txLookup TxLookupCfg, finish FinishCfg, test bool) []*Stage { return []*Stage{ + { + ID: stages.Snapshots, + Description: "Download snapshots", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { + if badBlockUnwind { + return nil + } + return SpawnStageSnapshots(s, ctx, tx, snapshots, firstCycle) + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { + return nil + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { + return SnapshotsPrune(p, snapshots, ctx, tx) + }, + }, { ID: stages.Headers, Description: "Download headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { if badBlockUnwind { return nil } @@ -29,7 +45,7 @@ func DefaultStages(ctx context.Context, sm 
prune.Mode, headers HeadersCfg, cumul { ID: stages.CumulativeIndex, Description: "Write Cumulative Index", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return SpawnStageCumulativeIndex(cumulativeIndex, s, tx, ctx) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -42,7 +58,7 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul { ID: stages.BlockHashes, Description: "Write block hashes", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return SpawnBlockHashStage(s, tx, blockHashCfg, ctx) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -55,7 +71,7 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul { ID: stages.Bodies, Description: "Download block bodies", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return BodiesForward(s, u, ctx, tx, bodies, test, firstCycle) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -68,8 +84,8 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul { ID: stages.Senders, Description: "Recover senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { + return SpawnRecoverSendersStage(senders, s, u, tx, 0, 
ctx, quiet) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindSendersStage(u, tx, senders, ctx) @@ -81,8 +97,8 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul { ID: stages.Execution, Description: "Execute blocks w/o hash checks", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { + return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle, quiet) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindExecutionStage(u, s, tx, ctx, exec, firstCycle) @@ -91,26 +107,11 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul return PruneExecutionStage(p, tx, exec, ctx, firstCycle) }, }, - { - ID: stages.Translation, - Description: "Transpile marked EVM contracts to TEVM", - Disabled: !sm.Experiments.TEVM, - DisabledDescription: "Enable by adding `tevm` to --experiments", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - return SpawnTranspileStage(s, tx, 0, trans, ctx) - }, - Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { - return UnwindTranspileStage(u, s, tx, trans, ctx) - }, - Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { - return PruneTranspileStage(p, tx, trans, firstCycle, ctx) - }, - }, { ID: stages.HashState, Description: "Hash the key in the state", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - return SpawnHashStateStage(s, tx, hashState, ctx) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { + return SpawnHashStateStage(s, tx, hashState, ctx, quiet) }, 
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindHashStateStage(u, s, tx, hashState, ctx) @@ -122,11 +123,18 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul { ID: stages.IntermediateHashes, Description: "Generate intermediate hashes and computing state root", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - _, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { + if exec.chainConfig.IsCancun(0) { + _, err := SpawnVerkleTrie(s, u, tx, trieCfg, ctx) + return err + } + _, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, quiet) return err }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { + if exec.chainConfig.IsCancun(0) { + return UnwindVerkleTrie(u, s, tx, trieCfg, ctx) + } return UnwindIntermediateHashesStage(u, s, tx, trieCfg, ctx) }, Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { @@ -137,7 +145,8 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul ID: stages.CallTraces, Description: "Generate call traces index", DisabledDescription: "Work In Progress", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Disabled: bodies.historyV3, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return SpawnCallTraces(s, tx, callTraces, ctx) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -150,7 +159,8 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul { ID: stages.AccountHistoryIndex, Description: "Generate account history index", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Disabled: 
bodies.historyV3, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return SpawnAccountHistoryIndex(s, tx, history, ctx) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -163,7 +173,8 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul { ID: stages.StorageHistoryIndex, Description: "Generate storage history index", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Disabled: bodies.historyV3, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return SpawnStorageHistoryIndex(s, tx, history, ctx) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -176,7 +187,8 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul { ID: stages.LogIndex, Description: "Generate receipt logs index", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Disabled: bodies.historyV3, + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return SpawnLogIndex(s, tx, logIndex, ctx, 0) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -189,7 +201,7 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul { ID: stages.TxLookup, Description: "Generate tx lookup index", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return SpawnTxLookup(s, tx, 0 /* toBlock */, txLookup, ctx) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -202,7 +214,7 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul { 
ID: stages.Issuance, Description: "Issuance computation", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return SpawnStageIssuance(issuance, s, tx, ctx) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -215,7 +227,7 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul { ID: stages.Finish, Description: "Final: update current block for the RPC API", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, tx kv.RwTx, quiet bool) error { return FinishForward(s, tx, finish, firstCycle) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -234,7 +246,7 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc { ID: stages.Headers, Description: "Download headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return nil }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -244,7 +256,7 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc { ID: stages.Bodies, Description: "Download block bodies", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return BodiesForward(s, u, ctx, tx, bodies, false, false) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -254,7 +266,7 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies 
BodiesCfg, bloc { ID: stages.BlockHashes, Description: "Write block hashes", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return SpawnBlockHashStage(s, tx, blockHashCfg, ctx) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -264,8 +276,8 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc { ID: stages.Senders, Description: "Recover senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { + return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx, quiet) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindSendersStage(u, tx, senders, ctx) @@ -274,8 +286,8 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc { ID: stages.Execution, Description: "Execute blocks w/o hash checks", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { + return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle, quiet) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindExecutionStage(u, s, tx, ctx, exec, firstCycle) @@ -284,8 +296,8 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc { ID: stages.HashState, Description: "Hash the key in the state", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, 
tx kv.RwTx) error { - return SpawnHashStateStage(s, tx, hashState, ctx) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { + return SpawnHashStateStage(s, tx, hashState, ctx, quiet) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindHashStateStage(u, s, tx, hashState, ctx) @@ -294,8 +306,8 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc { ID: stages.IntermediateHashes, Description: "Generate intermediate hashes and computing state root", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - _, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { + _, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, quiet) return err }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { @@ -306,6 +318,7 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc } var DefaultForwardOrder = UnwindOrder{ + stages.Snapshots, stages.Headers, stages.BlockHashes, stages.Bodies, @@ -365,6 +378,7 @@ var StateUnwindOrder = UnwindOrder{ var DefaultPruneOrder = PruneOrder{ stages.Finish, + stages.Snapshots, stages.TxLookup, stages.LogIndex, stages.StorageHistoryIndex, diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go new file mode 100644 index 00000000000..8ddeff4e56f --- /dev/null +++ b/eth/stagedsync/exec3.go @@ -0,0 +1,974 @@ +package stagedsync + +import ( + "container/heap" + "context" + "encoding/binary" + "fmt" + "math/big" + "os" + "path/filepath" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/RoaringBitmap/roaring/roaring64" + "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/dir" + 
"github.com/ledgerwatch/erigon-lib/etl" + "github.com/ledgerwatch/erigon-lib/kv" + kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" + state2 "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/cmd/state/exec3" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/log/v3" + atomic2 "go.uber.org/atomic" + "golang.org/x/sync/semaphore" +) + +func NewProgress(prevOutputBlockNum, commitThreshold uint64) *Progress { + return &Progress{prevTime: time.Now(), prevOutputBlockNum: prevOutputBlockNum, commitThreshold: commitThreshold} +} + +type Progress struct { + prevTime time.Time + prevCount uint64 + prevOutputBlockNum uint64 + prevRepeatCount uint64 + commitThreshold uint64 +} + +func (p *Progress) Log(logPrefix string, rs *state.State22, rws state.TxTaskQueue, count, inputBlockNum, outputBlockNum, repeatCount uint64, resultsSize uint64, resultCh chan *state.TxTask) { + var m runtime.MemStats + common.ReadMemStats(&m) + sizeEstimate := rs.SizeEstimate() + currentTime := time.Now() + interval := currentTime.Sub(p.prevTime) + speedTx := float64(count-p.prevCount) / (float64(interval) / float64(time.Second)) + speedBlock := float64(outputBlockNum-p.prevOutputBlockNum) / (float64(interval) / float64(time.Second)) + var repeatRatio float64 + if count > p.prevCount { + repeatRatio = 100.0 * float64(repeatCount-p.prevRepeatCount) / float64(count-p.prevCount) + } + log.Info(fmt.Sprintf("[%s] Transaction replay", logPrefix), + //"workers", workerCount, + "at blk", outputBlockNum, + "input blk", atomic.LoadUint64(&inputBlockNum), + "blk/s", fmt.Sprintf("%.1f", speedBlock), + 
"tx/s", fmt.Sprintf("%.1f", speedTx), + "resultCh", fmt.Sprintf("%d/%d", len(resultCh), cap(resultCh)), + "resultQueue", rws.Len(), + "resultsSize", common.ByteCount(resultsSize), + "repeatRatio", fmt.Sprintf("%.2f%%", repeatRatio), + "buffer", fmt.Sprintf("%s/%s", common.ByteCount(sizeEstimate), common.ByteCount(p.commitThreshold)), + "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys), + ) + p.prevTime = currentTime + p.prevCount = count + p.prevOutputBlockNum = outputBlockNum + p.prevRepeatCount = repeatCount +} + +func Exec3(ctx context.Context, + execStage *StageState, workerCount int, batchSize datasize.ByteSize, chainDb kv.RwDB, applyTx kv.RwTx, + rs *state.State22, blockReader services.FullBlockReader, + logger log.Logger, agg *state2.Aggregator22, engine consensus.Engine, + maxBlockNum uint64, chainConfig *params.ChainConfig, + genesis *core.Genesis, +) (err error) { + parallel := workerCount > 1 + useExternalTx := applyTx != nil + if !useExternalTx && !parallel { + applyTx, err = chainDb.BeginRw(ctx) + if err != nil { + return err + } + defer applyTx.Rollback() + } + if !useExternalTx { + defer blockReader.(WithSnapshots).Snapshots().EnableMadvNormal().DisableReadAhead() + } + + var block, stageProgress uint64 + var outputTxNum, maxTxNum = atomic2.NewUint64(0), atomic2.NewUint64(0) + var inputTxNum uint64 + var inputBlockNum, outputBlockNum = atomic2.NewUint64(0), atomic2.NewUint64(0) + var count uint64 + var repeatCount, triggerCount = atomic2.NewUint64(0), atomic2.NewUint64(0) + var resultsSize = atomic2.NewInt64(0) + var lock sync.RWMutex + var rws state.TxTaskQueue + var rwsLock sync.Mutex + + if execStage.BlockNumber > 0 { + stageProgress = execStage.BlockNumber + block = execStage.BlockNumber + 1 + } + + // erigon3 execution doesn't support power-off shutdown yet. 
it need to do quite a lot of work on exit + // too keep consistency + // will improve it in future versions + interruptCh := ctx.Done() + ctx = context.Background() + queueSize := workerCount * 4 + var wg sync.WaitGroup + reconWorkers, resultCh, clear := exec3.NewWorkersPool(lock.RLocker(), parallel, chainDb, &wg, rs, blockReader, chainConfig, logger, genesis, engine, workerCount) + defer clear() + if !parallel { + reconWorkers[0].ResetTx(applyTx) + agg.SetTx(applyTx) + _maxTxNum, err := rawdb.TxNums.Max(applyTx, maxBlockNum) + if err != nil { + return err + } + maxTxNum.Store(_maxTxNum) + if block > 0 { + _outputTxNum, err := rawdb.TxNums.Max(applyTx, execStage.BlockNumber) + if err != nil { + return err + } + outputTxNum.Store(_outputTxNum) + outputTxNum.Add(1) + inputTxNum = outputTxNum.Load() + } + } else { + if err := chainDb.View(ctx, func(tx kv.Tx) error { + _maxTxNum, err := rawdb.TxNums.Max(tx, maxBlockNum) + if err != nil { + return err + } + maxTxNum.Store(_maxTxNum) + if block > 0 { + _outputTxNum, err := rawdb.TxNums.Max(tx, execStage.BlockNumber) + if err != nil { + return err + } + outputTxNum.Store(_outputTxNum) + outputTxNum.Add(1) + inputTxNum = outputTxNum.Load() + } + return nil + }); err != nil { + return err + } + } + + commitThreshold := batchSize.Bytes() * 4 + resultsThreshold := int64(batchSize.Bytes() * 4) + progress := NewProgress(block, commitThreshold) + logEvery := time.NewTicker(logInterval) + defer logEvery.Stop() + rwsReceiveCond := sync.NewCond(&rwsLock) + heap.Init(&rws) + agg.SetTxNum(inputTxNum) + if parallel { + // Go-routine gathering results from the workers + go func() { + tx, err := chainDb.BeginRw(ctx) + if err != nil { + panic(err) + } + defer tx.Rollback() + agg.SetTx(tx) + defer rs.Finish() + for outputTxNum.Load() < maxTxNum.Load() { + select { + case txTask := <-resultCh: + //fmt.Printf("Saved %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + func() { + rwsLock.Lock() + defer rwsLock.Unlock() 
+ resultsSize.Add(txTask.ResultsSize) + heap.Push(&rws, txTask) + processResultQueue(&rws, outputTxNum, rs, agg, tx, triggerCount, outputBlockNum, repeatCount, resultsSize) + rwsReceiveCond.Signal() + }() + case <-logEvery.C: + progress.Log(execStage.LogPrefix(), rs, rws, rs.DoneCount(), inputBlockNum.Load(), outputBlockNum.Load(), repeatCount.Load(), uint64(resultsSize.Load()), resultCh) + sizeEstimate := rs.SizeEstimate() + //prevTriggerCount = triggerCount + if sizeEstimate >= commitThreshold { + commitStart := time.Now() + log.Info("Committing...") + err := func() error { + rwsLock.Lock() + defer rwsLock.Unlock() + // Drain results (and process) channel because read sets do not carry over + for { + var drained bool + for !drained { + select { + case txTask := <-resultCh: + resultsSize.Add(txTask.ResultsSize) + heap.Push(&rws, txTask) + default: + drained = true + } + } + processResultQueue(&rws, outputTxNum, rs, agg, tx, triggerCount, outputBlockNum, repeatCount, resultsSize) + if rws.Len() == 0 { + break + } + } + rwsReceiveCond.Signal() + lock.Lock() // This is to prevent workers from starting work on any new txTask + defer lock.Unlock() + // Drain results channel because read sets do not carry over + var drained bool + for !drained { + select { + case txTask := <-resultCh: + rs.AddWork(txTask) + default: + drained = true + } + } + + // Drain results queue as well + for rws.Len() > 0 { + txTask := heap.Pop(&rws).(*state.TxTask) + resultsSize.Add(-txTask.ResultsSize) + rs.AddWork(txTask) + syncMetrics[stages.Execution].Set(txTask.BlockNum) + } + if err := rs.Flush(tx); err != nil { + return err + } + tx.CollectMetrics() + if err = execStage.Update(tx, outputBlockNum.Load()); err != nil { + return err + } + //TODO: can't commit - because we are in the middle of the block. Need make sure that we are always processed whole block. 
+ if err = tx.Commit(); err != nil { + return err + } + if tx, err = chainDb.BeginRw(ctx); err != nil { + return err + } + for i := 0; i < len(reconWorkers); i++ { + reconWorkers[i].ResetTx(nil) + } + agg.SetTx(tx) + return nil + }() + if err != nil { + panic(err) + } + log.Info("Committed", "time", time.Since(commitStart)) + } + } + } + if err = tx.Commit(); err != nil { + panic(err) + } + }() + } + + var b *types.Block + var blockNum uint64 +loop: + for blockNum = block; blockNum <= maxBlockNum; blockNum++ { + inputBlockNum.Store(blockNum) + rules := chainConfig.Rules(blockNum) + b, err = blockWithSenders(chainDb, applyTx, blockReader, blockNum) + if err != nil { + return err + } + if parallel { + func() { + rwsLock.Lock() + defer rwsLock.Unlock() + for rws.Len() > queueSize || resultsSize.Load() >= resultsThreshold || rs.SizeEstimate() >= commitThreshold { + rwsReceiveCond.Wait() + } + }() + } + txs := b.Transactions() + for txIndex := -1; txIndex <= len(txs); txIndex++ { + // Do not oversend, wait for the result heap to go under certain size + txTask := &state.TxTask{ + BlockNum: blockNum, + Rules: rules, + Block: b, + TxNum: inputTxNum, + TxIndex: txIndex, + BlockHash: b.Hash(), + Final: txIndex == len(txs), + } + if txIndex >= 0 && txIndex < len(txs) { + txTask.Tx = txs[txIndex] + txTask.TxAsMessage, err = txTask.Tx.AsMessage(*types.MakeSigner(chainConfig, txTask.BlockNum), txTask.Block.Header().BaseFee, txTask.Rules) + if err != nil { + panic(err) + } + + if sender, ok := txs[txIndex].GetSender(); ok { + txTask.Sender = &sender + } + if parallel { + if ok := rs.RegisterSender(txTask); ok { + rs.AddWork(txTask) + } + } + } else if parallel { + rs.AddWork(txTask) + } + if parallel { + stageProgress = blockNum + } else { + count++ + reconWorkers[0].RunTxTask(txTask) + if txTask.Error == nil { + if err := rs.Apply(reconWorkers[0].Tx(), txTask, agg); err != nil { + panic(fmt.Errorf("State22.Apply: %w", err)) + } + outputTxNum.Add(1) + 
outputBlockNum.Store(txTask.BlockNum) + //fmt.Printf("Applied %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + } else { + return fmt.Errorf("rolled back %d block %d txIndex %d, err = %v", txTask.TxNum, txTask.BlockNum, txTask.TxIndex, txTask.Error) + } + + stageProgress = blockNum + } + + inputTxNum++ + } + + if rs.SizeEstimate() >= commitThreshold { + commitStart := time.Now() + log.Info("Committing...") + if err := rs.Flush(applyTx); err != nil { + return err + } + if !useExternalTx { + if err = execStage.Update(applyTx, stageProgress); err != nil { + return err + } + applyTx.CollectMetrics() + if err := applyTx.Commit(); err != nil { + return err + } + if applyTx, err = chainDb.BeginRw(ctx); err != nil { + return err + } + defer applyTx.Rollback() + agg.SetTx(applyTx) + reconWorkers[0].ResetTx(applyTx) + log.Info("Committed", "time", time.Since(commitStart), "toProgress", stageProgress) + } + } + + // Check for interrupts + select { + case <-logEvery.C: + progress.Log(execStage.LogPrefix(), rs, rws, count, inputBlockNum.Load(), outputBlockNum.Load(), repeatCount.Load(), uint64(resultsSize.Load()), resultCh) + case <-interruptCh: + log.Info(fmt.Sprintf("interrupted, please wait for cleanup, next run will start with block %d", blockNum)) + maxTxNum.Store(inputTxNum) + break loop + default: + } + } + if parallel { + wg.Wait() + if err = chainDb.Update(ctx, func(tx kv.RwTx) error { + if err = rs.Flush(tx); err != nil { + return err + } + if err = execStage.Update(tx, stageProgress); err != nil { + return err + } + return nil + }); err != nil { + return err + } + } else { + if err = rs.Flush(applyTx); err != nil { + return err + } + if err = execStage.Update(applyTx, stageProgress); err != nil { + return err + } + } + + if !useExternalTx && applyTx != nil { + if err = applyTx.Commit(); err != nil { + return err + } + } + return nil +} +func blockWithSenders(db kv.RoDB, tx kv.Tx, blockReader services.BlockReader, blockNum uint64) (b 
*types.Block, err error) { + if tx == nil { + tx, err = db.BeginRo(context.Background()) + if err != nil { + return nil, err + } + defer tx.Rollback() + } + blockHash, err := rawdb.ReadCanonicalHash(tx, blockNum) + if err != nil { + return nil, err + } + b, _, err = blockReader.BlockWithSenders(context.Background(), tx, blockHash, blockNum) + if err != nil { + return nil, err + } + return b, nil +} + +func processResultQueue(rws *state.TxTaskQueue, outputTxNum *atomic2.Uint64, rs *state.State22, agg *state2.Aggregator22, applyTx kv.Tx, + triggerCount, outputBlockNum, repeatCount *atomic2.Uint64, resultsSize *atomic2.Int64) { + for rws.Len() > 0 && (*rws)[0].TxNum == outputTxNum.Load() { + txTask := heap.Pop(rws).(*state.TxTask) + resultsSize.Add(-txTask.ResultsSize) + if txTask.Error == nil && rs.ReadsValid(txTask.ReadLists) { + if err := rs.Apply(applyTx, txTask, agg); err != nil { + panic(fmt.Errorf("State22.Apply: %w", err)) + } + triggerCount.Add(rs.CommitTxNum(txTask.Sender, txTask.TxNum)) + outputTxNum.Add(1) + outputBlockNum.Store(txTask.BlockNum) + //fmt.Printf("Applied %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + } else { + rs.AddWork(txTask) + repeatCount.Add(1) + //fmt.Printf("Rolled back %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + } + } +} + +func ReconstituteState(ctx context.Context, s *StageState, dirs datadir.Dirs, workerCount int, batchSize datasize.ByteSize, chainDb kv.RwDB, + blockReader services.FullBlockReader, + logger log.Logger, agg *state2.Aggregator22, engine consensus.Engine, + chainConfig *params.ChainConfig, genesis *core.Genesis) (err error) { + defer agg.EnableMadvNormal().DisableReadAhead() + + reconDbPath := filepath.Join(dirs.DataDir, "recondb") + dir.Recreate(reconDbPath) + limiterB := semaphore.NewWeighted(int64(runtime.NumCPU()*2 + 1)) + db, err := kv2.NewMDBX(log.New()).Path(reconDbPath).RoTxsLimiter(limiterB). + WriteMergeThreshold(8192). 
+ PageSize(uint64(16 * datasize.KB)). + WriteMap().WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.ReconTablesCfg }). + Open() + if err != nil { + return err + } + defer db.Close() + defer os.RemoveAll(reconDbPath) + + var ok bool + var blockNum uint64 // First block which is not covered by the history snapshot files + if err := chainDb.View(ctx, func(tx kv.Tx) error { + ok, blockNum, err = rawdb.TxNums.FindBlockNum(tx, agg.EndTxNumMinimax()) + if err != nil { + return err + } + return nil + }); err != nil { + return err + } + if !ok { + return fmt.Errorf("mininmax txNum not found in snapshot blocks: %d", agg.EndTxNumMinimax()) + } + fmt.Printf("Max blockNum = %d\n", blockNum) + if blockNum == 0 { + return fmt.Errorf("not enough transactions in the history data") + } + blockNum-- + var txNum uint64 + if err := chainDb.View(ctx, func(tx kv.Tx) error { + txNum, err = rawdb.TxNums.Max(tx, blockNum) + if err != nil { + return err + } + txNum++ + return nil + }); err != nil { + return err + } + + fmt.Printf("Corresponding block num = %d, txNum = %d\n", blockNum, txNum) + var wg sync.WaitGroup + workCh := make(chan *state.TxTask, workerCount*64) + rs := state.NewReconState(workCh) + var fromKey, toKey []byte + bigCount := big.NewInt(int64(workerCount)) + bigStep := big.NewInt(0x100000000) + bigStep.Div(bigStep, bigCount) + bigCurrent := big.NewInt(0) + fillWorkers := make([]*exec3.FillWorker, workerCount) + doneCount := atomic2.NewUint64(0) + for i := 0; i < workerCount; i++ { + fromKey = toKey + if i == workerCount-1 { + toKey = nil + } else { + bigCurrent.Add(bigCurrent, bigStep) + toKey = make([]byte, 4) + bigCurrent.FillBytes(toKey) + } + //fmt.Printf("%d) Fill worker [%x] - [%x]\n", i, fromKey, toKey) + fillWorkers[i] = exec3.NewFillWorker(txNum, doneCount, agg, fromKey, toKey) + } + logEvery := time.NewTicker(logInterval) + defer logEvery.Stop() + doneCount.Store(0) + accountCollectorsX := make([]*etl.Collector, workerCount) + for i := 0; i < 
workerCount; i++ { + fillWorkers[i].ResetProgress() + accountCollectorsX[i] = etl.NewCollector("account scan X", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize)) + accountCollectorsX[i].LogLvl(log.LvlDebug) + go fillWorkers[i].BitmapAccounts(accountCollectorsX[i]) + } + t := time.Now() + for doneCount.Load() < uint64(workerCount) { + <-logEvery.C + var m runtime.MemStats + common.ReadMemStats(&m) + var p float64 + for i := 0; i < workerCount; i++ { + if total := fillWorkers[i].Total(); total > 0 { + p += float64(fillWorkers[i].Progress()) / float64(total) + } + } + p *= 100.0 + log.Info("Scan accounts history", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), + "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys), + ) + } + log.Info("Scan accounts history", "took", time.Since(t)) + + accountCollectorX := etl.NewCollector("account scan total X", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize*2)) + defer accountCollectorX.Close() + accountCollectorX.LogLvl(log.LvlDebug) + for i := 0; i < workerCount; i++ { + if err = accountCollectorsX[i].Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + return accountCollectorX.Collect(k, v) + }, etl.TransformArgs{}); err != nil { + return err + } + accountCollectorsX[i].Close() + accountCollectorsX[i] = nil + } + if err = db.Update(ctx, func(tx kv.RwTx) error { + return accountCollectorX.Load(tx, kv.XAccount, etl.IdentityLoadFunc, etl.TransformArgs{}) + }); err != nil { + return err + } + accountCollectorX.Close() + accountCollectorX = nil + doneCount.Store(0) + storageCollectorsX := make([]*etl.Collector, workerCount) + for i := 0; i < workerCount; i++ { + fillWorkers[i].ResetProgress() + storageCollectorsX[i] = etl.NewCollector("storage scan X", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize)) + storageCollectorsX[i].LogLvl(log.LvlDebug) + go fillWorkers[i].BitmapStorage(storageCollectorsX[i]) + } + t = time.Now() + for doneCount.Load() < 
uint64(workerCount) { + <-logEvery.C + var m runtime.MemStats + common.ReadMemStats(&m) + var p float64 + for i := 0; i < workerCount; i++ { + if total := fillWorkers[i].Total(); total > 0 { + p += float64(fillWorkers[i].Progress()) / float64(total) + } + } + p *= 100.0 + log.Info("Scan storage history", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), + "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys), + ) + } + log.Info("Scan storage history", "took", time.Since(t)) + + storageCollectorX := etl.NewCollector("storage scan total X", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize*2)) + defer storageCollectorX.Close() + storageCollectorX.LogLvl(log.LvlDebug) + for i := 0; i < workerCount; i++ { + if err = storageCollectorsX[i].Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + return storageCollectorX.Collect(k, v) + }, etl.TransformArgs{}); err != nil { + return err + } + storageCollectorsX[i].Close() + storageCollectorsX[i] = nil + } + if err = db.Update(ctx, func(tx kv.RwTx) error { + return storageCollectorX.Load(tx, kv.XStorage, etl.IdentityLoadFunc, etl.TransformArgs{}) + }); err != nil { + return err + } + storageCollectorX.Close() + storageCollectorX = nil + doneCount.Store(0) + codeCollectorsX := make([]*etl.Collector, workerCount) + for i := 0; i < workerCount; i++ { + fillWorkers[i].ResetProgress() + codeCollectorsX[i] = etl.NewCollector("code scan X", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize)) + codeCollectorsX[i].LogLvl(log.LvlDebug) + go fillWorkers[i].BitmapCode(codeCollectorsX[i]) + } + for doneCount.Load() < uint64(workerCount) { + <-logEvery.C + var m runtime.MemStats + common.ReadMemStats(&m) + var p float64 + for i := 0; i < workerCount; i++ { + if total := fillWorkers[i].Total(); total > 0 { + p += float64(fillWorkers[i].Progress()) / float64(total) + } + } + p *= 100.0 + log.Info("Scan code history", "workers", workerCount, "progress", 
fmt.Sprintf("%.2f%%", p), + "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys), + ) + } + codeCollectorX := etl.NewCollector("code scan total X", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize*2)) + defer codeCollectorX.Close() + codeCollectorX.LogLvl(log.LvlDebug) + var bitmap roaring64.Bitmap + for i := 0; i < workerCount; i++ { + bitmap.Or(fillWorkers[i].Bitmap()) + if err = codeCollectorsX[i].Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + return codeCollectorX.Collect(k, v) + }, etl.TransformArgs{}); err != nil { + return err + } + codeCollectorsX[i].Close() + codeCollectorsX[i] = nil + } + if err = db.Update(ctx, func(tx kv.RwTx) error { + return codeCollectorX.Load(tx, kv.XCode, etl.IdentityLoadFunc, etl.TransformArgs{}) + }); err != nil { + return err + } + codeCollectorX.Close() + codeCollectorX = nil + log.Info("Ready to replay", "transactions", bitmap.GetCardinality(), "out of", txNum) + var lock sync.RWMutex + reconWorkers := make([]*exec3.ReconWorker, workerCount) + roTxs := make([]kv.Tx, workerCount) + chainTxs := make([]kv.Tx, workerCount) + defer func() { + for i := 0; i < workerCount; i++ { + if roTxs[i] != nil { + roTxs[i].Rollback() + } + if chainTxs[i] != nil { + chainTxs[i].Rollback() + } + } + }() + for i := 0; i < workerCount; i++ { + if roTxs[i], err = db.BeginRo(ctx); err != nil { + return err + } + if chainTxs[i], err = chainDb.BeginRo(ctx); err != nil { + return err + } + } + for i := 0; i < workerCount; i++ { + reconWorkers[i] = exec3.NewReconWorker(lock.RLocker(), &wg, rs, agg, blockReader, chainConfig, logger, genesis, engine, chainTxs[i]) + reconWorkers[i].SetTx(roTxs[i]) + } + wg.Add(workerCount) + count := uint64(0) + rollbackCount := uint64(0) + total := bitmap.GetCardinality() + for i := 0; i < workerCount; i++ { + go reconWorkers[i].Run() + } + commitThreshold := batchSize.Bytes() * 4 + prevCount := uint64(0) + prevRollbackCount := uint64(0) + prevTime := 
time.Now() + reconDone := make(chan struct{}) + go func() { + for { + select { + case <-reconDone: + return + case <-logEvery.C: + var m runtime.MemStats + common.ReadMemStats(&m) + sizeEstimate := rs.SizeEstimate() + count = rs.DoneCount() + rollbackCount = rs.RollbackCount() + currentTime := time.Now() + interval := currentTime.Sub(prevTime) + speedTx := float64(count-prevCount) / (float64(interval) / float64(time.Second)) + progress := 100.0 * float64(count) / float64(total) + var repeatRatio float64 + if count > prevCount { + repeatRatio = 100.0 * float64(rollbackCount-prevRollbackCount) / float64(count-prevCount) + } + prevTime = currentTime + prevCount = count + prevRollbackCount = rollbackCount + log.Info("State reconstitution", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", progress), + "tx/s", fmt.Sprintf("%.1f", speedTx), "workCh", fmt.Sprintf("%d/%d", len(workCh), cap(workCh)), + "repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), + "buffer", fmt.Sprintf("%s/%s", common.ByteCount(sizeEstimate), common.ByteCount(commitThreshold)), + "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys)) + if sizeEstimate >= commitThreshold { + err := func() error { + lock.Lock() + defer lock.Unlock() + for i := 0; i < workerCount; i++ { + roTxs[i].Rollback() + } + if err := db.Update(ctx, func(tx kv.RwTx) error { + if err = rs.Flush(tx); err != nil { + return err + } + return nil + }); err != nil { + return err + } + for i := 0; i < workerCount; i++ { + if roTxs[i], err = db.BeginRo(ctx); err != nil { + return err + } + reconWorkers[i].SetTx(roTxs[i]) + } + return nil + }() + if err != nil { + panic(err) + } + } + } + } + }() + + defer blockReader.(WithSnapshots).Snapshots().EnableReadAhead().DisableReadAhead() + + var inputTxNum uint64 + var b *types.Block + var txKey [8]byte + for bn := uint64(0); bn <= blockNum; bn++ { + rules := chainConfig.Rules(bn) + b, err = blockWithSenders(chainDb, nil, blockReader, bn) + if err != nil { + return err + } + 
txs := b.Transactions() + for txIndex := -1; txIndex <= len(txs); txIndex++ { + if bitmap.Contains(inputTxNum) { + binary.BigEndian.PutUint64(txKey[:], inputTxNum) + txTask := &state.TxTask{ + BlockNum: bn, + Block: b, + Rules: rules, + TxNum: inputTxNum, + TxIndex: txIndex, + BlockHash: b.Hash(), + Final: txIndex == len(txs), + } + if txIndex >= 0 && txIndex < len(txs) { + txTask.Tx = txs[txIndex] + txTask.TxAsMessage, err = txTask.Tx.AsMessage(*types.MakeSigner(chainConfig, txTask.BlockNum), txTask.Block.Header().BaseFee, txTask.Rules) + if err != nil { + panic(err) + } + } + workCh <- txTask + } + inputTxNum++ + } + } + close(workCh) + wg.Wait() + reconDone <- struct{}{} // Complete logging and committing go-routine + for i := 0; i < workerCount; i++ { + roTxs[i].Rollback() + } + if err := db.Update(ctx, func(tx kv.RwTx) error { + if err = rs.Flush(tx); err != nil { + return err + } + return nil + }); err != nil { + return err + } + plainStateCollector := etl.NewCollector("recon plainState", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer plainStateCollector.Close() + codeCollector := etl.NewCollector("recon code", dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize)) + defer codeCollector.Close() + plainContractCollector := etl.NewCollector("recon plainContract", dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer plainContractCollector.Close() + roTx, err := db.BeginRo(ctx) + if err != nil { + return err + } + defer roTx.Rollback() + if err = roTx.ForEach(kv.PlainStateR, nil, func(k, v []byte) error { + return plainStateCollector.Collect(k[8:], v) + }); err != nil { + return err + } + if err = roTx.ForEach(kv.CodeR, nil, func(k, v []byte) error { + return codeCollector.Collect(k[8:], v) + }); err != nil { + return err + } + if err = roTx.ForEach(kv.PlainContractR, nil, func(k, v []byte) error { + return plainContractCollector.Collect(k[8:], v) + }); err != nil { + return err + } + roTx.Rollback() + + if err = db.Update(ctx, 
func(tx kv.RwTx) error { + if err = tx.ClearBucket(kv.PlainStateR); err != nil { + return err + } + if err = tx.ClearBucket(kv.CodeR); err != nil { + return err + } + if err = tx.ClearBucket(kv.PlainContractR); err != nil { + return err + } + return nil + }); err != nil { + return err + } + plainStateCollectors := make([]*etl.Collector, workerCount) + codeCollectors := make([]*etl.Collector, workerCount) + plainContractCollectors := make([]*etl.Collector, workerCount) + for i := 0; i < workerCount; i++ { + plainStateCollectors[i] = etl.NewCollector(fmt.Sprintf("plainState %d", i), dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer plainStateCollectors[i].Close() + codeCollectors[i] = etl.NewCollector(fmt.Sprintf("code %d", i), dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer codeCollectors[i].Close() + plainContractCollectors[i] = etl.NewCollector(fmt.Sprintf("plainContract %d", i), dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer plainContractCollectors[i].Close() + } + doneCount.Store(0) + for i := 0; i < workerCount; i++ { + fillWorkers[i].ResetProgress() + go fillWorkers[i].FillAccounts(plainStateCollectors[i]) + } + for doneCount.Load() < uint64(workerCount) { + <-logEvery.C + var m runtime.MemStats + common.ReadMemStats(&m) + var p float64 + for i := 0; i < workerCount; i++ { + if total := fillWorkers[i].Total(); total > 0 { + p += float64(fillWorkers[i].Progress()) / float64(total) + } + } + p *= 100.0 + log.Info("Filling accounts", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), + "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys), + ) + } + doneCount.Store(0) + for i := 0; i < workerCount; i++ { + fillWorkers[i].ResetProgress() + go fillWorkers[i].FillStorage(plainStateCollectors[i]) + } + for doneCount.Load() < uint64(workerCount) { + <-logEvery.C + var m runtime.MemStats + common.ReadMemStats(&m) + var p float64 + for i := 0; i < workerCount; i++ { + if total := 
fillWorkers[i].Total(); total > 0 { + p += float64(fillWorkers[i].Progress()) / float64(total) + } + } + p *= 100.0 + log.Info("Filling storage", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), + "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys), + ) + } + doneCount.Store(0) + for i := 0; i < workerCount; i++ { + fillWorkers[i].ResetProgress() + go fillWorkers[i].FillCode(codeCollectors[i], plainContractCollectors[i]) + } + for doneCount.Load() < uint64(workerCount) { + <-logEvery.C + var m runtime.MemStats + common.ReadMemStats(&m) + var p float64 + for i := 0; i < workerCount; i++ { + if total := fillWorkers[i].Total(); total > 0 { + p += float64(fillWorkers[i].Progress()) / float64(total) + } + } + p *= 100.0 + log.Info("Filling code", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), + "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys), + ) + } + // Load all collections into the main collector + for i := 0; i < workerCount; i++ { + if err = plainStateCollectors[i].Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + return plainStateCollector.Collect(k, v) + }, etl.TransformArgs{}); err != nil { + return err + } + plainStateCollectors[i].Close() + if err = codeCollectors[i].Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + return codeCollector.Collect(k, v) + }, etl.TransformArgs{}); err != nil { + return err + } + codeCollectors[i].Close() + if err = plainContractCollectors[i].Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + return plainContractCollector.Collect(k, v) + }, etl.TransformArgs{}); err != nil { + return err + } + plainContractCollectors[i].Close() + } + if err = chainDb.Update(ctx, func(tx kv.RwTx) error { + if err = tx.ClearBucket(kv.PlainState); err != nil { + return err + } + if err = tx.ClearBucket(kv.Code); err != nil { + return err + } + if err = 
tx.ClearBucket(kv.PlainContractCode); err != nil { + return err + } + if err = plainStateCollector.Load(tx, kv.PlainState, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { + return err + } + plainStateCollector.Close() + if err = codeCollector.Load(tx, kv.Code, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { + return err + } + codeCollector.Close() + if err = plainContractCollector.Load(tx, kv.PlainContractCode, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { + return err + } + plainContractCollector.Close() + if err := s.Update(tx, blockNum); err != nil { + return err + } + s.BlockNumber = blockNum + return nil + }); err != nil { + return err + } + return nil +} diff --git a/eth/stagedsync/stage.go b/eth/stagedsync/stage.go index 3565fcdb7ca..37994bac0de 100644 --- a/eth/stagedsync/stage.go +++ b/eth/stagedsync/stage.go @@ -9,7 +9,7 @@ import ( // ExecFunc is the execution function for the stage to move forward. // * state - is the current state of the stage and contains stage data. // * unwinder - if the stage needs to cause unwinding, `unwinder` methods can be used. -type ExecFunc func(firstCycle bool, badBlockUnwind bool, s *StageState, unwinder Unwinder, tx kv.RwTx) error +type ExecFunc func(firstCycle bool, badBlockUnwind bool, s *StageState, unwinder Unwinder, tx kv.RwTx, quiet bool) error // UnwindFunc is the unwinding logic of the stage. // * unwindState - contains information about the unwind itself. @@ -48,6 +48,9 @@ func (s *StageState) LogPrefix() string { return s.state.LogPrefix() } // Update updates the stage state (current block number) in the database. Can be called multiple times during stage execution. 
func (s *StageState) Update(db kv.Putter, newBlockNum uint64) error { + if s.ID == stages.Execution && newBlockNum == 0 { + panic(newBlockNum) + } if m, ok := syncMetrics[s.ID]; ok { m.Set(newBlockNum) } diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index 79109eb12f4..f46c0ad1e0b 100644 --- a/eth/stagedsync/stage_bodies.go +++ b/eth/stagedsync/stage_bodies.go @@ -9,6 +9,8 @@ import ( "github.com/c2h5oh/datasize" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" @@ -18,9 +20,10 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages/bodydownload" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" - "github.com/ledgerwatch/log/v3" ) +const requestLoopCutOff int = 50 + type BodiesCfg struct { db kv.RwDB bd *bodydownload.BodyDownload @@ -32,21 +35,11 @@ type BodiesCfg struct { batchSize datasize.ByteSize snapshots *snapshotsync.RoSnapshots blockReader services.FullBlockReader + historyV3 bool } -func StageBodiesCfg( - db kv.RwDB, - bd *bodydownload.BodyDownload, - bodyReqSend func(context.Context, *bodydownload.BodyRequest) ([64]byte, bool), - penalise func(context.Context, []headerdownload.PenaltyItem), - blockPropagator adapter.BlockPropagator, - timeout int, - chanConfig params.ChainConfig, - batchSize datasize.ByteSize, - snapshots *snapshotsync.RoSnapshots, - blockReader services.FullBlockReader, -) BodiesCfg { - return BodiesCfg{db: db, bd: bd, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator, timeout: timeout, chanConfig: chanConfig, batchSize: batchSize, snapshots: snapshots, blockReader: blockReader} +func StageBodiesCfg(db kv.RwDB, bd *bodydownload.BodyDownload, bodyReqSend func(context.Context, *bodydownload.BodyRequest) 
([64]byte, bool), penalise func(context.Context, []headerdownload.PenaltyItem), blockPropagator adapter.BlockPropagator, timeout int, chanConfig params.ChainConfig, batchSize datasize.ByteSize, snapshots *snapshotsync.RoSnapshots, blockReader services.FullBlockReader, historyV3 bool) BodiesCfg { + return BodiesCfg{db: db, bd: bd, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator, timeout: timeout, chanConfig: chanConfig, batchSize: batchSize, snapshots: snapshots, blockReader: blockReader, historyV3: historyV3} } // BodiesForward progresses Bodies stage in the forward direction @@ -68,6 +61,7 @@ func BodiesForward( var d1, d2, d3, d4, d5, d6 time.Duration var err error useExternalTx := tx != nil + cfg.bd.UsingExternalTx = useExternalTx if !useExternalTx { tx, err = cfg.db.BeginRw(context.Background()) if err != nil { @@ -112,34 +106,75 @@ func BodiesForward( // Property of blockchain: same block in different forks will have different hashes. // Means - can mark all canonical blocks as non-canonical on unwind, and // do opposite here - without storing any meta-info. 
- if err := rawdb.MakeBodiesCanonical(tx, s.BlockNumber+1, ctx, logPrefix, logEvery); err != nil { + if err := rawdb.MakeBodiesCanonical(tx, s.BlockNumber+1, ctx, logPrefix, logEvery, func(blockNum, lastTxnNum uint64) error { + if cfg.historyV3 { + if err := rawdb.TxNums.Append(tx, blockNum, lastTxnNum); err != nil { + return err + } + //cfg.txNums.Append(blockNum, lastTxnNum) + } + return nil + }); err != nil { return fmt.Errorf("make block canonical: %w", err) } var prevDeliveredCount float64 = 0 var prevWastedCount float64 = 0 timer := time.NewTimer(1 * time.Second) // Check periodically even in the abseence of incoming messages - var blockNum uint64 var req *bodydownload.BodyRequest var peer [64]byte var sentToPeer bool stopped := false prevProgress := bodyProgress noProgressCount := 0 // How many time the progress was printed without actual progress -Loop: - for !stopped { - // TODO: this is incorrect use - if req == nil { - start := time.Now() - currentTime := uint64(time.Now().Unix()) - req, blockNum, err = cfg.bd.RequestMoreBodies(tx, cfg.blockReader, blockNum, currentTime, cfg.blockPropagator) + var totalDelivered uint64 = 0 + + // create a temporary bucket to fire the bodies into as we start to collect them + // this will allow us to restart the bodies stage and not request bodies we already have + // once the bodies stage is complete this bucket is dropped + if !useExternalTx { + err = tx.CreateBucket("BodiesStage") + if err != nil { + return err + } + err = tx.ClearBucket("BodiesStage") + if err != nil { + return err + } + err = tx.Commit() + if err != nil { + return err + } + } + + var blockNum uint64 + loopBody := func() (bool, error) { + // innerTx is used for the temporary stage bucket to hold on to bodies as they're downloaded + // offering restart capability for the stage bodies process + var innerTx kv.RwTx + if !useExternalTx { + innerTx, err = cfg.db.BeginRw(context.Background()) if err != nil { - return fmt.Errorf("request more bodies: %w", 
err) + return false, err } - d1 += time.Since(start) + defer innerTx.Rollback() + } else { + innerTx = tx + } + + // always check if a new request is needed at the start of the loop + // this will check for timed out old requests and attempt to send them again + start := time.Now() + currentTime := uint64(time.Now().Unix()) + req, blockNum, err = cfg.bd.RequestMoreBodies(innerTx, cfg.blockReader, blockNum, currentTime, cfg.blockPropagator) + if err != nil { + return false, fmt.Errorf("request more bodies: %w", err) } + d1 += time.Since(start) + peer = [64]byte{} sentToPeer = false + if req != nil { start := time.Now() peer, sentToPeer = cfg.bodyReqSend(ctx, req) @@ -151,12 +186,18 @@ Loop: cfg.bd.RequestSent(req, currentTime+uint64(timeout), peer) d3 += time.Since(start) } + + // loopCount is used here to ensure we don't get caught in a constant loop of making requests + // having some time out so requesting again and cycling like that forever. We'll cap it + // and break the loop so we can see if there are any records to actually process further down + // then come back here again in the next cycle + loopCount := 0 for req != nil && sentToPeer { start := time.Now() currentTime := uint64(time.Now().Unix()) - req, blockNum, err = cfg.bd.RequestMoreBodies(tx, cfg.blockReader, blockNum, currentTime, cfg.blockPropagator) + req, blockNum, err = cfg.bd.RequestMoreBodies(innerTx, cfg.blockReader, blockNum, currentTime, cfg.blockPropagator) if err != nil { - return fmt.Errorf("request more bodies: %w", err) + return false, fmt.Errorf("request more bodies: %w", err) } d1 += time.Since(start) peer = [64]byte{} @@ -171,48 +212,100 @@ Loop: cfg.bd.RequestSent(req, currentTime+uint64(timeout), peer) d3 += time.Since(start) } + + loopCount++ + if loopCount >= requestLoopCutOff { + break + } } - start := time.Now() - headers, rawBodies, err := cfg.bd.GetDeliveries() + + start = time.Now() + requestedLow, delivered, err := cfg.bd.GetDeliveries(innerTx) if err != nil { - return err 
+ return false, err } + totalDelivered += delivered d4 += time.Since(start) start = time.Now() - cr := ChainReader{Cfg: cfg.chanConfig, Db: tx} - for i, header := range headers { - rawBody := rawBodies[i] - blockHeight := header.Number.Uint64() - _, err := cfg.bd.VerifyUncles(header, rawBody.Uncles, cr) - if err != nil { - log.Error(fmt.Sprintf("[%s] Uncle verification failed", logPrefix), "number", blockHeight, "hash", header.Hash().String(), "err", err) - u.UnwindTo(blockHeight-1, header.Hash()) - break Loop - } + cr := ChainReader{Cfg: cfg.chanConfig, Db: innerTx} - // Check existence before write - because WriteRawBody isn't idempotent (it allocates new sequence range for transactions on every call) - if err = rawdb.WriteRawBodyIfNotExists(tx, header.Hash(), blockHeight, rawBody); err != nil { - return fmt.Errorf("WriteRawBodyIfNotExists: %w", err) - } + toProcess := cfg.bd.NextProcessingCount() + + if toProcess > 0 { + var i uint64 + for i = 0; i < toProcess; i++ { + nextBlock := requestedLow + i + + header, _, err := cfg.bd.GetHeader(nextBlock, cfg.blockReader, innerTx) + if err != nil { + return false, err + } + blockHeight := header.Number.Uint64() + if blockHeight != nextBlock { + return false, fmt.Errorf("[%s] Header block unexpected when matching body, got %v, expected %v", logPrefix, blockHeight, nextBlock) + } + + rawBody, err := cfg.bd.GetBlockFromCache(innerTx, nextBlock) + if err != nil { + log.Error(fmt.Sprintf("[%s] Error getting body from cache", logPrefix), "err", err) + return false, err + } + if rawBody == nil { + return false, fmt.Errorf("[%s] Body was nil when reading from bucket, block: %v", logPrefix, nextBlock) + } + + // Txn & uncle roots are verified via bd.requestedMap + err = cfg.bd.Engine.VerifyUncles(cr, header, rawBody.Uncles) + if err != nil { + log.Error(fmt.Sprintf("[%s] Uncle verification failed", logPrefix), "number", blockHeight, "hash", header.Hash().String(), "err", err) + u.UnwindTo(blockHeight-1, header.Hash()) + return 
true, nil + } + + // Check existence before write - because WriteRawBody isn't idempotent (it allocates new sequence range for transactions on every call) + ok, lastTxnNum, err := rawdb.WriteRawBodyIfNotExists(innerTx, header.Hash(), blockHeight, rawBody) + if err != nil { + return false, fmt.Errorf("WriteRawBodyIfNotExists: %w", err) + } + if cfg.historyV3 && ok { + if err := rawdb.TxNums.Append(innerTx, blockHeight, lastTxnNum); err != nil { + return false, err + } + } - if blockHeight > bodyProgress { - bodyProgress = blockHeight - if err = s.Update(tx, blockHeight); err != nil { - return fmt.Errorf("saving Bodies progress: %w", err) + if blockHeight > bodyProgress { + bodyProgress = blockHeight + if err = s.Update(innerTx, blockHeight); err != nil { + return false, fmt.Errorf("saving Bodies progress: %w", err) + } } } } + + // if some form of work has happened then commit the transaction + if !useExternalTx && (cfg.bd.HasAddedBodies() || toProcess > 0) { + err = innerTx.Commit() + if err != nil { + return false, err + } + cfg.bd.ResetAddedBodies() + } + + if toProcess > 0 { + logWritingBodies(logPrefix, bodyProgress, headerProgress) + } + d5 += time.Since(start) start = time.Now() if bodyProgress == headerProgress { - break + return true, nil } if test { stopped = true - break + return true, nil } if !firstCycle && s.BlockNumber > 0 && noProgressCount >= 5 { - break + return true, nil } timer.Stop() timer = time.NewTimer(1 * time.Second) @@ -226,7 +319,7 @@ Loop: } else { noProgressCount = 0 // Reset, there was progress } - logProgressBodies(logPrefix, bodyProgress, prevDeliveredCount, deliveredCount, prevWastedCount, wastedCount) + logDownloadingBodies(logPrefix, bodyProgress, headerProgress-requestedLow, totalDelivered, prevDeliveredCount, deliveredCount, prevWastedCount, wastedCount) prevProgress = bodyProgress prevDeliveredCount = deliveredCount prevWastedCount = wastedCount @@ -237,15 +330,38 @@ Loop: log.Trace("bodyLoop woken up by the incoming request") 
} d6 += time.Since(start) + + return false, nil } - if err := s.Update(tx, bodyProgress); err != nil { - return err + + // kick off the loop and check for any reason to stop and break early + for !stopped { + shouldBreak, err := loopBody() + if err != nil { + return err + } + if shouldBreak { + break + } } + + // remove the temporary bucket for bodies stage if !useExternalTx { - if err := tx.Commit(); err != nil { + bucketTx, err := cfg.db.BeginRw(context.Background()) + if err != nil { + return err + } + defer bucketTx.Rollback() + + bucketTx.ClearBucket("BodiesStage") + err = bucketTx.Commit() + if err != nil { return err } + } else { + cfg.bd.ClearBodyCache() } + if stopped { return libcommon.ErrStopped } @@ -255,15 +371,35 @@ Loop: return nil } -func logProgressBodies(logPrefix string, committed uint64, prevDeliveredCount, deliveredCount, prevWastedCount, wastedCount float64) { +func logDownloadingBodies(logPrefix string, committed, remaining uint64, totalDelivered uint64, prevDeliveredCount, deliveredCount, prevWastedCount, wastedCount float64) { speed := (deliveredCount - prevDeliveredCount) / float64(logInterval/time.Second) wastedSpeed := (wastedCount - prevWastedCount) / float64(logInterval/time.Second) + if speed == 0 && wastedSpeed == 0 { + // Don't log "Wrote block ..." 
unless we're actually writing something + log.Info(fmt.Sprintf("[%s] No block bodies to write in this log period", logPrefix), "block number", committed, "blk/second", speed) + return + } + var m runtime.MemStats libcommon.ReadMemStats(&m) - log.Info(fmt.Sprintf("[%s] Wrote block bodies", logPrefix), + log.Info(fmt.Sprintf("[%s] Downloading block bodies", logPrefix), "block_num", committed, "delivery/sec", libcommon.ByteCount(uint64(speed)), "wasted/sec", libcommon.ByteCount(uint64(wastedSpeed)), + "remaining", remaining, + "delivered", totalDelivered, + "alloc", libcommon.ByteCount(m.Alloc), + "sys", libcommon.ByteCount(m.Sys), + ) +} + +func logWritingBodies(logPrefix string, committed, headerProgress uint64) { + var m runtime.MemStats + libcommon.ReadMemStats(&m) + remaining := headerProgress - committed + log.Info(fmt.Sprintf("[%s] Writing block bodies", logPrefix), + "block_num", committed, + "remaining", remaining, "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), ) @@ -286,6 +422,11 @@ func UnwindBodiesStage(u *UnwindState, tx kv.RwTx, cfg BodiesCfg, ctx context.Co if err := rawdb.MakeBodiesNonCanonical(tx, u.UnwindPoint+1, badBlock /* deleteBodies */, ctx, u.LogPrefix(), logEvery); err != nil { return err } + if cfg.historyV3 { + if err := rawdb.TxNums.Truncate(tx, u.UnwindPoint+1); err != nil { + return err + } + } if err = u.Done(tx); err != nil { return err diff --git a/eth/stagedsync/stage_bodies_test.go b/eth/stagedsync/stage_bodies_test.go index 01e16c13794..e41faa95832 100644 --- a/eth/stagedsync/stage_bodies_test.go +++ b/eth/stagedsync/stage_bodies_test.go @@ -29,7 +29,7 @@ func TestBodiesUnwind(t *testing.T) { b := &types.RawBody{Transactions: [][]byte{rlpTxn, rlpTxn, rlpTxn}} for i := uint64(1); i <= 10; i++ { - err = rawdb.WriteRawBody(tx, common.Hash{byte(i)}, i, b) + _, _, err = rawdb.WriteRawBody(tx, common.Hash{byte(i)}, i, b) require.NoError(err) err = rawdb.WriteCanonicalHash(tx, common.Hash{byte(i)}, i) 
require.NoError(err) @@ -43,13 +43,13 @@ func TestBodiesUnwind(t *testing.T) { require.Equal(5*(3+2), int(n)) // from 0, 5 block with 3 txn in each } { - err = rawdb.MakeBodiesCanonical(tx, 5+1, ctx, "test", logEvery) // block 5 already canonical, start from next one + err = rawdb.MakeBodiesCanonical(tx, 5+1, ctx, "test", logEvery, nil) // block 5 already canonical, start from next one require.NoError(err) n, err := tx.ReadSequence(kv.EthTx) require.NoError(err) require.Equal(10*(3+2), int(n)) - err = rawdb.WriteRawBody(tx, common.Hash{11}, 11, b) + _, _, err = rawdb.WriteRawBody(tx, common.Hash{11}, 11, b) require.NoError(err) err = rawdb.WriteCanonicalHash(tx, common.Hash{11}, 11) require.NoError(err) @@ -68,7 +68,7 @@ func TestBodiesUnwind(t *testing.T) { require.NoError(err) require.Equal(5*(3+2), int(n)) // from 0, 5 block with 3 txn in each - err = rawdb.MakeBodiesCanonical(tx, 5+1, ctx, "test", logEvery) // block 5 already canonical, start from next one + err = rawdb.MakeBodiesCanonical(tx, 5+1, ctx, "test", logEvery, nil) // block 5 already canonical, start from next one require.NoError(err) n, err = tx.ReadSequence(kv.EthTx) require.NoError(err) diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 0566c40aa4e..fbe3f719565 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -6,7 +6,10 @@ import ( "errors" "fmt" "math/big" + "os" + "os/signal" "runtime" + "syscall" "time" "github.com/c2h5oh/datasize" @@ -15,6 +18,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" + libstate "github.com/ledgerwatch/erigon-lib/state" commonold "github.com/ledgerwatch/erigon/common" ecom "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/changeset" @@ -32,9 +36,11 @@ import ( "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/olddb" 
"github.com/ledgerwatch/erigon/ethdb/prune" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/shards" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/log/v3" ) @@ -49,6 +55,10 @@ type HasChangeSetWriter interface { type ChangeSetHook func(blockNum uint64, wr *state.ChangeSetWriter) +type WithSnapshots interface { + Snapshots() *snapshotsync.RoSnapshots +} + type ExecuteBlockCfg struct { db kv.RwDB batchSize datasize.ByteSize @@ -58,16 +68,21 @@ type ExecuteBlockCfg struct { engine consensus.Engine vmConfig *vm.Config badBlockHalt bool - tmpdir string stateStream bool accumulator *shards.Accumulator blockReader services.FullBlockReader hd *headerdownload.HeaderDownload + + dirs datadir.Dirs + historyV3 bool + workersCount int + genesis *core.Genesis + agg *libstate.Aggregator22 } func StageExecuteBlocksCfg( - kv kv.RwDB, - prune prune.Mode, + db kv.RwDB, + pm prune.Mode, batchSize datasize.ByteSize, changeSetHook ChangeSetHook, chainConfig *params.ChainConfig, @@ -76,24 +91,33 @@ func StageExecuteBlocksCfg( accumulator *shards.Accumulator, stateStream bool, badBlockHalt bool, - tmpdir string, + + historyV3 bool, + dirs datadir.Dirs, blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload, + genesis *core.Genesis, + workersCount int, + agg *libstate.Aggregator22, ) ExecuteBlockCfg { return ExecuteBlockCfg{ - db: kv, - prune: prune, + db: db, + prune: pm, batchSize: batchSize, changeSetHook: changeSetHook, chainConfig: chainConfig, engine: engine, vmConfig: vmConfig, - tmpdir: tmpdir, + dirs: dirs, accumulator: accumulator, stateStream: stateStream, badBlockHalt: badBlockHalt, blockReader: blockReader, hd: hd, + genesis: genesis, + historyV3: historyV3, + workersCount: workersCount, + agg: agg, } } @@ -106,7 +130,6 @@ func executeBlock( 
writeChangesets bool, writeReceipts bool, writeCallTraces bool, - contractHasTEVM func(contractHash commonold.Hash) (bool, error), initialCycle bool, effectiveEngine consensus.Engine, ) error { @@ -126,33 +149,36 @@ func executeBlock( return vm.NewStructLogger(&vm.LogConfig{}), nil } - callTracer := calltracer.NewCallTracer(contractHasTEVM) + callTracer := calltracer.NewCallTracer() vmConfig.Debug = true vmConfig.Tracer = callTracer var receipts types.Receipts - var stateSyncReceipt *types.ReceiptForStorage + var stateSyncReceipt *types.Receipt var execRs *core.EphemeralExecResult _, isPoSa := cfg.engine.(consensus.PoSA) + isBor := cfg.chainConfig.Bor != nil getHashFn := core.GetHashFn(block.Header(), getHeader) if isPoSa { - execRs, err = core.ExecuteBlockEphemerallyForBSC(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM, false, getTracer) + execRs, err = core.ExecuteBlockEphemerallyForBSC(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, false, getTracer) + } else if isBor { + execRs, err = core.ExecuteBlockEphemerallyBor(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, false, getTracer) } else { - execRs, err = core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM, false, getTracer) + execRs, err = core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: 
cfg.blockReader}, false, getTracer) } if err != nil { return err } receipts = execRs.Receipts - stateSyncReceipt = execRs.ReceiptForStorage + stateSyncReceipt = execRs.StateSyncReceipt if writeReceipts { if err = rawdb.AppendReceipts(tx, blockNum, receipts); err != nil { return err } - if stateSyncReceipt != nil { + if stateSyncReceipt != nil && stateSyncReceipt.Status == types.ReceiptStatusSuccessful { if err := rawdb.WriteBorReceipt(tx, block.Hash(), block.NumberU64(), stateSyncReceipt); err != nil { return err } @@ -203,7 +229,135 @@ func newStateReaderWriter( return stateReader, stateWriter, nil } -func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) { +// ================ Erigon3 ================ + +func ExecBlock22(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) { + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + execCtx, cancel := context.WithCancel(ctx) + go func() { + <-sigs + cancel() + }() + + workersCount := cfg.workersCount + //workersCount := 2 + if !initialCycle { + workersCount = 1 + } + cfg.agg.SetWorkers(cmp.Max(1, runtime.NumCPU()-1)) + + if initialCycle && s.BlockNumber == 0 { + reconstituteToBlock, found, err := reconstituteBlock(cfg.agg, cfg.db, tx) + if err != nil { + return err + } + + if found && reconstituteToBlock > s.BlockNumber+1 { + log.Info(fmt.Sprintf("[%s] Blocks execution, reconstitution", s.LogPrefix()), "from", s.BlockNumber, "to", reconstituteToBlock) + if err := ReconstituteState(execCtx, s, cfg.dirs, workersCount, cfg.batchSize, cfg.db, cfg.blockReader, log.New(), cfg.agg, cfg.engine, cfg.chainConfig, cfg.genesis); err != nil { + return err + } + } + } + + prevStageProgress, err := senderStageProgress(tx, cfg.db) + if err != nil { + return err + } + + logPrefix := s.LogPrefix() + var to = prevStageProgress 
+ if toBlock > 0 { + to = cmp.Min(prevStageProgress, toBlock) + } + if to <= s.BlockNumber { + return nil + } + if to > s.BlockNumber+16 { + log.Info(fmt.Sprintf("[%s] Blocks execution", logPrefix), "from", s.BlockNumber, "to", to) + } + rs := state.NewState22() + if err := Exec3(execCtx, s, workersCount, cfg.batchSize, cfg.db, tx, rs, + cfg.blockReader, log.New(), cfg.agg, cfg.engine, + to, + cfg.chainConfig, cfg.genesis); err != nil { + return err + } + + return nil +} + +// reconstituteBlock - First block which is not covered by the history snapshot files +func reconstituteBlock(agg *libstate.Aggregator22, db kv.RoDB, tx kv.Tx) (n uint64, ok bool, err error) { + if tx == nil { + if err = db.View(context.Background(), func(tx kv.Tx) error { + ok, n, err = rawdb.TxNums.FindBlockNum(tx, agg.EndTxNumMinimax()) + return err + }); err != nil { + return + } + } else { + ok, n, err = rawdb.TxNums.FindBlockNum(tx, agg.EndTxNumMinimax()) + } + return +} + +func unwindExec3(u *UnwindState, s *StageState, tx kv.RwTx, ctx context.Context, cfg ExecuteBlockCfg) (err error) { + cfg.agg.SetLogPrefix(s.LogPrefix()) + rs := state.NewState22() + // unwind all txs of u.UnwindPoint block. 
1 txn in begin/end of block - system txs + txNum, err := rawdb.TxNums.Min(tx, u.UnwindPoint+1) + if err != nil { + return err + } + if err := rs.Unwind(ctx, tx, txNum, cfg.agg, cfg.accumulator); err != nil { + return fmt.Errorf("State22.Unwind: %w", err) + } + if err := rs.Flush(tx); err != nil { + return fmt.Errorf("State22.Flush: %w", err) + } + + if err := rawdb.TruncateReceipts(tx, u.UnwindPoint+1); err != nil { + return fmt.Errorf("truncate receipts: %w", err) + } + if err := rawdb.TruncateBorReceipts(tx, u.UnwindPoint+1); err != nil { + return fmt.Errorf("truncate bor receipts: %w", err) + } + if err := rawdb.DeleteNewerEpochs(tx, u.UnwindPoint+1); err != nil { + return fmt.Errorf("delete newer epochs: %w", err) + } + + return nil +} + +func senderStageProgress(tx kv.Tx, db kv.RoDB) (prevStageProgress uint64, err error) { + if tx != nil { + prevStageProgress, err = stages.GetStageProgress(tx, stages.Senders) + if err != nil { + return prevStageProgress, err + } + } else { + if err = db.View(context.Background(), func(tx kv.Tx) error { + prevStageProgress, err = stages.GetStageProgress(tx, stages.Senders) + if err != nil { + return err + } + return nil + }); err != nil { + return prevStageProgress, err + } + } + return prevStageProgress, nil +} + +// ================ Erigon3 End ================ + +func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, quiet bool) (err error) { + if cfg.historyV3 { + return ExecBlock22(s, u, tx, toBlock, ctx, cfg, initialCycle) + } + quit := ctx.Done() useExternalTx := tx != nil if !useExternalTx { @@ -232,7 +386,7 @@ func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint if to <= s.BlockNumber { return nil } - if to > s.BlockNumber+16 { + if !quiet && to > s.BlockNumber+16 { log.Info(fmt.Sprintf("[%s] Blocks execution", logPrefix), "from", s.BlockNumber, "to", to) } @@ -240,7 +394,7 @@ func 
SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint var batch ethdb.DbWithPendingMutations // state is stored through ethdb batches - batch = olddb.NewHashBatch(tx, quit, cfg.tmpdir) + batch = olddb.NewHashBatch(tx, quit, cfg.dirs.Tmp) defer batch.Rollback() // changes are stored through memory buffer @@ -292,17 +446,11 @@ Loop: lastLogTx += uint64(block.Transactions().Len()) - var contractHasTEVM func(contractHash commonold.Hash) (bool, error) - - if cfg.vmConfig.EnableTEMV { - contractHasTEVM = ethdb.GetHasTEVM(tx) - } - // Incremental move of next stages depend on fully written ChangeSets, Receipts, CallTraceSet writeChangeSets := nextStagesExpectData || blockNum > cfg.prune.History.PruneTo(to) writeReceipts := nextStagesExpectData || blockNum > cfg.prune.Receipts.PruneTo(to) writeCallTraces := nextStagesExpectData || blockNum > cfg.prune.CallTraces.PruneTo(to) - if err = executeBlock(block, tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, contractHasTEVM, initialCycle, effectiveEngine); err != nil { + if err = executeBlock(block, tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, initialCycle, effectiveEngine); err != nil { if !errors.Is(err, context.Canceled) { log.Warn(fmt.Sprintf("[%s] Execution failed", logPrefix), "block", blockNum, "hash", block.Hash().String(), "err", err) if cfg.hd != nil { @@ -337,7 +485,7 @@ Loop: // TODO: This creates stacked up deferrals defer tx.Rollback() } - batch = olddb.NewHashBatch(tx, quit, cfg.tmpdir) + batch = olddb.NewHashBatch(tx, quit, cfg.dirs.Tmp) // TODO: This creates stacked up deferrals defer batch.Rollback() } @@ -369,7 +517,7 @@ Loop: return err } if err = batch.Commit(); err != nil { - return fmt.Errorf("batch commit: %v", err) + return fmt.Errorf("batch commit: %w", err) } if !useExternalTx { @@ -378,7 +526,9 @@ Loop: } } - log.Info(fmt.Sprintf("[%s] Completed on", logPrefix), "block", stageProgress) + if !quiet { + 
log.Info(fmt.Sprintf("[%s] Completed on", logPrefix), "block", stageProgress) + } return stoppedErr } @@ -411,7 +561,6 @@ func logProgress(logPrefix string, prevBlock uint64, prevTime time.Time, current } func UnwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) (err error) { - quit := ctx.Done() if u.UnwindPoint >= s.BlockNumber { return nil } @@ -426,7 +575,7 @@ func UnwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context logPrefix := u.LogPrefix() log.Info(fmt.Sprintf("[%s] Unwind Execution", logPrefix), "from", s.BlockNumber, "to", u.UnwindPoint) - if err = unwindExecutionStage(u, s, tx, quit, cfg, initialCycle); err != nil { + if err = unwindExecutionStage(u, s, tx, ctx, cfg, initialCycle); err != nil { return err } if err = u.Done(tx); err != nil { @@ -441,7 +590,7 @@ func UnwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context return nil } -func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, quit <-chan struct{}, cfg ExecuteBlockCfg, initialCycle bool) error { +func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool) error { logPrefix := s.LogPrefix() stateBucket := kv.PlainState storageKeyLength := length.Addr + length.Incarnation + length.Hash @@ -461,9 +610,13 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, quit <-chan accumulator.StartChange(u.UnwindPoint, hash, txs, true) } - changes := etl.NewCollector(logPrefix, cfg.tmpdir, etl.NewOldestEntryBuffer(etl.BufferOptimalSize)) + if cfg.historyV3 { + return unwindExec3(u, s, tx, ctx, cfg) + } + + changes := etl.NewCollector(logPrefix, cfg.dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize)) defer changes.Close() - errRewind := changeset.RewindData(tx, s.BlockNumber, u.UnwindPoint, changes, quit) + errRewind := changeset.RewindData(tx, s.BlockNumber, u.UnwindPoint, changes, 
ctx.Done()) if errRewind != nil { return fmt.Errorf("getting rewind data: %w", errRewind) } @@ -523,6 +676,7 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, quit <-chan copy(address[:], k[:length.Addr]) incarnation = binary.BigEndian.Uint64(k[length.Addr:]) copy(location[:], k[length.Addr+length.Incarnation:]) + log.Debug(fmt.Sprintf("un ch st: %x, %d, %x, %x\n", address, incarnation, location, common.Copy(v))) accumulator.ChangeStorage(address, incarnation, location, common.Copy(v)) } if len(v) > 0 { @@ -536,7 +690,7 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, quit <-chan } return nil - }, etl.TransformArgs{Quit: quit}); err != nil { + }, etl.TransformArgs{Quit: ctx.Done()}); err != nil { return err } @@ -611,6 +765,9 @@ func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx con if err = rawdb.PruneTable(tx, kv.Receipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { return err } + if err = rawdb.PruneTable(tx, kv.BorReceipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxUint32); err != nil { + return err + } // LogIndex.Prune will read everything what not pruned here if err = rawdb.PruneTable(tx, kv.Log, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { return err diff --git a/eth/stagedsync/stage_execute_test.go b/eth/stagedsync/stage_execute_test.go index c74444114d3..e16c4a2e6eb 100644 --- a/eth/stagedsync/stage_execute_test.go +++ b/eth/stagedsync/stage_execute_test.go @@ -2,122 +2,229 @@ package stagedsync import ( "context" + "encoding/binary" + "fmt" "testing" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" + libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common/changeset" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/eth/ethconfig" 
"github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/prune" - "github.com/stretchr/testify/assert" + "github.com/ledgerwatch/erigon/params" + "github.com/stretchr/testify/require" ) -func TestUnwindExecutionStagePlainStatic(t *testing.T) { - ctx, assert := context.Background(), assert.New(t) - _, tx1 := memdb.NewTestTx(t) - _, tx2 := memdb.NewTestTx(t) - - generateBlocks(t, 1, 25, plainWriterGen(tx1), staticCodeStaticIncarnations) - generateBlocks(t, 1, 50, plainWriterGen(tx2), staticCodeStaticIncarnations) - - err := stages.SaveStageProgress(tx2, stages.Execution, 50) - assert.NoError(err) - - u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} - s := &StageState{ID: stages.Execution, BlockNumber: 50} - err = UnwindExecutionStage(u, s, tx2, ctx, ExecuteBlockCfg{}, false) - assert.NoError(err) - - compareCurrentState(t, tx1, tx2, kv.PlainState, kv.PlainContractCode, kv.ContractTEVMCode) +func TestExec(t *testing.T) { + ctx, db1, db2 := context.Background(), memdb.NewTestDB(t), memdb.NewTestDB(t) + cfg := ExecuteBlockCfg{} + + t.Run("UnwindExecutionStagePlainStatic", func(t *testing.T) { + require, tx1, tx2 := require.New(t), memdb.BeginRw(t, db1), memdb.BeginRw(t, db2) + + generateBlocks(t, 1, 25, plainWriterGen(tx1), staticCodeStaticIncarnations) + generateBlocks(t, 1, 50, plainWriterGen(tx2), staticCodeStaticIncarnations) + + err := stages.SaveStageProgress(tx2, stages.Execution, 50) + require.NoError(err) + + u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} + s := &StageState{ID: stages.Execution, BlockNumber: 50} + err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false) + require.NoError(err) + + compareCurrentState(t, tx1, tx2, kv.PlainState, kv.PlainContractCode, kv.ContractTEVMCode) + }) + t.Run("UnwindExecutionStagePlainWithIncarnationChanges", func(t *testing.T) { + require, tx1, tx2 := require.New(t), memdb.BeginRw(t, db1), memdb.BeginRw(t, db2) + + generateBlocks(t, 1, 25, plainWriterGen(tx1), 
changeCodeWithIncarnations) + generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations) + + err := stages.SaveStageProgress(tx2, stages.Execution, 50) + require.NoError(err) + + u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} + s := &StageState{ID: stages.Execution, BlockNumber: 50} + err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false) + require.NoError(err) + + compareCurrentState(t, tx1, tx2, kv.PlainState, kv.PlainContractCode) + }) + t.Run("UnwindExecutionStagePlainWithCodeChanges", func(t *testing.T) { + t.Skip("not supported yet, to be restored") + require, tx1, tx2 := require.New(t), memdb.BeginRw(t, db1), memdb.BeginRw(t, db2) + + generateBlocks(t, 1, 25, plainWriterGen(tx1), changeCodeIndepenentlyOfIncarnations) + generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeIndepenentlyOfIncarnations) + + err := stages.SaveStageProgress(tx2, stages.Execution, 50) + if err != nil { + t.Errorf("error while saving progress: %v", err) + } + u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} + s := &StageState{ID: stages.Execution, BlockNumber: 50} + err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false) + require.NoError(err) + + compareCurrentState(t, tx1, tx2, kv.PlainState, kv.PlainContractCode) + }) + + t.Run("PruneExecution", func(t *testing.T) { + require, tx := require.New(t), memdb.BeginRw(t, db1) + + generateBlocks(t, 1, 20, plainWriterGen(tx), changeCodeIndepenentlyOfIncarnations) + err := stages.SaveStageProgress(tx, stages.Execution, 20) + require.NoError(err) + + available, err := changeset.AvailableFrom(tx) + require.NoError(err) + require.Equal(uint64(1), available) + + s := &PruneState{ID: stages.Execution, ForwardProgress: 20} + // check pruning distance > than current stage progress + err = PruneExecutionStage(s, tx, ExecuteBlockCfg{prune: prune.Mode{History: prune.Distance(100), Receipts: prune.Distance(101), CallTraces: prune.Distance(200)}}, ctx, false) + require.NoError(err) + + available, err = 
changeset.AvailableFrom(tx) + require.NoError(err) + require.Equal(uint64(1), available) + available, err = changeset.AvailableStorageFrom(tx) + require.NoError(err) + require.Equal(uint64(1), available) + + // pruning distance, first run + err = PruneExecutionStage(s, tx, ExecuteBlockCfg{prune: prune.Mode{History: prune.Distance(5), + Receipts: prune.Distance(10), CallTraces: prune.Distance(15)}}, ctx, false) + require.NoError(err) + + available, err = changeset.AvailableFrom(tx) + require.NoError(err) + require.Equal(uint64(15), available) + available, err = changeset.AvailableStorageFrom(tx) + require.NoError(err) + require.Equal(uint64(15), available) + + // pruning distance, second run + err = PruneExecutionStage(s, tx, ExecuteBlockCfg{prune: prune.Mode{History: prune.Distance(5), + Receipts: prune.Distance(15), CallTraces: prune.Distance(25)}}, ctx, false) + require.NoError(err) + + available, err = changeset.AvailableFrom(tx) + require.NoError(err) + require.Equal(uint64(15), available) + available, err = changeset.AvailableStorageFrom(tx) + require.NoError(err) + require.Equal(uint64(15), available) + }) } -func TestUnwindExecutionStagePlainWithIncarnationChanges(t *testing.T) { - ctx, assert := context.Background(), assert.New(t) - _, tx1 := memdb.NewTestTx(t) - _, tx2 := memdb.NewTestTx(t) - - generateBlocks(t, 1, 25, plainWriterGen(tx1), changeCodeWithIncarnations) - generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations) - - err := stages.SaveStageProgress(tx2, stages.Execution, 50) - assert.NoError(err) - - u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} - s := &StageState{ID: stages.Execution, BlockNumber: 50} - err = UnwindExecutionStage(u, s, tx2, ctx, ExecuteBlockCfg{}, false) - assert.NoError(err) - - compareCurrentState(t, tx1, tx2, kv.PlainState, kv.PlainContractCode) +func apply(tx kv.RwTx, agg *libstate.Aggregator22) (beforeBlock, afterBlock testGenHook, w state.StateWriter) { + agg.SetTx(tx) + rs := 
state.NewState22() + stateWriter := state.NewStateWriter22(rs) + return func(n, from, numberOfBlocks uint64) { + stateWriter.SetTxNum(n) + stateWriter.ResetWriteSet() + }, func(n, from, numberOfBlocks uint64) { + txTask := &state.TxTask{ + BlockNum: n, + Rules: params.TestRules, + Block: nil, + TxNum: n, + TxIndex: 0, + Final: true, + WriteLists: stateWriter.WriteSet(), + } + txTask.AccountPrevs, txTask.AccountDels, txTask.StoragePrevs, txTask.CodePrevs = stateWriter.PrevAndDels() + if err := rs.Apply(tx, txTask, agg); err != nil { + panic(err) + } + if n == from+numberOfBlocks-1 { + err := rs.Flush(tx) + if err != nil { + panic(err) + } + } + }, stateWriter } -func TestUnwindExecutionStagePlainWithCodeChanges(t *testing.T) { - t.Skip("not supported yet, to be restored") - ctx := context.Background() - _, tx1 := memdb.NewTestTx(t) - _, tx2 := memdb.NewTestTx(t) - - generateBlocks(t, 1, 25, plainWriterGen(tx1), changeCodeIndepenentlyOfIncarnations) - generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeIndepenentlyOfIncarnations) - - err := stages.SaveStageProgress(tx2, stages.Execution, 50) - if err != nil { - t.Errorf("error while saving progress: %v", err) - } - u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} - s := &StageState{ID: stages.Execution, BlockNumber: 50} - err = UnwindExecutionStage(u, s, tx2, ctx, ExecuteBlockCfg{}, false) - if err != nil { - t.Errorf("error while unwinding state: %v", err) - } - - compareCurrentState(t, tx1, tx2, kv.PlainState, kv.PlainContractCode) +func newAgg(t *testing.T) *libstate.Aggregator22 { + t.Helper() + agg, err := libstate.NewAggregator22(t.TempDir(), ethconfig.HistoryV3AggregationStep) + require.NoError(t, err) + err = agg.ReopenFiles() + require.NoError(t, err) + return agg } -func TestPruneExecution(t *testing.T) { - ctx, assert := context.Background(), assert.New(t) - _, tx := memdb.NewTestTx(t) - - generateBlocks(t, 1, 20, plainWriterGen(tx), changeCodeIndepenentlyOfIncarnations) - err := 
stages.SaveStageProgress(tx, stages.Execution, 20) - assert.NoError(err) - - available, err := changeset.AvailableFrom(tx) - assert.NoError(err) - assert.Equal(uint64(1), available) - - s := &PruneState{ID: stages.Execution, ForwardProgress: 20} - // check pruning distance > than current stage progress - err = PruneExecutionStage(s, tx, ExecuteBlockCfg{prune: prune.Mode{History: prune.Distance(100), Receipts: prune.Distance(101), CallTraces: prune.Distance(200)}}, ctx, false) - assert.NoError(err) - - available, err = changeset.AvailableFrom(tx) - assert.NoError(err) - assert.Equal(uint64(1), available) - available, err = changeset.AvailableStorageFrom(tx) - assert.NoError(err) - assert.Equal(uint64(1), available) - - // pruning distance, first run - err = PruneExecutionStage(s, tx, ExecuteBlockCfg{prune: prune.Mode{History: prune.Distance(5), - Receipts: prune.Distance(10), CallTraces: prune.Distance(15)}}, ctx, false) - assert.NoError(err) - - available, err = changeset.AvailableFrom(tx) - assert.NoError(err) - assert.Equal(uint64(15), available) - available, err = changeset.AvailableStorageFrom(tx) - assert.NoError(err) - assert.Equal(uint64(15), available) - - // pruning distance, second run - err = PruneExecutionStage(s, tx, ExecuteBlockCfg{prune: prune.Mode{History: prune.Distance(5), - Receipts: prune.Distance(15), CallTraces: prune.Distance(25)}}, ctx, false) - assert.NoError(err) - - available, err = changeset.AvailableFrom(tx) - assert.NoError(err) - assert.Equal(uint64(15), available) - available, err = changeset.AvailableStorageFrom(tx) - assert.NoError(err) - assert.Equal(uint64(15), available) +func TestExec22(t *testing.T) { + ctx, db1, db2 := context.Background(), memdb.NewTestDB(t), memdb.NewTestDB(t) + agg := newAgg(t) + cfg := ExecuteBlockCfg{historyV3: true, agg: agg} + + t.Run("UnwindExecutionStagePlainStatic", func(t *testing.T) { + require, tx1, tx2 := require.New(t), memdb.BeginRw(t, db1), memdb.BeginRw(t, db2) + + beforeBlock, afterBlock, 
stateWriter := apply(tx1, agg) + generateBlocks2(t, 1, 25, stateWriter, beforeBlock, afterBlock, staticCodeStaticIncarnations) + beforeBlock, afterBlock, stateWriter = apply(tx2, agg) + generateBlocks2(t, 1, 50, stateWriter, beforeBlock, afterBlock, staticCodeStaticIncarnations) + + err := stages.SaveStageProgress(tx2, stages.Execution, 50) + require.NoError(err) + + for i := uint64(0); i < 50; i++ { + err = rawdb.TxNums.Append(tx2, i, i) + require.NoError(err) + } + + u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} + s := &StageState{ID: stages.Execution, BlockNumber: 50} + err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false) + require.NoError(err) + + compareCurrentState(t, tx1, tx2, kv.PlainState, kv.PlainContractCode) + }) + t.Run("UnwindExecutionStagePlainWithIncarnationChanges", func(t *testing.T) { + t.Skip("we don't delete newer incarnations - seems it's a feature?") + require, tx1, tx2 := require.New(t), memdb.BeginRw(t, db1), memdb.BeginRw(t, db2) + + beforeBlock, afterBlock, stateWriter := apply(tx1, agg) + generateBlocks2(t, 1, 25, stateWriter, beforeBlock, afterBlock, changeCodeWithIncarnations) + beforeBlock, afterBlock, stateWriter = apply(tx2, agg) + generateBlocks2(t, 1, 50, stateWriter, beforeBlock, afterBlock, changeCodeWithIncarnations) + + err := stages.SaveStageProgress(tx2, stages.Execution, 50) + require.NoError(err) + + for i := uint64(0); i < 50; i++ { + err = rawdb.TxNums.Append(tx2, i, i) + require.NoError(err) + } + + u := &UnwindState{ID: stages.Execution, UnwindPoint: 25} + s := &StageState{ID: stages.Execution, BlockNumber: 50} + err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false) + require.NoError(err) + + tx1.ForEach(kv.PlainState, nil, func(k, v []byte) error { + if len(k) > 20 { + fmt.Printf("a: inc=%d, loc=%x, v=%x\n", binary.BigEndian.Uint64(k[20:]), k[28:], v) + } + return nil + }) + tx2.ForEach(kv.PlainState, nil, func(k, v []byte) error { + if len(k) > 20 { + fmt.Printf("b: inc=%d, loc=%x, v=%x\n", 
binary.BigEndian.Uint64(k[20:]), k[28:], v) + } + return nil + }) + + compareCurrentState(t, tx1, tx2, kv.PlainState, kv.PlainContractCode) + }) } diff --git a/eth/stagedsync/stage_finish.go b/eth/stagedsync/stage_finish.go index c12ef0f410b..55a2cfbb8c7 100644 --- a/eth/stagedsync/stage_finish.go +++ b/eth/stagedsync/stage_finish.go @@ -12,6 +12,7 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" common2 "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" @@ -25,17 +26,13 @@ import ( type FinishCfg struct { db kv.RwDB tmpDir string - log log.Logger - headCh chan *types.Block forkValidator *engineapi.ForkValidator } -func StageFinishCfg(db kv.RwDB, tmpDir string, logger log.Logger, headCh chan *types.Block, forkValidator *engineapi.ForkValidator) FinishCfg { +func StageFinishCfg(db kv.RwDB, tmpDir string, forkValidator *engineapi.ForkValidator) FinishCfg { return FinishCfg{ db: db, - log: logger, tmpDir: tmpDir, - headCh: headCh, forkValidator: forkValidator, } } @@ -74,14 +71,6 @@ func FinishForward(s *StageState, tx kv.RwTx, cfg FinishCfg, initialCycle bool) } } - if cfg.headCh != nil { - select { - case cfg.headCh <- rawdb.ReadCurrentBlock(tx): - default: - } - - } - if !useExternalTx { if err := tx.Commit(); err != nil { return err @@ -151,31 +140,45 @@ func NotifyNewHeaders(ctx context.Context, finishStageBeforeSync uint64, finishS } notifyFrom++ - var notifyTo uint64 = notifyFrom + var notifyTo = notifyFrom + var notifyToHash common.Hash var headersRlp [][]byte if err := tx.ForEach(kv.Headers, dbutils.EncodeBlockNumber(notifyFrom), func(k, headerRLP []byte) error { if len(headerRLP) == 0 { return nil } notifyTo = binary.BigEndian.Uint64(k) - headersRlp = append(headersRlp, common2.CopyBytes(headerRLP)) + var err error + if 
notifyToHash, err = rawdb.ReadCanonicalHash(tx, notifyTo); err != nil { + log.Warn("[Finish] failed checking if header is cannonical") + } + + headerHash := common.BytesToHash(k[8:]) + if notifyToHash == headerHash { + headersRlp = append(headersRlp, common2.CopyBytes(headerRLP)) + } + return libcommon.Stopped(ctx.Done()) }); err != nil { log.Error("RPC Daemon notification failed", "err", err) return err } - notifier.OnNewHeader(headersRlp) - headerTiming := time.Since(t) - t = time.Now() - if notifier.HasLogSubsriptions() { - logs, err := ReadLogs(tx, notifyFrom, isUnwind) - if err != nil { - return err + + if len(headersRlp) > 0 { + notifier.OnNewHeader(headersRlp) + headerTiming := time.Since(t) + + t = time.Now() + if notifier.HasLogSubsriptions() { + logs, err := ReadLogs(tx, notifyFrom, isUnwind) + if err != nil { + return err + } + notifier.OnLogs(logs) } - notifier.OnLogs(logs) + logTiming := time.Since(t) + log.Info("RPC Daemon notified of new headers", "from", notifyFrom-1, "to", notifyTo, "hash", notifyToHash, "header sending", headerTiming, "log sending", logTiming) } - logTiming := time.Since(t) - log.Info("RPC Daemon notified of new headers", "from", notifyFrom-1, "to", notifyTo, "header sending", headerTiming, "log sending", logTiming) return nil } diff --git a/eth/stagedsync/stage_hashstate.go b/eth/stagedsync/stage_hashstate.go index cb8c0a35a06..32ea79fc50b 100644 --- a/eth/stagedsync/stage_hashstate.go +++ b/eth/stagedsync/stage_hashstate.go @@ -5,7 +5,6 @@ import ( "context" "encoding/binary" "fmt" - "os" "runtime" "time" @@ -13,26 +12,35 @@ import ( "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/core/rawdb" 
"github.com/ledgerwatch/erigon/core/types/accounts" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/log/v3" ) type HashStateCfg struct { - db kv.RwDB - tmpDir string + db kv.RwDB + dirs datadir.Dirs + + historyV3 bool + agg *state.Aggregator22 } -func StageHashStateCfg(db kv.RwDB, tmpDir string) HashStateCfg { +func StageHashStateCfg(db kv.RwDB, dirs datadir.Dirs, historyV3 bool, agg *state.Aggregator22) HashStateCfg { return HashStateCfg{ - db: db, - tmpDir: tmpDir, + db: db, + dirs: dirs, + historyV3: historyV3, + agg: agg, } } -func SpawnHashStateStage(s *StageState, tx kv.RwTx, cfg HashStateCfg, ctx context.Context) error { +func SpawnHashStateStage(s *StageState, tx kv.RwTx, cfg HashStateCfg, ctx context.Context, quiet bool) error { useExternalTx := tx != nil if !useExternalTx { var err error @@ -58,7 +66,7 @@ func SpawnHashStateStage(s *StageState, tx kv.RwTx, cfg HashStateCfg, ctx contex return fmt.Errorf("hashstate: promotion backwards from %d to %d", s.BlockNumber, to) } - if to > s.BlockNumber+16 { + if !quiet && to > s.BlockNumber+16 { log.Info(fmt.Sprintf("[%s] Promoting plain state", logPrefix), "from", s.BlockNumber, "to", to) } if s.BlockNumber == 0 { // Initial hashing of the state is performed at the previous stage @@ -66,7 +74,7 @@ func SpawnHashStateStage(s *StageState, tx kv.RwTx, cfg HashStateCfg, ctx contex return err } } else { - if err := promoteHashedStateIncrementally(logPrefix, s, s.BlockNumber, to, tx, cfg, ctx.Done()); err != nil { + if err := promoteHashedStateIncrementally(logPrefix, s.BlockNumber, to, tx, cfg, ctx.Done(), quiet); err != nil { return err } } @@ -111,8 +119,20 @@ func UnwindHashStateStage(u *UnwindState, s *StageState, tx kv.RwTx, cfg HashSta func unwindHashStateStageImpl(logPrefix string, u *UnwindState, s *StageState, tx kv.RwTx, cfg HashStateCfg, quit <-chan struct{}) error { // Currently it does not require unwinding because it does not create any Intermediate Hash records // and 
recomputes the state root from scratch - prom := NewPromoter(tx, quit) - prom.TempDir = cfg.tmpDir + prom := NewPromoter(tx, cfg.dirs, quit) + if cfg.historyV3 { + cfg.agg.SetTx(tx) + if err := prom.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint+1, false, true); err != nil { + return err + } + if err := prom.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint+1, false, false); err != nil { + return err + } + if err := prom.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint+1, true, false); err != nil { + return err + } + return nil + } if err := prom.Unwind(logPrefix, s, u, false /* storage */, true /* codes */); err != nil { return err } @@ -129,7 +149,7 @@ func PromoteHashedStateCleanly(logPrefix string, tx kv.RwTx, cfg HashStateCfg, c if err := promotePlainState( logPrefix, tx, - cfg.tmpDir, + cfg.dirs.Tmp, etl.IdentityLoadFunc, ctx.Done(), ); err != nil { @@ -141,7 +161,7 @@ func PromoteHashedStateCleanly(logPrefix string, tx kv.RwTx, cfg HashStateCfg, c tx, kv.PlainContractCode, kv.ContractCode, - cfg.tmpDir, + cfg.dirs.Tmp, keyTransformExtractFunc(transformContractCodeKey), etl.IdentityLoadFunc, etl.TransformArgs{ @@ -316,19 +336,19 @@ func (l *OldestAppearedLoad) LoadFunc(k, v []byte, table etl.CurrentTableReader, return l.innerLoadFunc(k, v, table, next) } -func NewPromoter(db kv.RwTx, quitCh <-chan struct{}) *Promoter { +func NewPromoter(db kv.RwTx, dirs datadir.Dirs, quitCh <-chan struct{}) *Promoter { return &Promoter{ - db: db, + tx: db, ChangeSetBufSize: 256 * 1024 * 1024, - TempDir: os.TempDir(), + dirs: dirs, quitCh: quitCh, } } type Promoter struct { - db kv.RwTx + tx kv.RwTx ChangeSetBufSize uint64 - TempDir string + dirs datadir.Dirs quitCh <-chan struct{} } @@ -348,7 +368,6 @@ func getExtractFunc(db kv.Tx, changeSetBucket string) etl.ExtractFunc { if err != nil { return err } - return next(dbKey, newK, value) } } @@ -475,42 +494,161 @@ func getCodeUnwindExtractFunc(db kv.Tx, changeSetBucket 
string) etl.ExtractFunc } } -func (p *Promoter) Promote(logPrefix string, s *StageState, from, to uint64, storage bool, codes bool) error { +func (p *Promoter) PromoteOnHistoryV3(logPrefix string, agg *state.Aggregator22, from, to uint64, storage, codes bool, quiet bool) error { + if !quiet && to > from+16 { + log.Info(fmt.Sprintf("[%s] Incremental promotion", logPrefix), "from", from, "to", to, "codes", codes, "storage", storage) + } + + txnFrom, err := rawdb.TxNums.Min(p.tx, from+1) + if err != nil { + return err + } + txnTo := uint64(math.MaxUint64) + collector := etl.NewCollector(logPrefix, p.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer collector.Close() + if codes { + cCtx := agg.Accounts().InvertedIndex.MakeContext() + cIt := cCtx.IterateChangedKeys(txnFrom, txnTo, p.tx) + defer cIt.Close() + + for cIt.HasNext() { + k := cIt.Next(nil) + + value, err := p.tx.GetOne(kv.PlainState, k) + if err != nil { + return err + } + if len(value) == 0 { + return nil + } + incarnation, err := accounts.DecodeIncarnationFromStorage(value) + if err != nil { + return err + } + if incarnation == 0 { + return nil + } + plainKey := dbutils.PlainGenerateStoragePrefix(k, incarnation) + var codeHash []byte + codeHash, err = p.tx.GetOne(kv.PlainContractCode, plainKey) + if err != nil { + return fmt.Errorf("getFromPlainCodesAndLoad for %x, inc %d: %w", plainKey, incarnation, err) + } + if codeHash == nil { + return nil + } + newK, err := transformContractCodeKey(plainKey) + if err != nil { + return err + } + + if err := collector.Collect(newK, value); err != nil { + return err + } + } + if err := collector.Load(p.tx, kv.ContractCode, etl.IdentityLoadFunc, etl.TransformArgs{Quit: p.quitCh}); err != nil { + return err + } + return nil + } + + if storage { + sCtx := agg.Storage().InvertedIndex.MakeContext() + sIt := sCtx.IterateChangedKeys(txnFrom, txnTo, p.tx) + defer sIt.Close() + for sIt.HasNext() { + k := sIt.Next(nil) + + accBytes, err := p.tx.GetOne(kv.PlainState, 
k[:20]) + if err != nil { + return err + } + incarnation := uint64(1) + if len(accBytes) != 0 { + incarnation, err = accounts.DecodeIncarnationFromStorage(accBytes) + if err != nil { + return err + } + if incarnation == 0 { + return nil + } + } + plainKey := dbutils.PlainGenerateCompositeStorageKey(k[:20], incarnation, k[20:]) + newV, err := p.tx.GetOne(kv.PlainState, plainKey) + if err != nil { + return err + } + newK, err := transformPlainStateKey(plainKey) + if err != nil { + return err + } + + if err := collector.Collect(newK, newV); err != nil { + return err + } + } + if err := collector.Load(p.tx, kv.HashedStorage, etl.IdentityLoadFunc, etl.TransformArgs{Quit: p.quitCh}); err != nil { + return err + } + return nil + } + + aCtx := agg.Accounts().InvertedIndex.MakeContext() + aIt := aCtx.IterateChangedKeys(txnFrom, txnTo, p.tx) + defer aIt.Close() + for aIt.HasNext() { + k := aIt.Next(nil) + value, err := p.tx.GetOne(kv.PlainState, k) + if err != nil { + return err + } + newK, err := transformPlainStateKey(k) + if err != nil { + return err + } + + if err := collector.Collect(newK, value); err != nil { + return err + } + } + if err := collector.Load(p.tx, kv.HashedAccounts, etl.IdentityLoadFunc, etl.TransformArgs{Quit: p.quitCh}); err != nil { + return err + } + return nil +} +func (p *Promoter) Promote(logPrefix string, from, to uint64, storage, codes bool, quiet bool) error { var changeSetBucket string if storage { changeSetBucket = kv.StorageChangeSet } else { changeSetBucket = kv.AccountChangeSet } - if to > from+16 { - log.Info(fmt.Sprintf("[%s] Incremental promotion started", logPrefix), "from", from, "to", to, "codes", codes, "csbucket", changeSetBucket) + if !quiet && to > from+16 { + log.Info(fmt.Sprintf("[%s] Incremental promotion", logPrefix), "from", from, "to", to, "codes", codes, "csbucket", changeSetBucket) } startkey := dbutils.EncodeBlockNumber(from + 1) - var l OldestAppearedLoad - l.innerLoadFunc = etl.IdentityLoadFunc - var loadBucket string 
var extract etl.ExtractFunc if codes { loadBucket = kv.ContractCode - extract = getExtractCode(p.db, changeSetBucket) + extract = getExtractCode(p.tx, changeSetBucket) } else { if storage { loadBucket = kv.HashedStorage } else { loadBucket = kv.HashedAccounts } - extract = getExtractFunc(p.db, changeSetBucket) + extract = getExtractFunc(p.tx, changeSetBucket) } if err := etl.Transform( logPrefix, - p.db, + p.tx, changeSetBucket, loadBucket, - p.TempDir, + p.dirs.Tmp, extract, etl.IdentityLoadFunc, etl.TransformArgs{ @@ -525,6 +663,112 @@ func (p *Promoter) Promote(logPrefix string, s *StageState, from, to uint64, sto return nil } +func (p *Promoter) UnwindOnHistoryV3(logPrefix string, agg *state.Aggregator22, unwindFrom, unwindTo uint64, storage, codes bool) error { + log.Info(fmt.Sprintf("[%s] Unwinding started", logPrefix), "from", unwindFrom, "to", unwindTo, "storage", storage, "codes", codes) + + txnFrom, err := rawdb.TxNums.Min(p.tx, unwindTo) + if err != nil { + return err + } + txnTo := uint64(math.MaxUint64) + collector := etl.NewCollector(logPrefix, p.dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize)) + defer collector.Close() + var k, v []byte + + acc := accounts.NewAccount() + if codes { + it := agg.Accounts().MakeContext().IterateChanged(txnFrom, txnTo, p.tx) + defer it.Close() + for it.HasNext() { + k, v = it.Next(k[:0], v[:0]) + if len(v) == 0 { + continue + } + if err := accounts.Deserialise2(&acc, v); err != nil { + return err + } + + incarnation := acc.Incarnation + if incarnation == 0 { + continue + } + plainKey := dbutils.PlainGenerateStoragePrefix(k, incarnation) + codeHash, err := p.tx.GetOne(kv.PlainContractCode, plainKey) + if err != nil { + return fmt.Errorf("getCodeUnwindExtractFunc: %w, key=%x", err, plainKey) + } + newK, err := transformContractCodeKey(plainKey) + if err != nil { + return err + } + if err = collector.Collect(newK, codeHash); err != nil { + return err + } + } + return collector.Load(p.tx, kv.ContractCode, 
etl.IdentityLoadFunc, etl.TransformArgs{Quit: p.quitCh}) + } + + if storage { + it := agg.Storage().MakeContext().IterateChanged(txnFrom, txnTo, p.tx) + defer it.Close() + for it.HasNext() { + k, v = it.Next(k[:0], v[:0]) + val, err := p.tx.GetOne(kv.PlainState, k[:20]) + if err != nil { + return err + } + incarnation := uint64(1) + if len(val) != 0 { + oldInc, _ := accounts.DecodeIncarnationFromStorage(val) + incarnation = oldInc + } + plainKey := dbutils.PlainGenerateCompositeStorageKey(k[:20], incarnation, k[20:]) + newK, err := transformPlainStateKey(plainKey) + if err != nil { + return err + } + if err := collector.Collect(newK, v); err != nil { + return err + } + } + return collector.Load(p.tx, kv.HashedStorage, etl.IdentityLoadFunc, etl.TransformArgs{Quit: p.quitCh}) + } + + it := agg.Accounts().MakeContext().IterateChanged(txnFrom, txnTo, p.tx) + defer it.Close() + for it.HasNext() { + k, v = it.Next(k[:0], v[:0]) + newK, err := transformPlainStateKey(k) + if err != nil { + return err + } + + if len(v) == 0 { + if err = collector.Collect(newK, nil); err != nil { + return err + } + continue + } + if err := accounts.Deserialise2(&acc, v); err != nil { + return err + } + if acc.Incarnation > 0 && acc.IsEmptyCodeHash() { + if codeHash, err := p.tx.GetOne(kv.ContractCode, dbutils.GenerateStoragePrefix(newK, acc.Incarnation)); err == nil { + copy(acc.CodeHash[:], codeHash) + } else { + return fmt.Errorf("adjusting codeHash for ks %x, inc %d: %w", newK, acc.Incarnation, err) + } + } + + value := make([]byte, acc.EncodingLengthForStorage()) + acc.EncodeForStorage(value) + if err := collector.Collect(newK, value); err != nil { + return err + } + } + return collector.Load(p.tx, kv.HashedAccounts, etl.IdentityLoadFunc, etl.TransformArgs{Quit: p.quitCh}) +} + func (p *Promoter) Unwind(logPrefix string, s *StageState, u *UnwindState, storage bool, codes bool) error { var changeSetBucket string if storage { @@ -540,29 +784,28 @@ func (p *Promoter) Unwind(logPrefix 
string, s *StageState, u *UnwindState, stora startkey := dbutils.EncodeBlockNumber(to + 1) var l OldestAppearedLoad + l.innerLoadFunc = etl.IdentityLoadFunc var loadBucket string var extractFunc etl.ExtractFunc if codes { loadBucket = kv.ContractCode - extractFunc = getCodeUnwindExtractFunc(p.db, changeSetBucket) - l.innerLoadFunc = etl.IdentityLoadFunc + extractFunc = getCodeUnwindExtractFunc(p.tx, changeSetBucket) } else { - l.innerLoadFunc = etl.IdentityLoadFunc if storage { loadBucket = kv.HashedStorage extractFunc = getUnwindExtractStorage(changeSetBucket) } else { loadBucket = kv.HashedAccounts - extractFunc = getUnwindExtractAccounts(p.db, changeSetBucket) + extractFunc = getUnwindExtractAccounts(p.tx, changeSetBucket) } } return etl.Transform( logPrefix, - p.db, + p.tx, changeSetBucket, loadBucket, - p.TempDir, + p.dirs.Tmp, extractFunc, l.LoadFunc, etl.TransformArgs{ @@ -579,16 +822,29 @@ func (p *Promoter) Unwind(logPrefix string, s *StageState, u *UnwindState, stora ) } -func promoteHashedStateIncrementally(logPrefix string, s *StageState, from, to uint64, db kv.RwTx, cfg HashStateCfg, quit <-chan struct{}) error { - prom := NewPromoter(db, quit) - prom.TempDir = cfg.tmpDir - if err := prom.Promote(logPrefix, s, from, to, false /* storage */, true /* codes */); err != nil { +func promoteHashedStateIncrementally(logPrefix string, from, to uint64, tx kv.RwTx, cfg HashStateCfg, quit <-chan struct{}, quiet bool) error { + prom := NewPromoter(tx, cfg.dirs, quit) + if cfg.historyV3 { + cfg.agg.SetTx(tx) + if err := prom.PromoteOnHistoryV3(logPrefix, cfg.agg, from, to, false, true, quiet); err != nil { + return err + } + if err := prom.PromoteOnHistoryV3(logPrefix, cfg.agg, from, to, false, false, quiet); err != nil { + return err + } + if err := prom.PromoteOnHistoryV3(logPrefix, cfg.agg, from, to, true, false, quiet); err != nil { + return err + } + return nil + } + + if err := prom.Promote(logPrefix, from, to, false, true, quiet); err != nil { return err } - 
if err := prom.Promote(logPrefix, s, from, to, false /* storage */, false /* codes */); err != nil { + if err := prom.Promote(logPrefix, from, to, false, false, quiet); err != nil { return err } - if err := prom.Promote(logPrefix, s, from, to, true /* storage */, false /* codes */); err != nil { + if err := prom.Promote(logPrefix, from, to, true, false, quiet); err != nil { return err } return nil diff --git a/eth/stagedsync/stage_hashstate_test.go b/eth/stagedsync/stage_hashstate_test.go index a0486c1db98..89b5a4210c2 100644 --- a/eth/stagedsync/stage_hashstate_test.go +++ b/eth/stagedsync/stage_hashstate_test.go @@ -7,19 +7,22 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/stretchr/testify/require" libcommon "github.com/ledgerwatch/erigon-lib/common" ) func TestPromoteHashedStateClearState(t *testing.T) { + dirs := datadir.New(t.TempDir()) + historyV3 := false _, tx1 := memdb.NewTestTx(t) db2, tx2 := memdb.NewTestTx(t) generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations) generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations) - err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, t.TempDir()), context.Background()) + err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, dirs, historyV3, nil), context.Background()) if err != nil { t.Errorf("error while promoting state: %v", err) } @@ -28,13 +31,15 @@ func TestPromoteHashedStateClearState(t *testing.T) { } func TestPromoteHashedStateIncremental(t *testing.T) { + dirs := datadir.New(t.TempDir()) + historyV3 := false _, tx1 := memdb.NewTestTx(t) db2, tx2 := memdb.NewTestTx(t) generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations) generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations) - cfg := StageHashStateCfg(db2, t.TempDir()) + cfg := StageHashStateCfg(db2, dirs, historyV3, nil) err 
:= PromoteHashedStateCleanly("logPrefix", tx2, cfg, context.Background()) if err != nil { t.Errorf("error while promoting state: %v", err) @@ -43,7 +48,7 @@ func TestPromoteHashedStateIncremental(t *testing.T) { generateBlocks(t, 51, 50, hashedWriterGen(tx1), changeCodeWithIncarnations) generateBlocks(t, 51, 50, plainWriterGen(tx2), changeCodeWithIncarnations) - err = promoteHashedStateIncrementally("logPrefix", &StageState{BlockNumber: 50}, 50, 101, tx2, cfg, nil) + err = promoteHashedStateIncrementally("logPrefix", 50, 101, tx2, cfg, nil /* quit */, false /* quiet */) if err != nil { t.Errorf("error while promoting state: %v", err) } @@ -52,6 +57,8 @@ func TestPromoteHashedStateIncremental(t *testing.T) { } func TestPromoteHashedStateIncrementalMixed(t *testing.T) { + dirs := datadir.New(t.TempDir()) + historyV3 := false _, tx1 := memdb.NewTestTx(t) db2, tx2 := memdb.NewTestTx(t) @@ -59,7 +66,7 @@ func TestPromoteHashedStateIncrementalMixed(t *testing.T) { generateBlocks(t, 1, 50, hashedWriterGen(tx2), changeCodeWithIncarnations) generateBlocks(t, 51, 50, plainWriterGen(tx2), changeCodeWithIncarnations) - err := promoteHashedStateIncrementally("logPrefix", &StageState{}, 50, 101, tx2, StageHashStateCfg(db2, t.TempDir()), nil) + err := promoteHashedStateIncrementally("logPrefix", 50, 101, tx2, StageHashStateCfg(db2, dirs, historyV3, nil), nil /* quit */, false /* quiet */) if err != nil { t.Errorf("error while promoting state: %v", err) } @@ -67,19 +74,21 @@ func TestPromoteHashedStateIncrementalMixed(t *testing.T) { } func TestUnwindHashed(t *testing.T) { + dirs := datadir.New(t.TempDir()) + historyV3 := false _, tx1 := memdb.NewTestTx(t) db2, tx2 := memdb.NewTestTx(t) generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations) generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations) - err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, t.TempDir()), context.Background()) + err := 
PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, dirs, historyV3, nil), context.Background()) if err != nil { t.Errorf("error while promoting state: %v", err) } u := &UnwindState{UnwindPoint: 50} s := &StageState{BlockNumber: 100} - err = unwindHashStateStageImpl("logPrefix", u, s, tx2, StageHashStateCfg(db2, t.TempDir()), nil) + err = unwindHashStateStageImpl("logPrefix", u, s, tx2, StageHashStateCfg(db2, dirs, historyV3, nil), nil) if err != nil { t.Errorf("error while unwind state: %v", err) } @@ -88,6 +97,7 @@ func TestUnwindHashed(t *testing.T) { } func TestPromoteIncrementallyShutdown(t *testing.T) { + historyV3 := false tt := []struct { name string @@ -101,6 +111,7 @@ func TestPromoteIncrementallyShutdown(t *testing.T) { for _, tc := range tt { tc := tc t.Run(tc.name, func(t *testing.T) { + dirs := datadir.New(t.TempDir()) ctx, cancel := context.WithCancel(context.Background()) defer cancel() if tc.cancelFuncExec { @@ -108,7 +119,7 @@ func TestPromoteIncrementallyShutdown(t *testing.T) { } db, tx := memdb.NewTestTx(t) generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations) - if err := promoteHashedStateIncrementally("logPrefix", &StageState{BlockNumber: 1}, 1, 10, tx, StageHashStateCfg(db, t.TempDir()), ctx.Done()); !errors.Is(err, tc.errExp) { + if err := promoteHashedStateIncrementally("logPrefix", 1, 10, tx, StageHashStateCfg(db, dirs, historyV3, nil), ctx.Done(), false /* quiet */); !errors.Is(err, tc.errExp) { t.Errorf("error does not match expected error while shutdown promoteHashedStateIncrementally, got: %v, expected: %v", err, tc.errExp) } }) @@ -118,6 +129,7 @@ func TestPromoteIncrementallyShutdown(t *testing.T) { } func TestPromoteHashedStateCleanlyShutdown(t *testing.T) { + historyV3 := false tt := []struct { name string @@ -131,6 +143,7 @@ func TestPromoteHashedStateCleanlyShutdown(t *testing.T) { for _, tc := range tt { tc := tc t.Run(tc.name, func(t *testing.T) { + dirs := datadir.New(t.TempDir()) ctx, cancel 
:= context.WithCancel(context.Background()) defer cancel() @@ -142,7 +155,7 @@ func TestPromoteHashedStateCleanlyShutdown(t *testing.T) { generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations) - if err := PromoteHashedStateCleanly("logPrefix", tx, StageHashStateCfg(db, t.TempDir()), ctx); !errors.Is(err, tc.errExp) { + if err := PromoteHashedStateCleanly("logPrefix", tx, StageHashStateCfg(db, dirs, historyV3, nil), ctx); !errors.Is(err, tc.errExp) { t.Errorf("error does not match expected error while shutdown promoteHashedStateCleanly , got: %v, expected: %v", err, tc.errExp) } @@ -151,7 +164,7 @@ func TestPromoteHashedStateCleanlyShutdown(t *testing.T) { } func TestUnwindHashStateShutdown(t *testing.T) { - + historyV3 := false tt := []struct { name string cancelFuncExec bool @@ -164,7 +177,7 @@ func TestUnwindHashStateShutdown(t *testing.T) { for _, tc := range tt { tc := tc t.Run(tc.name, func(t *testing.T) { - + dirs := datadir.New(t.TempDir()) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -175,7 +188,7 @@ func TestUnwindHashStateShutdown(t *testing.T) { db, tx := memdb.NewTestTx(t) generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations) - cfg := StageHashStateCfg(db, t.TempDir()) + cfg := StageHashStateCfg(db, dirs, historyV3, nil) err := PromoteHashedStateCleanly("logPrefix", tx, cfg, ctx) if tc.cancelFuncExec { require.ErrorIs(t, err, libcommon.ErrStopped) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 36b033bdbf6..e27140a119a 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -3,18 +3,14 @@ package stagedsync import ( "context" "encoding/binary" - "errors" "fmt" "math/big" "runtime" "time" "github.com/c2h5oh/datasize" - "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/etl" - proto_downloader 
"github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" @@ -22,13 +18,14 @@ import ( "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapcfg" "github.com/ledgerwatch/erigon/turbo/stages/bodydownload" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/log/v3" @@ -48,15 +45,12 @@ type HeadersCfg struct { penalize func(context.Context, []headerdownload.PenaltyItem) batchSize datasize.ByteSize noP2PDiscovery bool - memoryOverlay bool tmpdir string - snapshots *snapshotsync.RoSnapshots - snapshotDownloader proto_downloader.DownloaderClient - blockReader services.FullBlockReader - dbEventNotifier snapshotsync.DBEventNotifier - forkValidator *engineapi.ForkValidator - notifications *Notifications + snapshots *snapshotsync.RoSnapshots + blockReader services.FullBlockReader + forkValidator *engineapi.ForkValidator + notifications *shards.Notifications } func StageHeadersCfg( @@ -69,32 +63,26 @@ func StageHeadersCfg( penalize func(context.Context, []headerdownload.PenaltyItem), batchSize datasize.ByteSize, noP2PDiscovery bool, - memoryOverlay bool, snapshots *snapshotsync.RoSnapshots, - snapshotDownloader proto_downloader.DownloaderClient, blockReader services.FullBlockReader, tmpdir string, - dbEventNotifier snapshotsync.DBEventNotifier, - notifications *Notifications, + notifications *shards.Notifications, 
forkValidator *engineapi.ForkValidator) HeadersCfg { return HeadersCfg{ - db: db, - hd: headerDownload, - bodyDownload: bodyDownload, - chainConfig: chainConfig, - headerReqSend: headerReqSend, - announceNewHashes: announceNewHashes, - penalize: penalize, - batchSize: batchSize, - tmpdir: tmpdir, - noP2PDiscovery: noP2PDiscovery, - snapshots: snapshots, - snapshotDownloader: snapshotDownloader, - blockReader: blockReader, - dbEventNotifier: dbEventNotifier, - forkValidator: forkValidator, - notifications: notifications, - memoryOverlay: memoryOverlay, + db: db, + hd: headerDownload, + bodyDownload: bodyDownload, + chainConfig: chainConfig, + headerReqSend: headerReqSend, + announceNewHashes: announceNewHashes, + penalize: penalize, + batchSize: batchSize, + tmpdir: tmpdir, + noP2PDiscovery: noP2PDiscovery, + snapshots: snapshots, + blockReader: blockReader, + forkValidator: forkValidator, + notifications: notifications, } } @@ -116,30 +104,41 @@ func SpawnStageHeaders( } defer tx.Rollback() } - if err := DownloadAndIndexSnapshotsIfNeed(s, ctx, tx, cfg, initialCycle); err != nil { - return err + if initialCycle && cfg.snapshots != nil && cfg.snapshots.Cfg().Enabled { + if err := cfg.hd.AddHeadersFromSnapshot(tx, cfg.snapshots.BlocksAvailable(), cfg.blockReader); err != nil { + return err + } } - var blockNumber uint64 + var preProgress uint64 if s == nil { - blockNumber = 0 + preProgress = 0 } else { - blockNumber = s.BlockNumber + preProgress = s.BlockNumber } + notBorAndParlia := cfg.chainConfig.Bor == nil && cfg.chainConfig.Parlia == nil + unsettledForkChoice, headHeight := cfg.hd.GetUnsettledForkChoice() - if unsettledForkChoice != nil { // some work left to do after unwind + if notBorAndParlia && unsettledForkChoice != nil { // some work left to do after unwind return finishHandlingForkChoice(unsettledForkChoice, headHeight, s, tx, cfg, useExternalTx) } - transitionedToPoS, err := rawdb.Transitioned(tx, blockNumber, cfg.chainConfig.TerminalTotalDifficulty) - if 
err != nil { - return err + transitionedToPoS := cfg.chainConfig.TerminalTotalDifficultyPassed + if notBorAndParlia && !transitionedToPoS { + var err error + transitionedToPoS, err = rawdb.Transitioned(tx, preProgress, cfg.chainConfig.TerminalTotalDifficulty) + if err != nil { + return err + } + if transitionedToPoS { + cfg.hd.SetFirstPoSHeight(preProgress) + } } if transitionedToPoS { libcommon.SafeClose(cfg.hd.QuitPoWMining) - return HeadersPOS(s, u, ctx, tx, cfg, initialCycle, test, useExternalTx) + return HeadersPOS(s, u, ctx, tx, cfg, initialCycle, test, useExternalTx, preProgress) } else { return HeadersPOW(s, u, ctx, tx, cfg, initialCycle, test, useExternalTx) } @@ -156,18 +155,24 @@ func HeadersPOS( initialCycle bool, test bool, useExternalTx bool, + preProgress uint64, ) error { if initialCycle { - // Let execution and other stages to finish before waiting for CL - return nil + // Let execution and other stages to finish before waiting for CL, but only if other stages aren't ahead + if execProgress, err := stages.GetStageProgress(tx, stages.Execution); err != nil { + return err + } else if s.BlockNumber >= execProgress { + return nil + } } - log.Info(fmt.Sprintf("[%s] Waiting for Beacon Chain...", s.LogPrefix())) - - onlyNewRequests := cfg.hd.PosStatus() == headerdownload.Syncing - interrupt, requestId, requestWithStatus := cfg.hd.BeaconRequestList.WaitForRequest(onlyNewRequests, test) - cfg.hd.SetPOSSync(true) + syncing := cfg.hd.PosStatus() != headerdownload.Idle + if !syncing { + log.Info(fmt.Sprintf("[%s] Waiting for Consensus Layer...", s.LogPrefix())) + } + interrupt, requestId, requestWithStatus := cfg.hd.BeaconRequestList.WaitForRequest(syncing, test) + cfg.hd.SetHeaderReader(&chainReader{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}) headerInserter := headerdownload.NewHeaderInserter(s.LogPrefix(), nil, s.BlockNumber, cfg.blockReader) @@ -195,7 +200,7 @@ func HeadersPOS( var payloadStatus *engineapi.PayloadStatus if 
forkChoiceInsteadOfNewPayload { - payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, tx, cfg, test, headerInserter) + payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, tx, cfg, test, headerInserter, preProgress) } else { payloadMessage := request.(*types.Block) payloadStatus, err = handleNewPayload(payloadMessage, requestStatus, requestId, s, ctx, tx, cfg, test, headerInserter) @@ -276,10 +281,9 @@ func startHandlingForkChoice( cfg HeadersCfg, test bool, headerInserter *headerdownload.HeaderInserter, + preProgress uint64, ) (*engineapi.PayloadStatus, error) { - if cfg.memoryOverlay { - defer cfg.forkValidator.ClearWithUnwind(tx, cfg.notifications.Accumulator, cfg.notifications.StateChangesConsumer) - } + defer cfg.forkValidator.ClearWithUnwind(tx, cfg.notifications.Accumulator, cfg.notifications.StateChangesConsumer) headerHash := forkChoice.HeadBlockHash log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) @@ -313,12 +317,11 @@ func startHandlingForkChoice( } if header == nil { - log.Info(fmt.Sprintf("[%s] Fork choice missing header with hash %x", s.LogPrefix(), headerHash)) + log.Debug(fmt.Sprintf("[%s] Fork choice: need to download header with hash %x", s.LogPrefix(), headerHash)) if test { cfg.hd.BeaconRequestList.Remove(requestId) } else { - cfg.hd.SetPoSDownloaderTip(headerHash) - schedulePoSDownload(requestId, headerHash, 0 /* header height is unknown, setting to 0 */, s, cfg) + schedulePoSDownload(requestId, headerHash, 0 /* header height is unknown, setting to 0 */, headerHash, s, cfg) } return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } @@ -327,9 +330,9 @@ func startHandlingForkChoice( headerNumber := header.Number.Uint64() - if cfg.memoryOverlay && headerHash == cfg.forkValidator.ExtendingForkHeadHash() { - log.Info("Flushing in-memory state") - if err := 
cfg.forkValidator.FlushExtendingFork(tx); err != nil { + if headerHash == cfg.forkValidator.ExtendingForkHeadHash() { + log.Info(fmt.Sprintf("[%s] Fork choice update: flushing in-memory state (built by previous newPayload)", s.LogPrefix())) + if err := cfg.forkValidator.FlushExtendingFork(tx, cfg.notifications.Accumulator); err != nil { return nil, err } cfg.hd.BeaconRequestList.Remove(requestId) @@ -353,21 +356,56 @@ func startHandlingForkChoice( if err != nil { return nil, err } + if forkingPoint < preProgress { - log.Info(fmt.Sprintf("[%s] Fork choice re-org", s.LogPrefix()), "headerNumber", headerNumber, "forkingPoint", forkingPoint) + log.Info(fmt.Sprintf("[%s] Fork choice: re-org", s.LogPrefix()), "goal", headerNumber, "from", preProgress, "unwind to", forkingPoint) - if requestStatus == engineapi.New { - if headerNumber-forkingPoint <= ShortPoSReorgThresholdBlocks { - // TODO(yperbasis): what if some bodies are missing and we have to download them? - cfg.hd.SetPendingPayloadHash(headerHash) - } else { - cfg.hd.PayloadStatusCh <- engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING} + if requestStatus == engineapi.New { + if headerNumber-forkingPoint <= ShortPoSReorgThresholdBlocks { + // TODO(yperbasis): what if some bodies are missing and we have to download them? 
+ cfg.hd.SetPendingPayloadHash(headerHash) + } else { + cfg.hd.PayloadStatusCh <- engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING} + } } - } - u.UnwindTo(forkingPoint, common.Hash{}) + u.UnwindTo(forkingPoint, common.Hash{}) - cfg.hd.SetUnsettledForkChoice(forkChoice, headerNumber) + cfg.hd.SetUnsettledForkChoice(forkChoice, headerNumber) + } else { + // Extend canonical chain by the new header + log.Info(fmt.Sprintf("[%s] Fork choice: chain extension", s.LogPrefix()), "from", preProgress, "to", headerNumber) + logEvery := time.NewTicker(logInterval) + defer logEvery.Stop() + if err = fixCanonicalChain(s.LogPrefix(), logEvery, headerNumber, headerHash, tx, cfg.blockReader); err != nil { + return nil, err + } + if err = rawdb.WriteHeadHeaderHash(tx, forkChoice.HeadBlockHash); err != nil { + return nil, err + } + + canonical, err := writeForkChoiceHashes(forkChoice, s, tx, cfg) + if err != nil { + return nil, err + } + + if err := s.Update(tx, headerNumber); err != nil { + return nil, err + } + // Referesh currentHeadHash + currentHeadHash = rawdb.ReadHeadHeaderHash(tx) + + if canonical { + return &engineapi.PayloadStatus{ + Status: remote.EngineStatus_VALID, + LatestValidHash: currentHeadHash, + }, nil + } else { + return &engineapi.PayloadStatus{ + CriticalError: &privateapi.InvalidForkchoiceStateErr, + }, nil + } + } return nil, nil } @@ -436,7 +474,7 @@ func handleNewPayload( headerNumber := header.Number.Uint64() headerHash := block.Hash() - log.Debug(fmt.Sprintf("[%s] Handling new payload", s.LogPrefix()), "height", headerNumber, "hash", headerHash) + log.Info(fmt.Sprintf("[%s] Handling new payload", s.LogPrefix()), "height", headerNumber, "hash", headerHash) cfg.hd.UpdateTopSeenHeightPoS(headerNumber) parent, err := cfg.blockReader.HeaderByHash(ctx, tx, header.ParentHash) @@ -444,13 +482,14 @@ func handleNewPayload( return nil, err } if parent == nil { - log.Info(fmt.Sprintf("[%s] New payload missing parent", s.LogPrefix())) + 
log.Debug(fmt.Sprintf("[%s] New payload: need to download parent", s.LogPrefix()), "height", headerNumber, "hash", headerHash, "parentHash", header.ParentHash) if test { cfg.hd.BeaconRequestList.Remove(requestId) return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } - cfg.hd.SetPoSDownloaderTip(headerHash) - schedulePoSDownload(requestId, header.ParentHash, headerNumber-1, s, cfg) + if !schedulePoSDownload(requestId, header.ParentHash, headerNumber-1, headerHash /* downloaderTip */, s, cfg) { + return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil + } currentHeadNumber := rawdb.ReadCurrentBlockNumber(tx) if currentHeadNumber != nil && math.AbsoluteDifference(*currentHeadNumber, headerNumber) < 32 { // We try waiting until we finish downloading the PoS blocks if the distance from the head is enough, @@ -503,9 +542,8 @@ func verifyAndSaveNewPoSHeader( headerNumber := header.Number.Uint64() headerHash := block.Hash() - bad, lastValidHash := cfg.hd.IsBadHeaderPoS(header.ParentHash) + bad, lastValidHash := cfg.hd.IsBadHeaderPoS(headerHash) if bad { - cfg.hd.ReportBadHeaderPoS(headerHash, lastValidHash) return &engineapi.PayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: lastValidHash}, false, nil } @@ -521,88 +559,49 @@ func verifyAndSaveNewPoSHeader( currentHeadHash := rawdb.ReadHeadHeaderHash(tx) - forkingPoint, err := forkingPoint(ctx, tx, headerInserter, cfg.blockReader, header) - if err != nil { + extendingHash := cfg.forkValidator.ExtendingForkHeadHash() + extendCanonical := (extendingHash == common.Hash{} && header.ParentHash == currentHeadHash) || extendingHash == header.ParentHash + status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, block.RawBody(), extendCanonical) + if criticalError != nil { + return nil, false, criticalError + } + success = validationError == nil + if !success { + log.Warn("Validation failed for header", "hash", headerHash, 
"height", headerNumber, "err", validationError) + cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) + } else if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { return nil, false, err } - forkingHash, err := cfg.blockReader.CanonicalHash(ctx, tx, forkingPoint) - - canExtendCanonical := forkingHash == currentHeadHash - - if cfg.memoryOverlay { - extendingHash := cfg.forkValidator.ExtendingForkHeadHash() - extendCanonical := (extendingHash == common.Hash{} && header.ParentHash == currentHeadHash) || extendingHash == header.ParentHash - status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, block.RawBody(), extendCanonical) - if criticalError != nil { - return nil, false, criticalError - } - success = validationError == nil - if !success { - log.Warn("Validation failed for header", "hash", headerHash, "height", headerNumber, "err", validationError) - cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) - } else if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { - return nil, false, err - } - return &engineapi.PayloadStatus{ - Status: status, - LatestValidHash: latestValidHash, - ValidationError: validationError, - }, success, nil - } - - if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { - return nil, false, err - } - - if !canExtendCanonical { - log.Info("Side chain", "parentHash", header.ParentHash, "currentHead", currentHeadHash) - return &engineapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED}, true, nil - } - - // OK, we're on the canonical chain - if requestStatus == engineapi.New { - cfg.hd.SetPendingPayloadHash(headerHash) - } - - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() - - // Extend canonical chain by the new header - err = fixCanonicalChain(s.LogPrefix(), logEvery, headerInserter.GetHighest(), headerInserter.GetHighestHash(), tx, cfg.blockReader) - if err != nil { - return nil, false, err - } - - 
err = rawdb.WriteHeadHeaderHash(tx, headerHash) - if err != nil { - return nil, false, err - } - - err = s.Update(tx, headerNumber) - if err != nil { - return nil, false, err - } - - return nil, true, nil + return &engineapi.PayloadStatus{ + Status: status, + LatestValidHash: latestValidHash, + ValidationError: validationError, + }, success, nil } func schedulePoSDownload( requestId int, hashToDownload common.Hash, heightToDownload uint64, + downloaderTip common.Hash, s *StageState, cfg HeadersCfg, -) { +) bool { cfg.hd.BeaconRequestList.SetStatus(requestId, engineapi.DataWasMissing) if cfg.hd.PosStatus() != headerdownload.Idle { - log.Debug(fmt.Sprintf("[%s] Postponing PoS download since another one is in progress", s.LogPrefix()), "height", heightToDownload, "hash", hashToDownload) - return + log.Info(fmt.Sprintf("[%s] Postponing PoS download since another one is in progress", s.LogPrefix()), "height", heightToDownload, "hash", hashToDownload) + return false } - log.Info(fmt.Sprintf("[%s] Downloading PoS headers...", s.LogPrefix()), "height", heightToDownload, "hash", hashToDownload, "requestId", requestId) + if heightToDownload == 0 { + log.Info(fmt.Sprintf("[%s] Downloading PoS headers...", s.LogPrefix()), "height", "unknown", "hash", hashToDownload, "requestId", requestId) + } else { + log.Info(fmt.Sprintf("[%s] Downloading PoS headers...", s.LogPrefix()), "height", heightToDownload, "hash", hashToDownload, "requestId", requestId) + } cfg.hd.SetRequestId(requestId) + cfg.hd.SetPoSDownloaderTip(downloaderTip) cfg.hd.SetHeaderToDownloadPoS(hashToDownload, heightToDownload) cfg.hd.SetPOSSync(true) // This needs to be called after SetHeaderToDownloadPOS because SetHeaderToDownloadPOS sets `posAnchor` member field which is used by ProcessHeadersPOS @@ -613,17 +612,21 @@ func schedulePoSDownload( cfg.hd.SetHeadersCollector(headerCollector) cfg.hd.SetPosStatus(headerdownload.Syncing) + + return true } func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, 
headerInserter *headerdownload.HeaderInserter) { - defer cfg.forkValidator.Clear() - var lastValidHash common.Hash var badChainError error var foundPow bool headerLoadFunc := func(key, value []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error { var h types.Header + // no header to process + if value == nil { + return nil + } if err := rlp.DecodeBytes(value, &h); err != nil { return err } @@ -657,6 +660,7 @@ func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, headerInserte cfg.hd.ReportBadHeaderPoS(h.Hash(), lastValidHash) return nil } + return headerInserter.FeedHeaderPoS(tx, &h, h.Hash()) } @@ -703,7 +707,8 @@ func forkingPoint( func handleInterrupt(interrupt engineapi.Interrupt, cfg HeadersCfg, tx kv.RwTx, headerInserter *headerdownload.HeaderInserter, useExternalTx bool) (bool, error) { if interrupt != engineapi.None { if interrupt == engineapi.Stopping { - cfg.hd.PayloadStatusCh <- engineapi.PayloadStatus{CriticalError: errors.New("server is stopping")} + close(cfg.hd.ShutdownCh) + return false, fmt.Errorf("server is stopping") } if interrupt == engineapi.Synced && cfg.hd.HeadersCollector() != nil { verifyAndSaveDownloadedPoSHeaders(tx, cfg, headerInserter) @@ -842,10 +847,13 @@ Loop: return err } - announces := cfg.hd.GrabAnnounces() - if len(announces) > 0 { - cfg.announceNewHashes(ctx, announces) + if test { + announces := cfg.hd.GrabAnnounces() + if len(announces) > 0 { + cfg.announceNewHashes(ctx, announces) + } } + if headerInserter.BestHeaderChanged() { // We do not break unless there best header changed noProgressCounter = 0 wasProgress = true @@ -1064,6 +1072,12 @@ func HeadersUnwind(u *UnwindState, s *StageState, tx kv.RwTx, cfg HeadersCfg, te func logProgressHeaders(logPrefix string, prev, now uint64) uint64 { speed := float64(now-prev) / float64(logInterval/time.Second) + if speed == 0 { + // Don't log "Wrote block ..." 
unless we're actually writing something + log.Info(fmt.Sprintf("[%s] No block headers to write in this log period", logPrefix), "block number", now, "blk/second", speed) + return now + } + var m runtime.MemStats libcommon.ReadMemStats(&m) log.Info(fmt.Sprintf("[%s] Wrote block headers", logPrefix), @@ -1155,233 +1169,3 @@ func HeadersPrune(p *PruneState, tx kv.RwTx, cfg HeadersCfg, ctx context.Context } return nil } - -func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.RwTx, cfg HeadersCfg, initialCycle bool) error { - if !initialCycle || cfg.snapshots == nil || !cfg.snapshots.Cfg().Enabled { - return nil - } - - if err := WaitForDownloader(ctx, cfg, tx); err != nil { - return err - } - if err := cfg.snapshots.ReopenFolder(); err != nil { - return fmt.Errorf("ReopenSegments: %w", err) - } - if cfg.dbEventNotifier != nil { - cfg.dbEventNotifier.OnNewSnapshot() - } - - cfg.snapshots.LogStat() - - // Create .idx files - if cfg.snapshots.IndicesMax() < cfg.snapshots.SegmentsMax() { - if !cfg.snapshots.Cfg().Produce && cfg.snapshots.IndicesMax() == 0 { - return fmt.Errorf("please remove --snap.stop, erigon can't work without creating basic indices") - } - if cfg.snapshots.Cfg().Produce { - if !cfg.snapshots.SegmentsReady() { - return fmt.Errorf("not all snapshot segments are available") - } - - // wait for Downloader service to download all expected snapshots - if cfg.snapshots.IndicesMax() < cfg.snapshots.SegmentsMax() { - chainID, _ := uint256.FromBig(cfg.chainConfig.ChainID) - workers := cmp.InRange(1, 2, runtime.GOMAXPROCS(-1)-1) - if err := snapshotsync.BuildMissedIndices(ctx, cfg.snapshots.Dir(), *chainID, cfg.tmpdir, workers, log.LvlInfo); err != nil { - return fmt.Errorf("BuildMissedIndices: %w", err) - } - } - - if err := cfg.snapshots.ReopenFolder(); err != nil { - return err - } - if cfg.dbEventNotifier != nil { - cfg.dbEventNotifier.OnNewSnapshot() - } - } - } - - if s.BlockNumber < cfg.snapshots.BlocksAvailable() { // allow 
genesis - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() - - h2n := etl.NewCollector("Snapshots", cfg.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize)) - defer h2n.Close() - h2n.LogLvl(log.LvlDebug) - - // fill some small tables from snapshots, in future we may store this data in snapshots also, but - // for now easier just store them in db - td := big.NewInt(0) - if err := snapshotsync.ForEachHeader(ctx, cfg.snapshots, func(header *types.Header) error { - blockNum, blockHash := header.Number.Uint64(), header.Hash() - td.Add(td, header.Difficulty) - if err := rawdb.WriteTd(tx, blockHash, blockNum, td); err != nil { - return err - } - if err := rawdb.WriteCanonicalHash(tx, blockHash, blockNum); err != nil { - return err - } - if err := h2n.Collect(blockHash[:], dbutils.EncodeBlockNumber(blockNum)); err != nil { - return err - } - select { - case <-ctx.Done(): - return ctx.Err() - case <-logEvery.C: - log.Info(fmt.Sprintf("[%s] Writing total difficulty index for snapshots", s.LogPrefix()), "block_num", header.Number.Uint64()) - default: - } - return nil - }); err != nil { - return err - } - if err := h2n.Load(tx, kv.HeaderNumber, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { - return err - } - // ResetSequence - allow set arbitrary value to sequence (for example to decrement it to exact value) - ok, err := cfg.snapshots.ViewTxs(cfg.snapshots.BlocksAvailable(), func(sn *snapshotsync.TxnSegment) error { - lastTxnID := sn.IdxTxnHash.BaseDataID() + uint64(sn.Seg.Count()) - if err := rawdb.ResetSequence(tx, kv.EthTx, lastTxnID+1); err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - if !ok { - return fmt.Errorf("snapshot not found for block: %d", cfg.snapshots.BlocksAvailable()) - } - if err := s.Update(tx, cfg.snapshots.BlocksAvailable()); err != nil { - return err - } - canonicalHash, err := cfg.blockReader.CanonicalHash(ctx, tx, cfg.snapshots.BlocksAvailable()) - if err != nil { - return err - } - if err = 
rawdb.WriteHeadHeaderHash(tx, canonicalHash); err != nil { - return err - } - if err := s.Update(tx, cfg.snapshots.BlocksAvailable()); err != nil { - return err - } - s.BlockNumber = cfg.snapshots.BlocksAvailable() - } - - if err := cfg.hd.AddHeadersFromSnapshot(tx, cfg.snapshots.BlocksAvailable(), cfg.blockReader); err != nil { - return err - } - - return nil -} - -// WaitForDownloader - wait for Downloader service to download all expected snapshots -// for MVP we sync with Downloader only once, in future will send new snapshots also -func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { - if cfg.snapshots.Cfg().NoDownloader { - return nil - } - - snInDB, err := rawdb.ReadSnapshots(tx) - if err != nil { - return err - } - dbEmpty := len(snInDB) == 0 - var missingSnapshots []snapshotsync.Range - if !dbEmpty { - _, missingSnapshots, err = snapshotsync.Segments(cfg.snapshots.Dir()) - if err != nil { - return err - } - } - - if len(missingSnapshots) > 0 { - log.Warn("[Snapshots] downloading missing snapshots") - } - - // send all hashes to the Downloader service - preverified := snapcfg.KnownCfg(cfg.chainConfig.ChainName, snInDB).Preverified - var downloadRequest []snapshotsync.DownloadRequest - // build all download requests - // builds preverified snapshots request - for _, p := range preverified { - downloadRequest = append(downloadRequest, snapshotsync.NewDownloadRequest(nil, p.Name, p.Hash)) - } - // builds missing snapshots request - for _, r := range missingSnapshots { - downloadRequest = append(downloadRequest, snapshotsync.NewDownloadRequest(&r, "", "")) - } - - log.Info("[Snapshots] Fetching torrent files metadata") - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - if err := snapshotsync.RequestSnapshotsDownload(ctx, downloadRequest, cfg.snapshotDownloader); err != nil { - log.Error("[Snapshots] call downloader", "err", err) - time.Sleep(10 * time.Second) - continue - } - break - } - logEvery := 
time.NewTicker(logInterval) - defer logEvery.Stop() - var m runtime.MemStats - - // Check once without delay, for faster erigon re-start - stats, err := cfg.snapshotDownloader.Stats(ctx, &proto_downloader.StatsRequest{}) - if err == nil && stats.Completed { - goto Finish - } - - // Print download progress until all segments are available -Loop: - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-logEvery.C: - if stats, err := cfg.snapshotDownloader.Stats(ctx, &proto_downloader.StatsRequest{}); err != nil { - log.Warn("Error while waiting for snapshots progress", "err", err) - } else if stats.Completed { - if !cfg.snapshots.Cfg().Verify { // will verify after loop - if _, err := cfg.snapshotDownloader.Verify(ctx, &proto_downloader.VerifyRequest{}); err != nil { - return err - } - } - break Loop - } else { - if stats.MetadataReady < stats.FilesTotal { - log.Info(fmt.Sprintf("[Snapshots] Waiting for torrents metadata: %d/%d", stats.MetadataReady, stats.FilesTotal)) - continue - } - libcommon.ReadMemStats(&m) - log.Info("[Snapshots] download", - "progress", fmt.Sprintf("%.2f%% %s/%s", stats.Progress, libcommon.ByteCount(stats.BytesCompleted), libcommon.ByteCount(stats.BytesTotal)), - "download", libcommon.ByteCount(stats.DownloadRate)+"/s", - "upload", libcommon.ByteCount(stats.UploadRate)+"/s", - "peers", stats.PeersUnique, - "connections", stats.ConnectionsTotal, - "files", stats.FilesTotal, - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), - ) - } - } - } - -Finish: - if cfg.snapshots.Cfg().Verify { - if _, err := cfg.snapshotDownloader.Verify(ctx, &proto_downloader.VerifyRequest{}); err != nil { - return err - } - } - - if dbEmpty { - if err = rawdb.WriteSnapshots(tx, snInDB); err != nil { - return err - } - } - return nil -} diff --git a/eth/stagedsync/stage_interhashes.go b/eth/stagedsync/stage_interhashes.go index 8b0e370a971..c9d67204bb0 100644 --- a/eth/stagedsync/stage_interhashes.go +++ 
b/eth/stagedsync/stage_interhashes.go @@ -3,17 +3,20 @@ package stagedsync import ( "bytes" "context" + "encoding/binary" "fmt" "math/bits" - "os" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/turbo/services" @@ -31,9 +34,12 @@ type TrieCfg struct { saveNewHashesToDB bool // no reason to save changes when calculating root for mining blockReader services.FullBlockReader hd *headerdownload.HeaderDownload + + historyV3 bool + agg *state.Aggregator22 } -func StageTrieCfg(db kv.RwDB, checkRoot, saveNewHashesToDB, badBlockHalt bool, tmpDir string, blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload) TrieCfg { +func StageTrieCfg(db kv.RwDB, checkRoot, saveNewHashesToDB, badBlockHalt bool, tmpDir string, blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload, historyV3 bool, agg *state.Aggregator22) TrieCfg { return TrieCfg{ db: db, checkRoot: checkRoot, @@ -42,10 +48,13 @@ func StageTrieCfg(db kv.RwDB, checkRoot, saveNewHashesToDB, badBlockHalt bool, t badBlockHalt: badBlockHalt, blockReader: blockReader, hd: hd, + + historyV3: historyV3, + agg: agg, } } -func SpawnIntermediateHashesStage(s *StageState, u Unwinder, tx kv.RwTx, cfg TrieCfg, ctx context.Context) (common.Hash, error) { +func SpawnIntermediateHashesStage(s *StageState, u Unwinder, tx kv.RwTx, cfg TrieCfg, ctx context.Context, quiet bool) (common.Hash, error) { quit := ctx.Done() useExternalTx := tx != nil if !useExternalTx { @@ -70,16 +79,20 @@ func 
SpawnIntermediateHashesStage(s *StageState, u Unwinder, tx kv.RwTx, cfg Tri var expectedRootHash common.Hash var headerHash common.Hash + var syncHeadHeader *types.Header if cfg.checkRoot { - syncHeadHeader, err := cfg.blockReader.HeaderByNumber(ctx, tx, to) + syncHeadHeader, err = cfg.blockReader.HeaderByNumber(ctx, tx, to) if err != nil { return trie.EmptyRoot, err } + if syncHeadHeader == nil { + return trie.EmptyRoot, fmt.Errorf("no header found with number %d", to) + } expectedRootHash = syncHeadHeader.Root headerHash = syncHeadHeader.Hash() } logPrefix := s.LogPrefix() - if to > s.BlockNumber+16 { + if !quiet && to > s.BlockNumber+16 { log.Info(fmt.Sprintf("[%s] Generating intermediate hashes", logPrefix), "from", s.BlockNumber, "to", to) } var root common.Hash @@ -94,25 +107,20 @@ func SpawnIntermediateHashesStage(s *StageState, u Unwinder, tx kv.RwTx, cfg Tri } } - if err == nil { - if cfg.checkRoot && root != expectedRootHash { - log.Error(fmt.Sprintf("[%s] Wrong trie root of block %d: %x, expected (from header): %x. Block hash: %x", logPrefix, to, root, expectedRootHash, headerHash)) - if cfg.badBlockHalt { - return trie.EmptyRoot, fmt.Errorf("Wrong trie root") - } - if cfg.hd != nil { - header := rawdb.ReadHeader(tx, headerHash, to) - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) - } - if to > s.BlockNumber { - unwindTo := (to + s.BlockNumber) / 2 // Binary search for the correct block, biased to the lower numbers - log.Warn("Unwinding due to incorrect root hash", "to", unwindTo) - u.UnwindTo(unwindTo, headerHash) - } - } else if err = s.Update(tx, to); err != nil { - return trie.EmptyRoot, err + if cfg.checkRoot && root != expectedRootHash { + log.Error(fmt.Sprintf("[%s] Wrong trie root of block %d: %x, expected (from header): %x. 
Block hash: %x", logPrefix, to, root, expectedRootHash, headerHash)) + if cfg.badBlockHalt { + return trie.EmptyRoot, fmt.Errorf("wrong trie root") } - } else { + if cfg.hd != nil { + cfg.hd.ReportBadHeaderPoS(headerHash, syncHeadHeader.ParentHash) + } + if to > s.BlockNumber { + unwindTo := (to + s.BlockNumber) / 2 // Binary search for the correct block, biased to the lower numbers + log.Warn("Unwinding due to incorrect root hash", "to", unwindTo) + u.UnwindTo(unwindTo, headerHash) + } + } else if err = s.Update(tx, to); err != nil { return trie.EmptyRoot, err } @@ -163,22 +171,80 @@ func RegenerateIntermediateHashes(logPrefix string, db kv.RwTx, cfg TrieCfg, exp } type HashPromoter struct { - db kv.RwTx + tx kv.RwTx ChangeSetBufSize uint64 TempDir string + logPrefix string quitCh <-chan struct{} } -func NewHashPromoter(db kv.RwTx, quitCh <-chan struct{}) *HashPromoter { +func NewHashPromoter(db kv.RwTx, tempDir string, quitCh <-chan struct{}, logPrefix string) *HashPromoter { return &HashPromoter{ - db: db, + tx: db, ChangeSetBufSize: 256 * 1024 * 1024, - TempDir: os.TempDir(), + TempDir: tempDir, quitCh: quitCh, + logPrefix: logPrefix, } } -func (p *HashPromoter) Promote(logPrefix string, s *StageState, from, to uint64, storage bool, load etl.LoadFunc) error { +func (p *HashPromoter) PromoteOnHistoryV3(logPrefix string, agg *state.Aggregator22, from, to uint64, storage bool, load func(k []byte, v []byte) error) error { + nonEmptyMarker := []byte{1} + + agg.SetTx(p.tx) + var k, v []byte + + txnFrom, err := rawdb.TxNums.Min(p.tx, from+1) + if err != nil { + return err + } + txnTo := uint64(math.MaxUint64) + + if storage { + compositeKey := make([]byte, common.HashLength+common.HashLength) + it := agg.Storage().MakeContext().IterateChanged(txnFrom, txnTo, p.tx) + defer it.Close() + for it.HasNext() { + k, v = it.Next(k[:0], v[:0]) + addrHash, err := common.HashData(k[:length.Addr]) + if err != nil { + return err + } + secKey, err := common.HashData(k[length.Addr:]) 
+ if err != nil { + return err + } + copy(compositeKey, addrHash[:]) + copy(compositeKey[common.HashLength:], secKey[:]) + if len(v) != 0 { + v = nonEmptyMarker + } + if err := load(compositeKey, v); err != nil { + return err + } + } + return nil + } + + it := agg.Accounts().MakeContext().IterateChanged(txnFrom, txnTo, p.tx) + defer it.Close() + for it.HasNext() { + k, v = it.Next(k[:0], v[:0]) + newK, err := transformPlainStateKey(k) + if err != nil { + return err + } + if len(v) != 0 { + v = nonEmptyMarker + } + if err := load(newK, v); err != nil { + return err + } + } + return nil +} + +func (p *HashPromoter) Promote(logPrefix string, from, to uint64, storage bool, load etl.LoadFunc) error { var changeSetBucket string if storage { changeSetBucket = kv.StorageChangeSet @@ -209,7 +275,7 @@ func (p *HashPromoter) Promote(logPrefix string, s *StageState, from, to uint64, if oldAccount.Incarnation > 0 { - newValue, err := p.db.GetOne(kv.PlainState, k) + newValue, err := p.tx.GetOne(kv.PlainState, k) if err != nil { return err } @@ -226,7 +292,6 @@ func (p *HashPromoter) Promote(logPrefix string, s *StageState, from, to uint64, } } } - } return next(dbKey, newK, v) @@ -237,7 +302,7 @@ func (p *HashPromoter) Promote(logPrefix string, s *StageState, from, to uint64, if err := etl.Transform( logPrefix, - p.db, + p.tx, changeSetBucket, "", p.TempDir, @@ -255,8 +320,8 @@ func (p *HashPromoter) Promote(logPrefix string, s *StageState, from, to uint64, if !storage { // delete Intermediate hashes of deleted accounts slices.SortFunc(deletedAccounts, func(a, b []byte) bool { return bytes.Compare(a, b) < 0 }) for _, k := range deletedAccounts { - if err := p.db.ForPrefix(kv.TrieOfStorage, k, func(k, v []byte) error { - if err := p.db.Delete(kv.TrieOfStorage, k); err != nil { + if err := p.tx.ForPrefix(kv.TrieOfStorage, k, func(k, v []byte) error { + if err := p.tx.Delete(kv.TrieOfStorage, k); err != nil { return err } return nil @@ -269,6 +334,90 @@ func (p *HashPromoter) 
Promote(logPrefix string, s *StageState, from, to uint64, return nil } +func (p *HashPromoter) UnwindOnHistoryV3(logPrefix string, agg *state.Aggregator22, unwindFrom, unwindTo uint64, storage bool, load func(k []byte, v []byte)) error { + txnFrom, err := rawdb.TxNums.Min(p.tx, unwindTo) + if err != nil { + return err + } + txnTo := uint64(math.MaxUint64) + var deletedAccounts [][]byte + var k, v []byte + + if storage { + it := agg.Storage().MakeContext().IterateChanged(txnFrom, txnTo, p.tx) + defer it.Close() + for it.HasNext() { + k, v = it.Next(k[:0], v[:0]) + // Plain state not unwind yet, it means - if key not-exists in PlainState but has value from ChangeSets - then need mark it as "created" in RetainList + value, err := p.tx.GetOne(kv.PlainState, k[:20]) + if err != nil { + return err + } + incarnation := uint64(1) + if len(value) != 0 { + oldInc, _ := accounts.DecodeIncarnationFromStorage(value) + incarnation = oldInc + } + plainKey := dbutils.PlainGenerateCompositeStorageKey(k[:20], incarnation, k[20:]) + newK, err := transformPlainStateKey(plainKey) + if err != nil { + return err + } + load(newK, value) + } + return nil + } + + it := agg.Accounts().MakeContext().IterateChanged(txnFrom, txnTo, p.tx) + defer it.Close() + + for it.HasNext() { + k, v = it.Next(k[:0], v[:0]) + newK, err := transformPlainStateKey(k) + if err != nil { + return err + } + // Plain state not unwind yet, it means - if key not-exists in PlainState but has value from ChangeSets - then need mark it as "created" in RetainList + value, err := p.tx.GetOne(kv.PlainState, k) + if err != nil { + return err + } + + if len(value) > 0 { + oldInc, _ := accounts.DecodeIncarnationFromStorage(value) + if oldInc > 0 { + if len(v) == 0 { // self-destructed + deletedAccounts = append(deletedAccounts, newK) + } else { + var newAccount accounts.Account + if err = accounts.Deserialise2(&newAccount, v); err != nil { + return err + } + if newAccount.Incarnation > oldInc { + deletedAccounts = 
append(deletedAccounts, newK) + } + } + } + } + + load(newK, value) + } + + // delete Intermediate hashes of deleted accounts + slices.SortFunc(deletedAccounts, func(a, b []byte) bool { return bytes.Compare(a, b) < 0 }) + for _, k := range deletedAccounts { + if err := p.tx.ForPrefix(kv.TrieOfStorage, k, func(k, v []byte) error { + if err := p.tx.Delete(kv.TrieOfStorage, k); err != nil { + return err + } + return nil + }); err != nil { + return err + } + } + return nil +} + func (p *HashPromoter) Unwind(logPrefix string, s *StageState, u *UnwindState, storage bool, load etl.LoadFunc) error { to := u.UnwindPoint var changeSetBucket string @@ -294,7 +443,7 @@ func (p *HashPromoter) Unwind(logPrefix string, s *StageState, u *UnwindState, s return err } // Plain state not unwind yet, it means - if key not-exists in PlainState but has value from ChangeSets - then need mark it as "created" in RetainList - value, err := p.db.GetOne(kv.PlainState, k) + value, err := p.tx.GetOne(kv.PlainState, k) if err != nil { return err } @@ -326,7 +475,7 @@ func (p *HashPromoter) Unwind(logPrefix string, s *StageState, u *UnwindState, s if err := etl.Transform( logPrefix, - p.db, + p.tx, changeSetBucket, "", p.TempDir, @@ -344,8 +493,8 @@ func (p *HashPromoter) Unwind(logPrefix string, s *StageState, u *UnwindState, s if !storage { // delete Intermediate hashes of deleted accounts slices.SortFunc(deletedAccounts, func(a, b []byte) bool { return bytes.Compare(a, b) < 0 }) for _, k := range deletedAccounts { - if err := p.db.ForPrefix(kv.TrieOfStorage, k, func(k, v []byte) error { - if err := p.db.Delete(kv.TrieOfStorage, k); err != nil { + if err := p.tx.ForPrefix(kv.TrieOfStorage, k, func(k, v []byte) error { + if err := p.tx.Delete(kv.TrieOfStorage, k); err != nil { return err } return nil @@ -360,20 +509,54 @@ func (p *HashPromoter) Unwind(logPrefix string, s *StageState, u *UnwindState, s } func incrementIntermediateHashes(logPrefix string, s *StageState, db kv.RwTx, to uint64, cfg 
TrieCfg, expectedRootHash common.Hash, quit <-chan struct{}) (common.Hash, error) { - p := NewHashPromoter(db, quit) - p.TempDir = cfg.tmpDir + p := NewHashPromoter(db, cfg.tmpDir, quit, logPrefix) rl := trie.NewRetainList(0) - collect := func(k, v []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error { - rl.AddKeyWithMarker(k, len(v) == 0) - return nil - } - if err := p.Promote(logPrefix, s, s.BlockNumber, to, false /* storage */, collect); err != nil { - return trie.EmptyRoot, err - } - if err := p.Promote(logPrefix, s, s.BlockNumber, to, true /* storage */, collect); err != nil { - return trie.EmptyRoot, err + if cfg.historyV3 { + cfg.agg.SetTx(db) + collect := func(k, v []byte) error { + if len(k) == 32 { + rl.AddKeyWithMarker(k, len(v) == 0) + return nil + } + accBytes, err := p.tx.GetOne(kv.HashedAccounts, k[:32]) + if err != nil { + return err + } + incarnation := uint64(1) + if len(accBytes) != 0 { + incarnation, err = accounts.DecodeIncarnationFromStorage(accBytes) + if err != nil { + return err + } + if incarnation == 0 { + return nil + } + } + compositeKey := make([]byte, common.HashLength+common.IncarnationLength+common.HashLength) + copy(compositeKey, k[:32]) + binary.BigEndian.PutUint64(compositeKey[32:], incarnation) + copy(compositeKey[40:], k[32:]) + rl.AddKeyWithMarker(compositeKey, len(v) == 0) + return nil + } + if err := p.PromoteOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, to, false, collect); err != nil { + return trie.EmptyRoot, err + } + if err := p.PromoteOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, to, true, collect); err != nil { + return trie.EmptyRoot, err + } + } else { + collect := func(k, v []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error { + rl.AddKeyWithMarker(k, len(v) == 0) + return nil + } + if err := p.Promote(logPrefix, s.BlockNumber, to, false, collect); err != nil { + return trie.EmptyRoot, err + } + if err := p.Promote(logPrefix, s.BlockNumber, to, true, collect); err != nil { + return trie.EmptyRoot, 
err + } } - accTrieCollector := etl.NewCollector(logPrefix, cfg.tmpDir, etl.NewSortableBuffer(etl.BufferOptimalSize)) defer accTrieCollector.Close() accTrieCollectorFunc := accountTrieCollector(accTrieCollector) @@ -440,18 +623,30 @@ func UnwindIntermediateHashesStage(u *UnwindState, s *StageState, tx kv.RwTx, cf } func unwindIntermediateHashesStageImpl(logPrefix string, u *UnwindState, s *StageState, db kv.RwTx, cfg TrieCfg, expectedRootHash common.Hash, quit <-chan struct{}) error { - p := NewHashPromoter(db, quit) - p.TempDir = cfg.tmpDir + p := NewHashPromoter(db, cfg.tmpDir, quit, logPrefix) rl := trie.NewRetainList(0) - collect := func(k, v []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error { - rl.AddKeyWithMarker(k, len(v) == 0) - return nil - } - if err := p.Unwind(logPrefix, s, u, false /* storage */, collect); err != nil { - return err - } - if err := p.Unwind(logPrefix, s, u, true /* storage */, collect); err != nil { - return err + if cfg.historyV3 { + cfg.agg.SetTx(db) + collect := func(k, v []byte) { + rl.AddKeyWithMarker(k, len(v) == 0) + } + if err := p.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint, false, collect); err != nil { + return err + } + if err := p.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint, true, collect); err != nil { + return err + } + } else { + collect := func(k, v []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error { + rl.AddKeyWithMarker(k, len(v) == 0) + return nil + } + if err := p.Unwind(logPrefix, s, u, false /* storage */, collect); err != nil { + return err + } + if err := p.Unwind(logPrefix, s, u, true /* storage */, collect); err != nil { + return err + } } accTrieCollector := etl.NewCollector(logPrefix, cfg.tmpDir, etl.NewSortableBuffer(etl.BufferOptimalSize)) diff --git a/eth/stagedsync/stage_interhashes_test.go b/eth/stagedsync/stage_interhashes_test.go index 6e3528c96c9..942b17d4ad5 100644 --- a/eth/stagedsync/stage_interhashes_test.go +++ 
b/eth/stagedsync/stage_interhashes_test.go @@ -15,6 +15,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/trie" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func addTestAccount(tx kv.Putter, hash common.Hash, balance uint64, incarnation uint64) error { @@ -70,8 +71,9 @@ func TestAccountAndStorageTrie(t *testing.T) { // Populate account & storage trie DB tables // ---------------------------------------------------------------- + historyV3 := false blockReader := snapshotsync.NewBlockReader() - cfg := StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader, nil) + cfg := StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil) _, err := RegenerateIntermediateHashes("IH", tx, cfg, common.Hash{} /* expectedRootHash */, nil /* quit */) assert.Nil(t, err) @@ -166,6 +168,7 @@ func TestAccountAndStorageTrie(t *testing.T) { func TestAccountTrieAroundExtensionNode(t *testing.T) { _, tx := memdb.NewTestTx(t) + historyV3 := false acc := accounts.NewAccount() acc.Balance.SetUint64(1 * params.Ether) @@ -191,7 +194,7 @@ func TestAccountTrieAroundExtensionNode(t *testing.T) { assert.Nil(t, tx.Put(kv.HashedAccounts, hash6[:], encoded)) blockReader := snapshotsync.NewBlockReader() - _, err := RegenerateIntermediateHashes("IH", tx, StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader, nil), common.Hash{} /* expectedRootHash */, nil /* quit */) + _, err := RegenerateIntermediateHashes("IH", tx, StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil), common.Hash{} /* expectedRootHash */, nil /* quit */) assert.Nil(t, err) accountTrie := make(map[string][]byte) @@ -251,9 +254,9 @@ func TestStorageDeletion(t *testing.T) { // ---------------------------------------------------------------- // Populate account & storage trie DB tables // ---------------------------------------------------------------- - + historyV3 := false blockReader := snapshotsync.NewBlockReader() - cfg := 
StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader, nil) + cfg := StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil) _, err = RegenerateIntermediateHashes("IH", tx, cfg, common.Hash{} /* expectedRootHash */, nil /* quit */) assert.Nil(t, err) @@ -301,3 +304,95 @@ func TestStorageDeletion(t *testing.T) { assert.Equal(t, 0, len(storageTrieB)) } + +func TestHiveTrieRoot(t *testing.T) { + _, tx := memdb.NewTestTx(t) + + hashedAddress1, _ := common.HashData(common.FromHex("0000000000000000000000000000000000000000")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress1[:], + common.FromHex("02081bc5e32fd4403800"))) + + hashedAddress2, _ := common.HashData(common.FromHex("0000000000000000000000000000000000000314")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress2[:], + common.FromHex("0c0101203e6de602146067c01322e2528a8f320c504fd3d19a4d6c4c53b54d2b2f9357ec"))) + + hashedLocA, _ := common.HashData(common.FromHex("0000000000000000000000000000000000000000000000000000000000000000")) + require.Nil(t, tx.Put(kv.HashedStorage, dbutils.GenerateCompositeStorageKey(hashedAddress2, 1, hashedLocA), + common.FromHex("1234"))) + + hashedLocB, _ := common.HashData(common.FromHex("6661e9d6d8b923d5bbaab1b96e1dd51ff6ea2a93520fdc9eb75d059238b8c5e9")) + require.Nil(t, tx.Put(kv.HashedStorage, dbutils.GenerateCompositeStorageKey(hashedAddress2, 1, hashedLocB), + common.FromHex("01"))) + + hashedAddress3, _ := common.HashData(common.FromHex("0000000000000000000000000000000000000315")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress3[:], + common.FromHex("0e100999999999999999999999999999999901012052de487a82a5e45f90f7fb0edf025b1d23f85c308ae7543736a91ac6295217f3"))) + + hashedAddress4, _ := common.HashData(common.FromHex("0000000000000000000000000000000000000316")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress4[:], + common.FromHex("0c010120803ac275052ba5360d44e51a7d4a49ed9156c461a21119ff650506869827f2c8"))) + + 
hashedLocC, _ := common.HashData(common.FromHex("0000000000000000000000000000000000000000000000000000000000000001")) + require.Nil(t, tx.Put(kv.HashedStorage, dbutils.GenerateCompositeStorageKey(hashedAddress4, 1, hashedLocC), + common.FromHex("030000"))) + + hashedAddress5, _ := common.HashData(common.FromHex("0000000000000000000000000000000000000317")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress5[:], + common.FromHex("0c010120247c40b032c36acb07ca105280db053d204d3133302420f403dfbb54f775d0e2"))) + + hashedAddress6, _ := common.HashData(common.FromHex("0161e041aad467a890839d5b08b138c1e6373072")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress6[:], + common.FromHex("020b0123450000000000000000"))) + + hashedAddress7, _ := common.HashData(common.FromHex("6e53b788a8e675377c5f160e5c6cca6b46074af8")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress7[:], + common.FromHex("02081bc16d674ec80000"))) + + hashedAddress8, _ := common.HashData(common.FromHex("87da6a8c6e9eff15d703fc2773e32f6af8dbe301")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress8[:], + common.FromHex("020b0123450000000000000000"))) + + hashedAddress9, _ := common.HashData(common.FromHex("b97de4b8c857e4f6bc354f226dc3249aaee49209")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress9[:], + common.FromHex("020b0123450000000000000000"))) + + hashedAddress10, _ := common.HashData(common.FromHex("c5065c9eeebe6df2c2284d046bfc906501846c51")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress10[:], + common.FromHex("020b0123450000000000000000"))) + + hashedAddress11, _ := common.HashData(common.FromHex("cf49fda3be353c69b41ed96333cd24302da4556f")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress11[:], + common.FromHex("0301010b012344fffb67ea09bf8000"))) + + hashedAddress12, _ := common.HashData(common.FromHex("e0840414c530d72e5c2f1fe64f6311cc3136cab1")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress12[:], + common.FromHex("02081bc16d674ec80000"))) + + 
hashedAddress13, _ := common.HashData(common.FromHex("f8e0e7f6f1d0514ddfbc00bec204641f1f4d8cc8")) + require.Nil(t, tx.Put(kv.HashedAccounts, hashedAddress13[:], + common.FromHex("02081bc16d674ec80000"))) + + historyV3 := false + blockReader := snapshotsync.NewBlockReader() + cfg := StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil) + _, err := RegenerateIntermediateHashes("IH", tx, cfg, common.Hash{} /* expectedRootHash */, nil /* quit */) + require.Nil(t, err) + + // Now add a new account + newAddress := common.HexToAddress("0xf76fefb6608ca3d826945a9571d1f8e53bb6f366") + newHash, err := common.HashData(newAddress[:]) + require.Nil(t, err) + + require.Nil(t, tx.Put(kv.HashedAccounts, newHash[:], common.FromHex("02081bc16d674ec80000"))) + require.Nil(t, tx.Put(kv.AccountChangeSet, dbutils.EncodeBlockNumber(1), newAddress[:])) + + var s StageState + s.BlockNumber = 0 + incrementalRoot, err := incrementIntermediateHashes("IH", &s, tx, 1 /* to */, cfg, common.Hash{} /* expectedRootHash */, nil /* quit */) + require.Nil(t, err) + + regeneratedRoot, err := RegenerateIntermediateHashes("IH", tx, cfg, common.Hash{} /* expectedRootHash */, nil /* quit */) + require.Nil(t, err) + + assert.Equal(t, regeneratedRoot, incrementalRoot) +} diff --git a/eth/stagedsync/stage_issuance.go b/eth/stagedsync/stage_issuance.go index 38429ddca78..b00d0fc6226 100644 --- a/eth/stagedsync/stage_issuance.go +++ b/eth/stagedsync/stage_issuance.go @@ -133,7 +133,7 @@ func SpawnStageIssuance(cfg IssuanceCfg, s *StageState, tx kv.RwTx, ctx context. 
if header.UncleHash == types.EmptyUncleHash { blockReward, uncleRewards = ethash.AccumulateRewards(cfg.chainConfig, &header, nil) } else { - body, err := cfg.blockReader.Body(ctx, tx, hash, currentBlockNumber) + body, _, err := cfg.blockReader.Body(ctx, tx, hash, currentBlockNumber) if err != nil { return err } diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index f148f2ef811..9712b17a531 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -10,8 +10,10 @@ import ( "time" mapset "github.com/deckarep/golang-set" + "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon-lib/txpool" types2 "github.com/ledgerwatch/erigon-lib/types" "github.com/ledgerwatch/erigon/common" @@ -21,6 +23,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/eth/ethutils" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" @@ -88,8 +91,10 @@ func StageMiningCreateBlockCfg(db kv.RwDB, miner MiningState, chainConfig params } } +var maxTransactions uint16 = 1000 + // SpawnMiningCreateBlockStage -//TODO: +// TODO: // - resubmitAdjustCh - variable is not implemented func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBlockCfg, quit <-chan struct{}) (err error) { current := cfg.miner.MiningBlock @@ -125,12 +130,13 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc blockNum := executionAt + 1 var txs []types.Transaction - if err = cfg.txPool2DB.View(context.Background(), func(tx kv.Tx) error { + if err = cfg.txPool2DB.View(context.Background(), func(poolTx kv.Tx) error { txSlots := types2.TxsRlp{} - 
if err := cfg.txPool2.Best(200, &txSlots, tx); err != nil { + if err := cfg.txPool2.Best(maxTransactions, &txSlots, poolTx); err != nil { return err } + var skipByChainIDMismatch uint64 = 0 for i := range txSlots.Txs { s := rlp.NewStream(bytes.NewReader(txSlots.Txs[i]), uint64(len(txSlots.Txs[i]))) @@ -141,24 +147,20 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc if err != nil { return err } - if transaction.GetChainID().ToBig().Cmp(cfg.chainConfig.ChainID) != 0 { + if transaction.GetChainID().ToBig().Uint64() != 0 && transaction.GetChainID().ToBig().Cmp(cfg.chainConfig.ChainID) != 0 { + skipByChainIDMismatch++ continue } - txs = append(txs, transaction) - } - var sender common.Address - for i := range txs { + var sender common.Address copy(sender[:], txSlots.Senders.At(i)) - txs[i].SetSender(sender) + // Check if tx nonce is too low + txs = append(txs, transaction) + txs[len(txs)-1].SetSender(sender) } - return nil }); err != nil { return err } - current.RemoteTxs = types.NewTransactionsFixedOrder(txs) - // txpool v2 - doesn't prioritise local txs over remote - current.LocalTxs = types.NewTransactionsFixedOrder(nil) log.Debug(fmt.Sprintf("[%s] Candidate txs", logPrefix), "amount", len(txs)) localUncles, remoteUncles, err := readNonCanonicalHeaders(tx, blockNum, cfg.engine, coinbase, txPoolLocals) if err != nil { @@ -211,6 +213,14 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc header.Coinbase = coinbase header.Extra = cfg.miner.MiningConfig.ExtraData + txs, err = filterBadTransactions(tx, txs, cfg.chainConfig, blockNum, header.BaseFee) + if err != nil { + return err + } + current.RemoteTxs = types.NewTransactionsFixedOrder(txs) + // txpool v2 - doesn't prioritise local txs over remote + current.LocalTxs = types.NewTransactionsFixedOrder(nil) + log.Info(fmt.Sprintf("[%s] Start mine", logPrefix), "block", executionAt+1, "baseFee", header.BaseFee, "gasLimit", header.GasLimit) stateReader := 
state.NewPlainStateReader(tx) @@ -342,3 +352,105 @@ func readNonCanonicalHeaders(tx kv.Tx, blockNum uint64, engine consensus.Engine, } return } + +func filterBadTransactions(tx kv.Tx, transactions []types.Transaction, config params.ChainConfig, blockNumber uint64, baseFee *big.Int) ([]types.Transaction, error) { + var filtered []types.Transaction + simulationTx := memdb.NewMemoryBatch(tx) + defer simulationTx.Rollback() + gasBailout := config.Consensus == params.ParliaConsensus + + missedTxs := 0 + for len(transactions) > 0 && missedTxs != len(transactions) { + transaction := transactions[0] + sender, ok := transaction.GetSender() + if !ok { + transactions = transactions[1:] + continue + } + var account accounts.Account + ok, err := rawdb.ReadAccount(simulationTx, sender, &account) + if err != nil { + return nil, err + } + if !ok { + transactions = transactions[1:] + continue + } + // Check transaction nonce + if account.Nonce > transaction.GetNonce() { + transactions = transactions[1:] + continue + } + if account.Nonce < transaction.GetNonce() { + missedTxs++ + transactions = append(transactions[1:], transaction) + continue + } + missedTxs = 0 + + // Make sure the sender is an EOA (EIP-3607) + if !account.IsEmptyCodeHash() { + transactions = transactions[1:] + continue + } + + if config.IsLondon(blockNumber) { + baseFee256 := uint256.NewInt(0) + if overflow := baseFee256.SetFromBig(baseFee); overflow { + return nil, fmt.Errorf("bad baseFee %s", baseFee) + } + // Make sure the transaction gasFeeCap is greater than the block's baseFee. 
+ if !transaction.GetFeeCap().IsZero() || !transaction.GetTip().IsZero() { + if err := core.CheckEip1559TxGasFeeCap(sender, transaction.GetFeeCap(), transaction.GetTip(), baseFee256); err != nil { + transactions = transactions[1:] + continue + } + } + } + txnGas := transaction.GetGas() + txnPrice := transaction.GetPrice() + value := transaction.GetValue() + accountBalance := account.Balance + + want := uint256.NewInt(0) + want.SetUint64(txnGas) + want, overflow := want.MulOverflow(want, txnPrice) + if overflow { + transactions = transactions[1:] + continue + } + + if transaction.GetFeeCap() != nil { + want.SetUint64(txnGas) + want, overflow = want.MulOverflow(want, transaction.GetFeeCap()) + if overflow { + transactions = transactions[1:] + continue + } + want, overflow = want.AddOverflow(want, value) + if overflow { + transactions = transactions[1:] + continue + } + } + + if accountBalance.Cmp(want) < 0 { + if !gasBailout { + transactions = transactions[1:] + continue + } + } + // Updates account in the simulation + account.Nonce++ + account.Balance.Sub(&account.Balance, want) + accountBuffer := make([]byte, account.EncodingLengthForStorage()) + account.EncodeForStorage(accountBuffer) + if err := simulationTx.Put(kv.PlainState, sender[:], accountBuffer); err != nil { + return nil, err + } + // Mark transaction as valid + filtered = append(filtered, transaction) + transactions = transactions[1:] + } + return filtered, nil +} diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go index a1742548088..be230b902ca 100644 --- a/eth/stagedsync/stage_mining_exec.go +++ b/eth/stagedsync/stage_mining_exec.go @@ -1,14 +1,16 @@ package stagedsync import ( + "errors" "fmt" "sync/atomic" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/common" 
"github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/misc" @@ -19,7 +21,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/snapshotsync" ) @@ -34,6 +35,7 @@ type MiningExecCfg struct { vmConfig *vm.Config tmpdir string interrupt *int32 + payloadId uint64 } func StageMiningExecCfg( @@ -45,6 +47,7 @@ func StageMiningExecCfg( vmConfig *vm.Config, tmpdir string, interrupt *int32, + payloadId uint64, ) MiningExecCfg { return MiningExecCfg{ db: db, @@ -56,11 +59,12 @@ func StageMiningExecCfg( vmConfig: vmConfig, tmpdir: tmpdir, interrupt: interrupt, + payloadId: payloadId, } } // SpawnMiningExecStage -//TODO: +// TODO: // - resubmitAdjustCh - variable is not implemented func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-chan struct{}) error { cfg.vmConfig.NoReceipts = false @@ -86,14 +90,13 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c } getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(tx, hash, number) } - contractHasTEVM := ethdb.GetHasTEVM(tx) // Short circuit if there is no available pending transactions. // But if we disable empty precommit already, ignore it. Since // empty block is necessary to keep the liveness of the network. 
if noempty { if !localTxs.Empty() { - logs, err := addTransactionsToMiningBlock(logPrefix, current, cfg.chainConfig, cfg.vmConfig, getHeader, contractHasTEVM, cfg.engine, localTxs, cfg.miningState.MiningConfig.Etherbase, ibs, quit, cfg.interrupt) + logs, err := addTransactionsToMiningBlock(logPrefix, current, cfg.chainConfig, cfg.vmConfig, getHeader, cfg.engine, localTxs, cfg.miningState.MiningConfig.Etherbase, ibs, quit, cfg.interrupt, cfg.payloadId) if err != nil { return err } @@ -105,7 +108,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c //} } if !remoteTxs.Empty() { - logs, err := addTransactionsToMiningBlock(logPrefix, current, cfg.chainConfig, cfg.vmConfig, getHeader, contractHasTEVM, cfg.engine, remoteTxs, cfg.miningState.MiningConfig.Etherbase, ibs, quit, cfg.interrupt) + logs, err := addTransactionsToMiningBlock(logPrefix, current, cfg.chainConfig, cfg.vmConfig, getHeader, cfg.engine, remoteTxs, cfg.miningState.MiningConfig.Etherbase, ibs, quit, cfg.interrupt, cfg.payloadId) if err != nil { return err } @@ -118,6 +121,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c } } + log.Debug("SpawnMiningExecStage", "block txn", current.Txs.Len(), "remote txn", current.RemoteTxs.Empty(), "payload", cfg.payloadId) if current.Uncles == nil { current.Uncles = []*types.Header{} } @@ -128,11 +132,13 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c current.Receipts = types.Receipts{} } - _, err := core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, stateWriter, + var err error + _, current.Txs, current.Receipts, err = core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, stateWriter, &cfg.chainConfig, ibs, current.Receipts, epochReader{tx: tx}, chainReader{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, true) if err != nil { return err } + 
log.Debug("FinalizeBlockExecution", "current txn", current.Txs.Len(), "current receipt", current.Receipts.Len(), "payload", cfg.payloadId) /* if w.isRunning() { @@ -142,7 +148,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c select { case w.taskCh <- &task{receipts: receipts, state: s, tds: w.env.tds, block: block, createdAt: time.Now(), ctx: ctx}: - log.Warn("mining: worker task event", + log.Debug("mining: worker task event", "number", block.NumberU64(), "hash", block.Hash().String(), "parentHash", block.ParentHash().String(), @@ -169,7 +175,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c return nil } -func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainConfig params.ChainConfig, vmConfig *vm.Config, getHeader func(hash common.Hash, number uint64) *types.Header, contractHasTEVM func(common.Hash) (bool, error), engine consensus.Engine, txs types.TransactionsStream, coinbase common.Address, ibs *state.IntraBlockState, quit <-chan struct{}, interrupt *int32) (types.Logs, error) { +func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainConfig params.ChainConfig, vmConfig *vm.Config, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, txs types.TransactionsStream, coinbase common.Address, ibs *state.IntraBlockState, quit <-chan struct{}, interrupt *int32, payloadId uint64) (types.Logs, error) { header := current.Header tcount := 0 gasPool := new(core.GasPool).AddGas(current.Header.GasLimit) @@ -179,18 +185,17 @@ func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainC noop := state.NewNoopWriter() var miningCommitTx = func(txn types.Transaction, coinbase common.Address, vmConfig *vm.Config, chainConfig params.ChainConfig, ibs *state.IntraBlockState, current *MiningBlock) ([]*types.Log, error) { + ibs.Prepare(txn.Hash(), common.Hash{}, tcount) + gasSnap := gasPool.Gas() snap := 
ibs.Snapshot() - receipt, _, err := core.ApplyTransaction(&chainConfig, core.GetHashFn(header, getHeader), engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, *vmConfig, contractHasTEVM) + log.Info("addTransactionsToMiningBlock", "txn hash", txn.Hash()) + receipt, _, err := core.ApplyTransaction(&chainConfig, core.GetHashFn(header, getHeader), engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, *vmConfig) if err != nil { ibs.RevertToSnapshot(snap) + gasPool = new(core.GasPool).AddGas(gasSnap) // restore gasPool as well as ibs return nil, err } - //if !chainConfig.IsByzantium(header.Number) { - // batch.Rollback() - //} - //fmt.Printf("Tx Hash: %x\n", txn.Hash()) - current.Txs = append(current.Txs, txn) current.Receipts = append(current.Receipts, receipt) return receipt.Logs, nil @@ -202,7 +207,7 @@ func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainC } if interrupt != nil && atomic.LoadInt32(interrupt) != 0 { - log.Debug("Transaction adding was interrupted") + log.Debug("Transaction adding was interrupted", "payload", payloadId) break } // If we don't have enough gas for any further transactions then we're done @@ -215,11 +220,15 @@ func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainC if txn == nil { break } - // Error may be ignored here. The error has already been checked - // during transaction acceptance is the transaction pool. - // + // We use the eip155 signer regardless of the env hf. - from, _ := txn.Sender(*signer) + from, err := txn.Sender(*signer) + if err != nil { + log.Warn(fmt.Sprintf("[%s] Could not recover transaction sender", logPrefix), "hash", txn.Hash(), "err", err) + txs.Pop() + continue + } + // Check whether the txn is replay protected. If we're not in the EIP155 (Spurious Dragon) hf // phase, start ignoring the sender until we do. 
if txn.Protected() && !chainConfig.IsSpuriousDragon(header.Number.Uint64()) { @@ -230,35 +239,30 @@ func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainC } // Start executing the transaction - ibs.Prepare(txn.Hash(), common.Hash{}, tcount) logs, err := miningCommitTx(txn, coinbase, vmConfig, chainConfig, ibs, current) - switch err { - case core.ErrGasLimitReached: + if errors.Is(err, core.ErrGasLimitReached) { // Pop the env out-of-gas transaction without shifting in the next from the account - log.Debug(fmt.Sprintf("[%s] Gas limit exceeded for env block", logPrefix), "sender", from) + log.Debug(fmt.Sprintf("[%s] Gas limit exceeded for env block", logPrefix), "hash", txn.Hash(), "sender", from) txs.Pop() - - case core.ErrNonceTooLow: + } else if errors.Is(err, core.ErrNonceTooLow) { // New head notification data race between the transaction pool and miner, shift - log.Debug(fmt.Sprintf("[%s] Skipping transaction with low nonce", logPrefix), "sender", from, "nonce", txn.GetNonce()) + log.Debug(fmt.Sprintf("[%s] Skipping transaction with low nonce", logPrefix), "hash", txn.Hash(), "sender", from, "nonce", txn.GetNonce()) txs.Shift() - - case core.ErrNonceTooHigh: + } else if errors.Is(err, core.ErrNonceTooHigh) { // Reorg notification data race between the transaction pool and miner, skip account = - log.Debug(fmt.Sprintf("[%s] Skipping account with hight nonce", logPrefix), "sender", from, "nonce", txn.GetNonce()) + log.Debug(fmt.Sprintf("[%s] Skipping transaction with high nonce", logPrefix), "hash", txn.Hash(), "sender", from, "nonce", txn.GetNonce()) txs.Pop() - - case nil: + } else if err == nil { // Everything ok, collect the logs and shift in the next transaction from the same account + log.Debug(fmt.Sprintf("[%s] addTransactionsToMiningBlock Successful", logPrefix), "sender", from, "nonce", txn.GetNonce(), "payload", payloadId) coalescedLogs = append(coalescedLogs, logs...) 
tcount++ txs.Shift() - - default: + } else { // Strange error, discard the transaction and get the next in line (note, the // nonce-too-high clause will prevent us from executing in vain). - log.Debug(fmt.Sprintf("[%s] Transaction failed, account skipped", logPrefix), "hash", txn.Hash(), "err", err) + log.Debug(fmt.Sprintf("[%s] Skipping transaction", logPrefix), "hash", txn.Hash(), "sender", from, "err", err) txs.Shift() } } @@ -280,7 +284,7 @@ func NotifyPendingLogs(logPrefix string, notifier ChainEventNotifier, logs types } if notifier == nil { - log.Warn(fmt.Sprintf("[%s] rpc notifier is not set, rpc daemon won't be updated about pending logs", logPrefix)) + log.Debug(fmt.Sprintf("[%s] rpc notifier is not set, rpc daemon won't be updated about pending logs", logPrefix)) return } notifier.OnNewPendingLogs(logs) diff --git a/eth/stagedsync/stage_mining_finish.go b/eth/stagedsync/stage_mining_finish.go index 97708a30dc3..b8558c01066 100644 --- a/eth/stagedsync/stage_mining_finish.go +++ b/eth/stagedsync/stage_mining_finish.go @@ -14,7 +14,7 @@ type MiningFinishCfg struct { db kv.RwDB chainConfig params.ChainConfig engine consensus.Engine - sealCancel <-chan struct{} + sealCancel chan struct{} miningState MiningState } @@ -23,7 +23,7 @@ func StageMiningFinishCfg( chainConfig params.ChainConfig, engine consensus.Engine, miningState MiningState, - sealCancel <-chan struct{}, + sealCancel chan struct{}, ) MiningFinishCfg { return MiningFinishCfg{ db: db, @@ -68,14 +68,19 @@ func SpawnMiningFinishStage(s *StageState, tx kv.RwTx, cfg MiningFinishCfg, quit if block.Transactions().Len() > 0 { log.Info(fmt.Sprintf("[%s] block ready for seal", logPrefix), - "blocn_num", block.NumberU64(), + "block_num", block.NumberU64(), "transactions", block.Transactions().Len(), "gas_used", block.GasUsed(), "gas_limit", block.GasLimit(), "difficulty", block.Difficulty(), ) } - + // interrupt aborts the in-flight sealing task. 
+ select { + case cfg.sealCancel <- struct{}{}: + default: + log.Trace("None in-flight sealing task.") + } chain := ChainReader{Cfg: cfg.chainConfig, Db: tx} if err := cfg.engine.Seal(chain, block, cfg.miningState.MiningResultCh, cfg.sealCancel); err != nil { log.Warn("Block sealing failed", "err", err) diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index f8aa547e285..7e0174ad4b9 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -63,7 +63,7 @@ func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, badBlockHalt bool } } -func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context) error { +func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, quiet bool) error { if cfg.blockRetire != nil && cfg.blockRetire.Snapshots() != nil && cfg.blockRetire.Snapshots().Cfg().Enabled && s.BlockNumber < cfg.blockRetire.Snapshots().BlocksAvailable() { s.BlockNumber = cfg.blockRetire.Snapshots().BlocksAvailable() } @@ -88,11 +88,11 @@ func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx kv.R if toBlock > 0 { to = cmp.Min(prevStageProgress, toBlock) } - if to <= s.BlockNumber { + if to < s.BlockNumber { return nil } logPrefix := s.LogPrefix() - if to > s.BlockNumber+16 { + if !quiet && to > s.BlockNumber+16 { log.Info(fmt.Sprintf("[%s] Started", logPrefix), "from", s.BlockNumber, "to", to) } @@ -166,7 +166,7 @@ func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx kv.R if j != nil { n += uint64(j.index) } - log.Info(fmt.Sprintf("[%s] Recovery", logPrefix), "block_number", n) + log.Info(fmt.Sprintf("[%s] Recovery", logPrefix), "block_number", n, "ch", fmt.Sprintf("%d/%d", len(jobs), cap(jobs))) case j, ok = <-out: if !ok { return @@ -382,17 +382,8 @@ func PruneSendersStage(s *PruneState, tx kv.RwTx, cfg SendersCfg, ctx context.Co } defer 
tx.Rollback() } - sn := cfg.blockRetire.Snapshots() - // With snapsync - can prune old data only after snapshot for this data created: CanDeleteTo() - if sn != nil && sn.Cfg().Enabled && sn.Cfg().Produce { - if err := cfg.blockRetire.PruneAncientBlocks(tx); err != nil { - return err - } - if err := retireBlocksInSingleBackgroundThread(s, cfg, ctx, tx); err != nil { - return fmt.Errorf("retireBlocksInSingleBackgroundThread: %w", err) - } - } else if cfg.prune.TxIndex.Enabled() { + if !(sn != nil && sn.Cfg().Enabled && sn.Cfg().Produce) && cfg.prune.TxIndex.Enabled() { to := cfg.prune.TxIndex.PruneTo(s.ForwardProgress) if err = rawdb.PruneTable(tx, kv.Senders, to, ctx, 1_000); err != nil { return err @@ -406,23 +397,3 @@ func PruneSendersStage(s *PruneState, tx kv.RwTx, cfg SendersCfg, ctx context.Co } return nil } - -func retireBlocksInSingleBackgroundThread(s *PruneState, cfg SendersCfg, ctx context.Context, tx kv.RwTx) (err error) { - // if something already happens in background - noop - if cfg.blockRetire.Working() { - return nil - } - if res := cfg.blockRetire.Result(); res != nil { - if res.Err != nil { - return fmt.Errorf("[%s] retire blocks last error: %w, fromBlock=%d, toBlock=%d", s.LogPrefix(), res.Err, res.BlockFrom, res.BlockTo) - } - - if err := rawdb.WriteSnapshots(tx, cfg.blockRetire.Snapshots().Files()); err != nil { - return err - } - } - - cfg.blockRetire.RetireBlocksInBackground(ctx, s.ForwardProgress, log.LvlInfo) - - return nil -} diff --git a/eth/stagedsync/stage_senders_test.go b/eth/stagedsync/stage_senders_test.go index adc825e2055..0ac94cb3d7c 100644 --- a/eth/stagedsync/stage_senders_test.go +++ b/eth/stagedsync/stage_senders_test.go @@ -110,7 +110,7 @@ func TestSenders(t *testing.T) { require.NoError(stages.SaveStageProgress(tx, stages.Bodies, 3)) cfg := StageSendersCfg(db, params.TestChainConfig, false, "", prune.Mode{}, snapshotsync.NewBlockRetire(1, "", nil, db, nil, nil), nil) - err := SpawnRecoverSendersStage(cfg, &StageState{ID: 
stages.Senders}, nil, tx, 3, ctx) + err := SpawnRecoverSendersStage(cfg, &StageState{ID: stages.Senders}, nil, tx, 3, ctx, false /* quiet */) assert.NoError(t, err) { diff --git a/eth/stagedsync/stage_snapshots.go b/eth/stagedsync/stage_snapshots.go new file mode 100644 index 00000000000..b76b2ea269c --- /dev/null +++ b/eth/stagedsync/stage_snapshots.go @@ -0,0 +1,518 @@ +package stagedsync + +import ( + "context" + "encoding/binary" + "fmt" + "math/big" + "runtime" + "time" + + "github.com/holiman/uint256" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/cmp" + "github.com/ledgerwatch/erigon-lib/etl" + proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapcfg" + "github.com/ledgerwatch/log/v3" +) + +type SnapshotsCfg struct { + db kv.RwDB + chainConfig params.ChainConfig + dirs datadir.Dirs + + snapshots *snapshotsync.RoSnapshots + blockRetire *snapshotsync.BlockRetire + snapshotDownloader proto_downloader.DownloaderClient + blockReader services.FullBlockReader + dbEventNotifier snapshotsync.DBEventNotifier + historyV3 bool + agg *state.Aggregator22 +} + +func StageSnapshotsCfg( + db kv.RwDB, + chainConfig params.ChainConfig, + dirs datadir.Dirs, + snapshots *snapshotsync.RoSnapshots, + blockRetire *snapshotsync.BlockRetire, + snapshotDownloader proto_downloader.DownloaderClient, + blockReader 
services.FullBlockReader, + dbEventNotifier snapshotsync.DBEventNotifier, + historyV3 bool, + agg *state.Aggregator22, +) SnapshotsCfg { + return SnapshotsCfg{ + db: db, + chainConfig: chainConfig, + dirs: dirs, + snapshots: snapshots, + blockRetire: blockRetire, + snapshotDownloader: snapshotDownloader, + blockReader: blockReader, + dbEventNotifier: dbEventNotifier, + historyV3: historyV3, + agg: agg, + } +} + +func SpawnStageSnapshots( + s *StageState, + ctx context.Context, + tx kv.RwTx, + cfg SnapshotsCfg, + initialCycle bool, +) (err error) { + useExternalTx := tx != nil + if !useExternalTx { + tx, err = cfg.db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + } + if err := DownloadAndIndexSnapshotsIfNeed(s, ctx, tx, cfg, initialCycle); err != nil { + return err + } + var minProgress uint64 + for _, stage := range []stages.SyncStage{stages.Headers, stages.Bodies, stages.Senders, stages.TxLookup} { + progress, err := stages.GetStageProgress(tx, stage) + if err != nil { + return err + } + if minProgress == 0 || progress < minProgress { + minProgress = progress + } + } + if minProgress > s.BlockNumber { + if err = s.Update(tx, minProgress); err != nil { + return err + } + } + if !useExternalTx { + if err := tx.Commit(); err != nil { + return err + } + } + + return nil +} + +func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.RwTx, cfg SnapshotsCfg, initialCycle bool) error { + if !initialCycle || cfg.snapshots == nil || !cfg.snapshots.Cfg().Enabled { + return nil + } + + if err := WaitForDownloader(s, ctx, cfg, tx); err != nil { + return err + } + + cfg.snapshots.LogStat() + cfg.agg.LogStats(tx, func(endTxNumMinimax uint64) uint64 { + _, histBlockNumProgress, _ := rawdb.TxNums.FindBlockNum(tx, endTxNumMinimax) + return histBlockNumProgress + }) + + // Create .idx files + if cfg.snapshots.IndicesMax() < cfg.snapshots.SegmentsMax() { + if !cfg.snapshots.Cfg().Produce && cfg.snapshots.IndicesMax() == 0 { + return 
fmt.Errorf("please remove --snap.stop, erigon can't work without creating basic indices") + } + if cfg.snapshots.Cfg().Produce { + if !cfg.snapshots.SegmentsReady() { + return fmt.Errorf("not all snapshot segments are available") + } + + // wait for Downloader service to download all expected snapshots + if cfg.snapshots.IndicesMax() < cfg.snapshots.SegmentsMax() { + chainID, _ := uint256.FromBig(cfg.chainConfig.ChainID) + if err := snapshotsync.BuildMissedIndices(s.LogPrefix(), ctx, cfg.dirs, *chainID, estimate.IndexSnapshot.Workers()); err != nil { + return fmt.Errorf("BuildMissedIndices: %w", err) + } + } + + if err := cfg.snapshots.ReopenFolder(); err != nil { + return err + } + if cfg.dbEventNotifier != nil { + cfg.dbEventNotifier.OnNewSnapshot() + } + } + } + + if cfg.historyV3 { + if err := cfg.agg.BuildMissedIndices(); err != nil { + return err + } + if cfg.dbEventNotifier != nil { + cfg.dbEventNotifier.OnNewSnapshot() + } + } + + blocksAvailable := cfg.snapshots.BlocksAvailable() + if s.BlockNumber < blocksAvailable { // allow genesis + if err := s.Update(tx, blocksAvailable); err != nil { + return err + } + s.BlockNumber = blocksAvailable + } + if err := FillDBFromSnapshots(s.LogPrefix(), ctx, tx, cfg.dirs.Tmp, cfg.snapshots, cfg.blockReader); err != nil { + return err + } + return nil +} + +func FillDBFromSnapshots(logPrefix string, ctx context.Context, tx kv.RwTx, tmpdir string, sn *snapshotsync.RoSnapshots, blockReader services.HeaderAndCanonicalReader) error { + blocksAvailable := sn.BlocksAvailable() + logEvery := time.NewTicker(logInterval) + defer logEvery.Stop() + // updating the progress of further stages (but only forward) that are contained inside of snapshots + for _, stage := range []stages.SyncStage{stages.Headers, stages.Bodies, stages.BlockHashes, stages.Senders} { + progress, err := stages.GetStageProgress(tx, stage) + if err != nil { + return fmt.Errorf("get %s stage progress to advance: %w", stage, err) + } + if progress >= 
blocksAvailable { + continue + } + + if err = stages.SaveStageProgress(tx, stage, blocksAvailable); err != nil { + return fmt.Errorf("advancing %s stage: %w", stage, err) + } + switch stage { + case stages.Headers: + h2n := etl.NewCollector("Snapshots", tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer h2n.Close() + h2n.LogLvl(log.LvlDebug) + + // fill some small tables from snapshots, in future we may store this data in snapshots also, but + // for now easier just store them in db + td := big.NewInt(0) + if err := snapshotsync.ForEachHeader(ctx, sn, func(header *types.Header) error { + blockNum, blockHash := header.Number.Uint64(), header.Hash() + td.Add(td, header.Difficulty) + + if err := rawdb.WriteTd(tx, blockHash, blockNum, td); err != nil { + return err + } + if err := rawdb.WriteCanonicalHash(tx, blockHash, blockNum); err != nil { + return err + } + if err := h2n.Collect(blockHash[:], dbutils.EncodeBlockNumber(blockNum)); err != nil { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-logEvery.C: + log.Info(fmt.Sprintf("[%s] Writing total difficulty index", logPrefix), "block_num", header.Number.Uint64()) + default: + } + return nil + }); err != nil { + return err + } + if err := h2n.Load(tx, kv.HeaderNumber, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { + return err + } + canonicalHash, err := blockReader.CanonicalHash(ctx, tx, blocksAvailable) + if err != nil { + return err + } + if err = rawdb.WriteHeadHeaderHash(tx, canonicalHash); err != nil { + return err + } + case stages.Bodies: + // ResetSequence - allow set arbitrary value to sequence (for example to decrement it to exact value) + ok, err := sn.ViewTxs(blocksAvailable, func(sn *snapshotsync.TxnSegment) error { + lastTxnID := sn.IdxTxnHash.BaseDataID() + uint64(sn.Seg.Count()) + if err := rawdb.ResetSequence(tx, kv.EthTx, lastTxnID); err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + if !ok { + return 
fmt.Errorf("snapshot not found for block: %d", blocksAvailable) + } + + historyV3, err := rawdb.HistoryV3.Enabled(tx) + if err != nil { + return err + } + if historyV3 { + var toBlock uint64 + if sn != nil { + toBlock = sn.BlocksAvailable() + } + toBlock = cmp.Max(toBlock, progress) + + if err := rawdb.TxNums.WriteForGenesis(tx, 1); err != nil { + return err + } + if err := sn.Bodies.View(func(bs []*snapshotsync.BodySegment) error { + for _, b := range bs { + if err := b.Iterate(func(blockNum, baseTxNum, txAmount uint64) error { + if blockNum == 0 || blockNum > toBlock { + return nil + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-logEvery.C: + log.Info(fmt.Sprintf("[%s] Writing MaxTxNums index for snapshots", logPrefix), "block_num", blockNum) + default: + } + maxTxNum := baseTxNum + txAmount - 1 + + if err := rawdb.TxNums.Append(tx, blockNum, maxTxNum); err != nil { + return fmt.Errorf("%w. blockNum=%d, maxTxNum=%d", err, blockNum, maxTxNum) + } + return nil + }); err != nil { + return err + } + } + return nil + }); err != nil { + return fmt.Errorf("build txNum => blockNum mapping: %w", err) + } + } + } + } + return nil +} + +// WaitForDownloader - wait for Downloader service to download all expected snapshots +// for MVP we sync with Downloader only once, in future will send new snapshots also +func WaitForDownloader(s *StageState, ctx context.Context, cfg SnapshotsCfg, tx kv.RwTx) error { + if cfg.snapshots.Cfg().NoDownloader { + if err := cfg.snapshots.ReopenFolder(); err != nil { + return err + } + if cfg.dbEventNotifier != nil { // can notify right here, even that write txn is not commit + cfg.dbEventNotifier.OnNewSnapshot() + } + return nil + } + + snInDB, err := rawdb.ReadSnapshots(tx) + if err != nil { + return err + } + dbEmpty := len(snInDB) == 0 + var missingSnapshots []snapshotsync.Range + if !dbEmpty { + _, missingSnapshots, err = snapshotsync.Segments(cfg.snapshots.Dir()) + if err != nil { + return err + } + } + if 
len(missingSnapshots) > 0 { + log.Warn(fmt.Sprintf("[%s] downloading missing snapshots", s.LogPrefix())) + } + snHistInDB, err := rawdb.ReadHistorySnapshots(tx) + if err != nil { + return err + } + + // send all hashes to the Downloader service + preverifiedBlockSnapshots := snapcfg.KnownCfg(cfg.chainConfig.ChainName, snInDB, snHistInDB).Preverified + downloadRequest := make([]snapshotsync.DownloadRequest, 0, len(preverifiedBlockSnapshots)+len(missingSnapshots)) + // build all download requests + // builds preverified snapshots request + for _, p := range preverifiedBlockSnapshots { + downloadRequest = append(downloadRequest, snapshotsync.NewDownloadRequest(nil, p.Name, p.Hash)) + } + if cfg.historyV3 { + preverifiedHistorySnapshots := snapcfg.KnownCfg(cfg.chainConfig.ChainName, snInDB, snHistInDB).PreverifiedHistory + for _, p := range preverifiedHistorySnapshots { + downloadRequest = append(downloadRequest, snapshotsync.NewDownloadRequest(nil, p.Name, p.Hash)) + } + } + + // builds missing snapshots request + for i := range missingSnapshots { + downloadRequest = append(downloadRequest, snapshotsync.NewDownloadRequest(&missingSnapshots[i], "", "")) + } + + log.Info(fmt.Sprintf("[%s] Fetching torrent files metadata", s.LogPrefix())) + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err := snapshotsync.RequestSnapshotsDownload(ctx, downloadRequest, cfg.snapshotDownloader); err != nil { + log.Error(fmt.Sprintf("[%s] call downloader", s.LogPrefix()), "err", err) + time.Sleep(10 * time.Second) + continue + } + break + } + downloadStartTime := time.Now() + logEvery := time.NewTicker(logInterval) + defer logEvery.Stop() + var m runtime.MemStats + + // Check once without delay, for faster erigon re-start + stats, err := cfg.snapshotDownloader.Stats(ctx, &proto_downloader.StatsRequest{}) + if err == nil && stats.Completed { + goto Finish + } + + // Print download progress until all segments are available +Loop: + for { + select { + case 
<-ctx.Done(): + return ctx.Err() + case <-logEvery.C: + if stats, err := cfg.snapshotDownloader.Stats(ctx, &proto_downloader.StatsRequest{}); err != nil { + log.Warn("Error while waiting for snapshots progress", "err", err) + } else if stats.Completed { + if !cfg.snapshots.Cfg().Verify { // will verify after loop + if _, err := cfg.snapshotDownloader.Verify(ctx, &proto_downloader.VerifyRequest{}); err != nil { + return err + } + } + log.Info(fmt.Sprintf("[%s] download finished", s.LogPrefix()), "time", time.Since(downloadStartTime).String()) + break Loop + } else { + if stats.MetadataReady < stats.FilesTotal { + log.Info(fmt.Sprintf("[%s] Waiting for torrents metadata: %d/%d", s.LogPrefix(), stats.MetadataReady, stats.FilesTotal)) + continue + } + libcommon.ReadMemStats(&m) + downloadTimeLeft := calculateTime(stats.BytesTotal-stats.BytesCompleted, stats.DownloadRate) + log.Info(fmt.Sprintf("[%s] download", s.LogPrefix()), + "progress", fmt.Sprintf("%.2f%% %s/%s", stats.Progress, libcommon.ByteCount(stats.BytesCompleted), libcommon.ByteCount(stats.BytesTotal)), + "download-time-left", downloadTimeLeft, + "total-download-time", time.Since(downloadStartTime).Round(time.Second).String(), + "download", libcommon.ByteCount(stats.DownloadRate)+"/s", + "upload", libcommon.ByteCount(stats.UploadRate)+"/s", + ) + log.Info(fmt.Sprintf("[%s] download", s.LogPrefix()), + "peers", stats.PeersUnique, + "connections", stats.ConnectionsTotal, + "files", stats.FilesTotal, + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), + ) + } + } + } + +Finish: + if cfg.snapshots.Cfg().Verify { + if _, err := cfg.snapshotDownloader.Verify(ctx, &proto_downloader.VerifyRequest{}); err != nil { + return err + } + } + + if err := cfg.snapshots.ReopenFolder(); err != nil { + return err + } + if err := cfg.agg.ReopenFiles(); err != nil { + return err + } + + if err := rawdb.WriteSnapshots(tx, cfg.snapshots.Files()); err != nil { + return err + } + if cfg.dbEventNotifier != nil 
{ // can notify right here, even that write txn is not commit + cfg.dbEventNotifier.OnNewSnapshot() + } + + firstNonGenesis, err := rawdb.SecondKey(tx, kv.Headers) + if err != nil { + return err + } + if firstNonGenesis != nil { + firstNonGenesisBlockNumber := binary.BigEndian.Uint64(firstNonGenesis) + if cfg.snapshots.SegmentsMax()+1 < firstNonGenesisBlockNumber { + log.Warn(fmt.Sprintf("[%s] Some blocks are not in snapshots and not in db", s.LogPrefix()), "max_in_snapshots", cfg.snapshots.SegmentsMax(), "min_in_db", firstNonGenesisBlockNumber) + } + } + return nil +} + +func calculateTime(amountLeft, rate uint64) string { + if rate == 0 { + return "999hrs:99m:99s" + } + timeLeftInSeconds := amountLeft / rate + + hours := timeLeftInSeconds / 3600 + minutes := (timeLeftInSeconds / 60) % 60 + seconds := timeLeftInSeconds % 60 + + return fmt.Sprintf("%dhrs:%dm:%ds", hours, minutes, seconds) +} + +/* ====== PRUNING ====== */ +// snapshots pruning section works more as a retiring of blocks +// retiring blocks means moving block data from db into snapshots +func SnapshotsPrune(s *PruneState, cfg SnapshotsCfg, ctx context.Context, tx kv.RwTx) (err error) { + useExternalTx := tx != nil + if !useExternalTx { + tx, err = cfg.db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + } + + sn := cfg.blockRetire.Snapshots() + if sn != nil && sn.Cfg().Enabled && sn.Cfg().Produce { + if err := cfg.blockRetire.PruneAncientBlocks(tx); err != nil { + return err + } + + if err := retireBlocksInSingleBackgroundThread(s, cfg.blockRetire, ctx, tx); err != nil { + return fmt.Errorf("retireBlocksInSingleBackgroundThread: %w", err) + } + } + + if !useExternalTx { + if err := tx.Commit(); err != nil { + return err + } + } + + return nil +} + +// retiring blocks in a single thread in the background +func retireBlocksInSingleBackgroundThread(s *PruneState, blockRetire *snapshotsync.BlockRetire, ctx context.Context, tx kv.RwTx) (err error) { + // if something already
happens in background - noop + if blockRetire.Working() { + return nil + } + ok, err := blockRetire.BackgroundResult.GetAndReset() + if err != nil { + return fmt.Errorf("[%s] %w", s.LogPrefix(), err) + } + if ok { + if err := rawdb.WriteSnapshots(tx, blockRetire.Snapshots().Files()); err != nil { + return err + } + } + + blockRetire.RetireBlocksInBackground(ctx, s.ForwardProgress, log.LvlInfo) + + return nil +} diff --git a/eth/stagedsync/stage_tevm.go b/eth/stagedsync/stage_tevm.go deleted file mode 100644 index 845eb6ccd87..00000000000 --- a/eth/stagedsync/stage_tevm.go +++ /dev/null @@ -1,375 +0,0 @@ -package stagedsync - -import ( - "context" - "errors" - "fmt" - "runtime" - "time" - - "github.com/c2h5oh/datasize" - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/common/cmp" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/types/accounts" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/ethdb/olddb" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/log/v3" -) - -type TranspileCfg struct { - db kv.RwDB - batchSize datasize.ByteSize - chainConfig *params.ChainConfig -} - -func StageTranspileCfg( - kv kv.RwDB, - batchSize datasize.ByteSize, - chainConfig *params.ChainConfig, -) TranspileCfg { - return TranspileCfg{ - db: kv, - batchSize: batchSize, - chainConfig: chainConfig, - } -} - -func SpawnTranspileStage(s *StageState, tx kv.RwTx, toBlock uint64, cfg TranspileCfg, ctx context.Context) error { - var prevStageProgress uint64 - var errStart error - - if tx == nil { - errStart = cfg.db.View(ctx, func(tx kv.Tx) error { - prevStageProgress, errStart = stages.GetStageProgress(tx, stages.Execution) - return errStart - }) - } else { - prevStageProgress, errStart = 
stages.GetStageProgress(tx, stages.Execution) - } - - if errStart != nil { - return errStart - } - - var to = prevStageProgress - if toBlock > 0 { - to = cmp.Min(prevStageProgress, toBlock) - } - - if to <= s.BlockNumber { - return nil - } - - stageProgress := uint64(0) - logPrefix := s.LogPrefix() - if to > s.BlockNumber+16 { - log.Info(fmt.Sprintf("[%s] Contract translation", logPrefix), "from", s.BlockNumber, "to", to) - } - - empty := common.Address{} - - observedAddresses := map[common.Address]struct{}{ - empty: {}, - } - observedCodeHashes := map[common.Hash]struct{}{} - - var err error - for stageProgress <= toBlock { - stageProgress, err = transpileBatch(logPrefix, stageProgress, to, cfg, tx, observedAddresses, observedCodeHashes, ctx.Done()) - if err != nil { - return err - } - } - - if to > s.BlockNumber+16 { - log.Info(fmt.Sprintf("[%s] Completed on", logPrefix), "block", toBlock) - } - - return nil -} - -func transpileBatch(logPrefix string, stageProgress, toBlock uint64, cfg TranspileCfg, tx kv.RwTx, observedAddresses map[common.Address]struct{}, observedCodeHashes map[common.Hash]struct{}, quitCh <-chan struct{}) (uint64, error) { - useExternalTx := tx != nil - var err error - if !useExternalTx { - tx, err = cfg.db.BeginRw(context.Background()) - if err != nil { - return 0, err - } - defer tx.Rollback() - } - - batch := olddb.NewBatch(tx, quitCh) - defer batch.Rollback() - - // read contracts pending for translation - c, err := tx.CursorDupSort(kv.CallTraceSet) - if err != nil { - return 0, err - } - defer c.Close() - - logTime := time.Now() - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() - - stateReader := state.NewPlainStateReader(batch) - - var ( - codeHash common.Hash - codeHashBytes []byte - addr common.Address - addrBytes []byte - acc *accounts.Account - evmContract []byte - transpiledCode []byte - ok bool - ) - - prevContract := stageProgress - blockKey := dbutils.EncodeBlockNumber(stageProgress) - - var addressStatus []byte - 
for blockKey, addressStatus, err = c.SeekExact(blockKey); blockKey != nil; blockKey, addressStatus, err = c.Next() { - if err != nil { - return 0, fmt.Errorf("can't read pending code translations: %w", err) - } - - select { - case <-quitCh: - return 0, libcommon.ErrStopped - case <-logEvery.C: - prevContract, logTime = logTEVMProgress(logPrefix, prevContract, logTime, stageProgress) - tx.CollectMetrics() - default: - } - - stageProgress, err = dbutils.DecodeBlockNumber(blockKey) - if err != nil { - return 0, fmt.Errorf("can't read pending code translations. incorrect block key: %w", err) - } - - if stageProgress > toBlock { - break - } - - if addressStatus[len(addressStatus)-1]&4 == 0 { - continue - } - - addrBytes = addressStatus[:len(addressStatus)-1] - addr = common.BytesToAddress(addrBytes) - - _, ok = observedAddresses[addr] - if ok { - continue - } - observedAddresses[addr] = struct{}{} - - acc, err = stateReader.ReadAccountData(addr) - if err != nil { - if errors.Is(err, ethdb.ErrKeyNotFound) { - continue - } - return 0, fmt.Errorf("can't read account by address %q: %w", addr, err) - } - if acc == nil { - continue - } - - codeHash = acc.CodeHash - if ok = accounts.IsEmptyCodeHash(codeHash); ok { - continue - } - codeHashBytes = codeHash.Bytes() - - _, ok = observedCodeHashes[codeHash] - if ok { - continue - } - observedCodeHashes[codeHash] = struct{}{} - - // check if we already have TEVM code - ok, err = batch.Has(kv.ContractTEVMCode, codeHashBytes) - if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) { - return 0, fmt.Errorf("can't read code TEVM bucket by contract hash %q: %w", codeHash, err) - } - if ok && err == nil { - // already has TEVM code - continue - } - - // load the contract code - evmContract, err = batch.GetOne(kv.Code, codeHashBytes) - if err != nil { - if errors.Is(err, ethdb.ErrKeyNotFound) { - continue - } - return 0, fmt.Errorf("can't read pending code translations. 
incorrect code hash in the bucket: %w", err) - } - if len(evmContract) == 0 { - continue - } - - // call a transpiler - transpiledCode, err = transpileCode(evmContract) - if err != nil { - if errors.Is(err, ethdb.ErrKeyNotFound) { - log.Warn("cannot find EVM contract", "address", addr, "hash", codeHash) - continue - } - return 0, fmt.Errorf("contract %q cannot be translated: %w", codeHash, err) - } - - // store TEVM contract code - err = batch.Put(kv.ContractTEVMCode, codeHashBytes, transpiledCode) - if err != nil { - return 0, fmt.Errorf("cannot store TEVM code %q: %w", codeHash, err) - } - - if batch.BatchSize() >= int(cfg.batchSize) { - break // limit RAM usage. Break to commit batch - } - } - - if err = batch.Commit(); err != nil { - return 0, fmt.Errorf("cannot commit the batch of translations on %q: %w", codeHash, err) - } - - if !useExternalTx { - if err = tx.Commit(); err != nil { - return 0, fmt.Errorf("cannot commit the external transation on %q: %w", codeHash, err) - } - } - - return stageProgress, nil -} - -func logTEVMProgress(logPrefix string, prevContract uint64, prevTime time.Time, currentContract uint64) (uint64, time.Time) { - currentTime := time.Now() - interval := currentTime.Sub(prevTime) - speed := float64(currentContract-prevContract) / float64(interval/time.Second) - var m runtime.MemStats - libcommon.ReadMemStats(&m) - var logpairs = []interface{}{ - "number", currentContract, - "contracts/s", speed, - } - logpairs = append(logpairs, "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) - log.Info(fmt.Sprintf("[%s] Translated contracts", logPrefix), logpairs...) 
- - return currentContract, currentTime -} - -func UnwindTranspileStage(u *UnwindState, s *StageState, tx kv.RwTx, cfg TranspileCfg, ctx context.Context) (err error) { - useExternalTx := tx != nil - if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - keyStart := dbutils.EncodeBlockNumber(u.UnwindPoint + 1) - c, err := tx.CursorDupSort(kv.CallTraceSet) - if err != nil { - return err - } - defer c.Close() - - var ( - codeHash common.Hash - codeHashBytes []byte - addr common.Address - addrBytes []byte - acc *accounts.Account - ok bool - ) - - stateReader := state.NewPlainStateReader(tx) - - for k, addrStatus, err := c.Seek(keyStart); k != nil; k, addrStatus, err = c.Next() { - if err != nil { - return err - } - - if addrStatus[len(addrStatus)-1]&4 == 0 { - continue - } - - addrBytes = addrStatus[:len(addrStatus)-1] - addr = common.BytesToAddress(addrBytes) - - acc, err = stateReader.ReadAccountData(addr) - if err != nil { - if errors.Is(err, ethdb.ErrKeyNotFound) { - continue - } - return fmt.Errorf("can't read account by address %q: %w", addr, err) - } - if acc == nil { - continue - } - - codeHash = acc.CodeHash - if ok = accounts.IsEmptyCodeHash(codeHash); ok { - continue - } - codeHashBytes = codeHash.Bytes() - - // check if we already have TEVM code - ok, err = tx.Has(kv.ContractTEVMCode, codeHashBytes) - if err != nil && !errors.Is(err, ethdb.ErrKeyNotFound) { - return fmt.Errorf("can't read code TEVM bucket by contract hash %q: %w", codeHash, err) - } - if err != nil || !ok { - // doesn't have TEVM code - continue - } - - err = tx.Delete(kv.ContractTEVMCode, codeHashBytes) - if err != nil { - return fmt.Errorf("can't delete TEVM code by hash %q: %w", codeHash, err) - } - } - - if err = u.Done(tx); err != nil { - return err - } - if !useExternalTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil -} - -// todo: TBD actual TEVM translator -func transpileCode(code []byte) 
([]byte, error) { - return append(make([]byte, 0, len(code)), code...), nil -} - -func PruneTranspileStage(p *PruneState, tx kv.RwTx, cfg TranspileCfg, initialCycle bool, ctx context.Context) (err error) { - useExternalTx := tx != nil - if !useExternalTx { - tx, err = cfg.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - } - - if !useExternalTx { - if err = tx.Commit(); err != nil { - return err - } - } - return nil -} diff --git a/eth/stagedsync/stage_txlookup.go b/eth/stagedsync/stage_txlookup.go index 73ed1a1795b..dd0391a42cf 100644 --- a/eth/stagedsync/stage_txlookup.go +++ b/eth/stagedsync/stage_txlookup.go @@ -12,7 +12,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/log/v3" @@ -24,6 +24,7 @@ type TxLookupCfg struct { tmpdir string snapshots *snapshotsync.RoSnapshots isBor bool + borSprint uint64 } func StageTxLookupCfg( @@ -32,6 +33,7 @@ func StageTxLookupCfg( tmpdir string, snapshots *snapshotsync.RoSnapshots, isBor bool, + borSprint uint64, ) TxLookupCfg { return TxLookupCfg{ db: db, @@ -39,6 +41,7 @@ func StageTxLookupCfg( tmpdir: tmpdir, snapshots: snapshots, isBor: isBor, + borSprint: borSprint, } } @@ -86,8 +89,15 @@ func SpawnTxLookup(s *StageState, tx kv.RwTx, toBlock uint64, cfg TxLookupCfg, c } // etl.Transform uses ExtractEndKey as exclusive bound, therefore endBlock + 1 if err = txnLookupTransform(logPrefix, tx, startBlock, endBlock+1, quitCh, cfg); err != nil { - return err + return fmt.Errorf("txnLookupTransform: %w", err) + } + + if cfg.isBor { + if err = borTxnLookupTransform(logPrefix, tx, startBlock, endBlock+1, quitCh, cfg); err != nil { + return fmt.Errorf("borTxnLookupTransform: %w", err) + } } + if err = s.Update(tx, 
endBlock); err != nil { return err } @@ -117,9 +127,28 @@ func txnLookupTransform(logPrefix string, tx kv.RwTx, blockFrom, blockTo uint64, } } - if cfg.isBor { - borPrefix := []byte("matic-bor-receipt-") - if err := next(k, crypto.Keccak256(append(append(borPrefix, k...), v...)), blockNumBytes); err != nil { + return nil + }, etl.IdentityLoadFunc, etl.TransformArgs{ + Quit: quitCh, + ExtractStartKey: dbutils.EncodeBlockNumber(blockFrom), + ExtractEndKey: dbutils.EncodeBlockNumber(blockTo), + LogDetailsExtract: func(k, v []byte) (additionalLogArguments []interface{}) { + return []interface{}{"block", binary.BigEndian.Uint64(k)} + }, + }) +} + +// borTxnLookupTransform - [startKey, endKey) +func borTxnLookupTransform(logPrefix string, tx kv.RwTx, blockFrom, blockTo uint64, quitCh <-chan struct{}, cfg TxLookupCfg) error { + bigNum := new(big.Int) + return etl.Transform(logPrefix, tx, kv.HeaderCanonical, kv.BorTxLookup, cfg.tmpdir, func(k, v []byte, next etl.ExtractNextFunc) error { + blocknum, blockHash := binary.BigEndian.Uint64(k), common.CastToHash(v) + blockNumBytes := bigNum.SetUint64(blocknum).Bytes() + + // we add state sync transactions every bor Sprint amount of blocks + if blocknum%cfg.borSprint == 0 && rawdb.HasBorReceipts(tx, blocknum) { + txnHash := types.ComputeBorTxHash(blocknum, blockHash) + if err := next(k, txnHash.Bytes(), blockNumBytes); err != nil { + return err + } + } @@ -157,7 +186,12 @@ func UnwindTxLookup(u *UnwindState, s *StageState, tx kv.RwTx, cfg TxLookupCfg, } // etl.Transform uses ExtractEndKey as exclusive bound, therefore blockTo + 1 if err := deleteTxLookupRange(tx, s.LogPrefix(), blockFrom, blockTo+1, ctx, cfg); err != nil { - return fmt.Errorf("unwind: %w", err) + return fmt.Errorf("unwind TxLookUp: %w", err) + } + if cfg.isBor { + if err := deleteBorTxLookupRange(tx, s.LogPrefix(), blockFrom, blockTo+1, ctx, cfg); err != nil { + return fmt.Errorf("unwind BorTxLookUp: %w", err) + } } if err := u.Done(tx); err != nil { return err } @@ -190,8
+224,15 @@ func PruneTxLookup(s *PruneState, tx kv.RwTx, cfg TxLookupCfg, ctx context.Conte } if blockFrom < blockTo { if err = deleteTxLookupRange(tx, logPrefix, blockFrom, blockTo, ctx, cfg); err != nil { - return fmt.Errorf("prune: %w", err) + return fmt.Errorf("prune TxLookUp: %w", err) } + + if cfg.isBor { + if err = deleteBorTxLookupRange(tx, logPrefix, blockFrom, blockTo, ctx, cfg); err != nil { + return fmt.Errorf("prune BorTxLookUp: %w", err) + } + } + if err = s.DoneAt(tx, blockTo); err != nil { return err } @@ -223,11 +264,26 @@ func deleteTxLookupRange(tx kv.RwTx, logPrefix string, blockFrom, blockTo uint64 return err } } - if cfg.isBor { - borPrefix := []byte("matic-bor-receipt-") - if err := next(k, crypto.Keccak256(append(append(borPrefix, k...), v...)), nil); err != nil { - return err - } + + return nil + }, etl.IdentityLoadFunc, etl.TransformArgs{ + Quit: ctx.Done(), + ExtractStartKey: dbutils.EncodeBlockNumber(blockFrom), + ExtractEndKey: dbutils.EncodeBlockNumber(blockTo), + LogDetailsExtract: func(k, v []byte) (additionalLogArguments []interface{}) { + return []interface{}{"block", binary.BigEndian.Uint64(k)} + }, + }) +} + +// deleteBorTxLookupRange - [blockFrom, blockTo) +func deleteBorTxLookupRange(tx kv.RwTx, logPrefix string, blockFrom, blockTo uint64, ctx context.Context, cfg TxLookupCfg) error { + return etl.Transform(logPrefix, tx, kv.HeaderCanonical, kv.BorTxLookup, cfg.tmpdir, func(k, v []byte, next etl.ExtractNextFunc) error { + blocknum, blockHash := binary.BigEndian.Uint64(k), common.CastToHash(v) + + borTxHash := types.ComputeBorTxHash(blocknum, blockHash) + if err := next(k, borTxHash.Bytes(), nil); err != nil { + return err } return nil diff --git a/eth/stagedsync/stage_verkle_trie.go b/eth/stagedsync/stage_verkle_trie.go new file mode 100644 index 00000000000..b8a578ec7d9 --- /dev/null +++ b/eth/stagedsync/stage_verkle_trie.go @@ -0,0 +1,108 @@ +package stagedsync + +import ( + "context" + "fmt" + 
"github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/cmd/verkle/verkletrie" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" +) + +func SpawnVerkleTrie(s *StageState, u Unwinder, tx kv.RwTx, cfg TrieCfg, ctx context.Context) (common.Hash, error) { + var err error + useExternalTx := tx != nil + if !useExternalTx { + tx, err = cfg.db.BeginRw(ctx) + if err != nil { + return common.Hash{}, err + } + defer tx.Rollback() + } + from := uint64(0) + if s.BlockNumber > 0 { + from = s.BlockNumber + 1 + } + to, err := s.ExecutionAt(tx) + if err != nil { + return common.Hash{}, err + } + verkleWriter := verkletrie.NewVerkleTreeWriter(tx, cfg.tmpDir) + if err := verkletrie.IncrementAccount(tx, tx, 10, verkleWriter, from, to); err != nil { + return common.Hash{}, err + } + var newRoot common.Hash + if newRoot, err = verkletrie.IncrementStorage(tx, tx, 10, verkleWriter, from, to); err != nil { + return common.Hash{}, err + } + if cfg.checkRoot { + header := rawdb.ReadHeaderByNumber(tx, to) + if header.Root != newRoot { + return common.Hash{}, fmt.Errorf("invalid verkle root, header has %x, computed: %x", header.Root, newRoot) + } + } + if err := s.Update(tx, to); err != nil { + return common.Hash{}, err + } + if err := stages.SaveStageProgress(tx, stages.VerkleTrie, to); err != nil { + return common.Hash{}, err + } + if !useExternalTx { + return newRoot, tx.Commit() + } + return newRoot, nil +} + +func UnwindVerkleTrie(u *UnwindState, s *StageState, tx kv.RwTx, cfg TrieCfg, ctx context.Context) (err error) { + useExternalTx := tx != nil + if !useExternalTx { + tx, err = cfg.db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + } + from := u.UnwindPoint + 1 + to, err := s.ExecutionAt(tx) + if err != nil { + return err + } + verkleWriter := verkletrie.NewVerkleTreeWriter(tx, cfg.tmpDir) + if err := verkletrie.IncrementAccount(tx, tx, 10, verkleWriter, 
from, to); err != nil { + return err + } + if _, err = verkletrie.IncrementStorage(tx, tx, 10, verkleWriter, from, to); err != nil { + return err + } + if err := s.Update(tx, from); err != nil { + return err + } + if err := stages.SaveStageProgress(tx, stages.VerkleTrie, from); err != nil { + return err + } + if !useExternalTx { + return tx.Commit() + } + return nil +} + +func PruneVerkleTries(s *PruneState, tx kv.RwTx, cfg TrieCfg, ctx context.Context) (err error) { + useExternalTx := tx != nil + if !useExternalTx { + tx, err = cfg.db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + } + s.Done(tx) + + if !useExternalTx { + if err = tx.Commit(); err != nil { + return err + } + } + return nil +} diff --git a/eth/stagedsync/stagebuilder.go b/eth/stagedsync/stagebuilder.go index 408f1af8042..e0800d8475d 100644 --- a/eth/stagedsync/stagebuilder.go +++ b/eth/stagedsync/stagebuilder.go @@ -7,8 +7,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb/privateapi" - "github.com/ledgerwatch/erigon/turbo/shards" ) type ChainEventNotifier interface { @@ -18,12 +16,6 @@ type ChainEventNotifier interface { HasLogSubsriptions() bool } -type Notifications struct { - Events *privateapi.Events - Accumulator *shards.Accumulator - StateChangesConsumer shards.StateChangeConsumer -} - func MiningStages( ctx context.Context, createBlockCfg MiningCreateBlockCfg, @@ -36,7 +28,7 @@ func MiningStages( { ID: stages.MiningCreateBlock, Description: "Mining: construct new block from tx pool", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return SpawnMiningCreateBlockStage(s, tx, createBlockCfg, ctx.Done()) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx 
kv.RwTx) error { return nil }, @@ -45,7 +37,7 @@ func MiningStages( { ID: stages.MiningExecution, Description: "Mining: construct new block from tx pool", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { return SpawnMiningExecStage(s, tx, execCfg, ctx.Done()) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return nil }, @@ -54,8 +46,8 @@ func MiningStages( { ID: stages.HashState, Description: "Hash the key in the state", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - return SpawnHashStateStage(s, tx, hashStateCfg, ctx) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { + return SpawnHashStateStage(s, tx, hashStateCfg, ctx, quiet) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return nil }, Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx) error { return nil }, @@ -63,8 +55,8 @@ func MiningStages( { ID: stages.IntermediateHashes, Description: "Generate intermediate hashes and computing state root", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - stateRoot, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx) + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { + stateRoot, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, quiet) if err != nil { return err } @@ -77,7 +69,7 @@ func MiningStages( { ID: stages.MiningFinish, Description: "Mining: create and propagate valid block", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) 
error { return SpawnMiningFinishStage(s, tx, finish, ctx.Done()) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return nil }, diff --git a/eth/stagedsync/stages/stages.go b/eth/stagedsync/stages/stages.go index 750d18c27e3..1e8726e9d9f 100644 --- a/eth/stagedsync/stages/stages.go +++ b/eth/stagedsync/stages/stages.go @@ -29,13 +29,15 @@ import ( type SyncStage string var ( - Headers SyncStage = "Headers" // Headers are downloaded, their Proof-Of-Work validity and chaining is verified - CumulativeIndex SyncStage = "CumulativeIndex" // Calculate how much gas has been used up to each block. - BlockHashes SyncStage = "BlockHashes" // Headers Number are written, fills blockHash => number bucket - Bodies SyncStage = "Bodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified - Senders SyncStage = "Senders" // "From" recovered from signatures, bodies re-written - Execution SyncStage = "Execution" // Executing each block w/o buildinf a trie - Translation SyncStage = "Translation" // Translation each marked for translation contract (from EVM to TEVM) + Snapshots SyncStage = "Snapshots" // Snapshots + Headers SyncStage = "Headers" // Headers are downloaded, their Proof-Of-Work validity and chaining is verified + CumulativeIndex SyncStage = "CumulativeIndex" // Calculate how much gas has been used up to each block. 
+ BlockHashes SyncStage = "BlockHashes" // Headers Number are written, fills blockHash => number bucket + Bodies SyncStage = "Bodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified + Senders SyncStage = "Senders" // "From" recovered from signatures, bodies re-written + Execution SyncStage = "Execution" // Executing each block w/o building a trie + Translation SyncStage = "Translation" // Translation each marked for translation contract (from EVM to TEVM) + VerkleTrie SyncStage = "VerkleTrie" IntermediateHashes SyncStage = "IntermediateHashes" // Generate intermediate hashes, calculate the state root hash HashState SyncStage = "HashState" // Apply Keccak256 to all the keys in the state AccountHistoryIndex SyncStage = "AccountHistoryIndex" // Generating history index for accounts @@ -52,6 +54,7 @@ var ( ) var AllStages = []SyncStage{ + Snapshots, Headers, BlockHashes, Bodies, diff --git a/eth/stagedsync/sync.go b/eth/stagedsync/sync.go index 2fd46f1c1fb..c284929810f 100644 --- a/eth/stagedsync/sync.go +++ b/eth/stagedsync/sync.go @@ -209,7 +209,7 @@ func (s *Sync) RunUnwind(db kv.RwDB, tx kv.RwTx) error { } return nil } -func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { +func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool, quiet bool) error { s.prevUnwindPoint = nil s.timings = s.timings[:0] @@ -252,10 +252,15 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { continue } - if err := s.runStage(stage, db, tx, firstCycle, badBlockUnwind); err != nil { + if err := s.runStage(stage, db, tx, firstCycle, badBlockUnwind, quiet); err != nil { return err } + if string(stage.ID) == debug.StopAfterStage() { // stop process for debugging reasons + log.Warn("STOP_AFTER_STAGE env flag forced to stop app") + return libcommon.ErrStopped + } + s.NextStage() } @@ -271,87 +276,84 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { return err } - if err := printLogs(db, tx, s.timings); err != nil 
{ - return err - } s.currentStage = 0 return nil } -func printLogs(db kv.RoDB, tx kv.RwTx, timings []Timing) error { +func (s *Sync) PrintTimings() []interface{} { var logCtx []interface{} count := 0 - for i := range timings { - if timings[i].took < 50*time.Millisecond { + for i := range s.timings { + if s.timings[i].took < 50*time.Millisecond { continue } count++ if count == 50 { break } - if timings[i].isUnwind { - logCtx = append(logCtx, "Unwind "+string(timings[i].stage), timings[i].took.Truncate(time.Millisecond).String()) - } else if timings[i].isPrune { - logCtx = append(logCtx, "Prune "+string(timings[i].stage), timings[i].took.Truncate(time.Millisecond).String()) + if s.timings[i].isUnwind { + logCtx = append(logCtx, "Unwind "+string(s.timings[i].stage), s.timings[i].took.Truncate(time.Millisecond).String()) + } else if s.timings[i].isPrune { + logCtx = append(logCtx, "Prune "+string(s.timings[i].stage), s.timings[i].took.Truncate(time.Millisecond).String()) } else { - logCtx = append(logCtx, string(timings[i].stage), timings[i].took.Truncate(time.Millisecond).String()) + logCtx = append(logCtx, string(s.timings[i].stage), s.timings[i].took.Truncate(time.Millisecond).String()) } } - if len(logCtx) > 0 { - log.Info("Timings (slower than 50ms)", logCtx...) 
- } + return logCtx +} +func PrintTables(db kv.RoDB, tx kv.RwTx) []interface{} { if tx == nil { return nil } - - if len(logCtx) > 0 { // also don't print this logs if everything is fast - buckets := []string{ - kv.PlainState, - kv.AccountChangeSet, - kv.StorageChangeSet, - kv.EthTx, - kv.Log, - } - bucketSizes := make([]interface{}, 0, 2*(len(buckets)+2)) - for _, bucket := range buckets { - sz, err1 := tx.BucketSize(bucket) - if err1 != nil { - return err1 - } - bucketSizes = append(bucketSizes, bucket, libcommon.ByteCount(sz)) - } - - sz, err1 := tx.BucketSize("freelist") + buckets := []string{ + kv.PlainState, + kv.AccountChangeSet, + kv.StorageChangeSet, + kv.EthTx, + kv.Log, + } + bucketSizes := make([]interface{}, 0, 2*(len(buckets)+2)) + for _, bucket := range buckets { + sz, err1 := tx.BucketSize(bucket) if err1 != nil { - return err1 - } - bucketSizes = append(bucketSizes, "FreeList", libcommon.ByteCount(sz)) - amountOfFreePagesInDb := sz / 4 // page_id encoded as bigEndian_u32 - if db != nil { - bucketSizes = append(bucketSizes, "ReclaimableSpace", libcommon.ByteCount(amountOfFreePagesInDb*db.PageSize())) + return bucketSizes } - log.Info("Tables", bucketSizes...) 
+ bucketSizes = append(bucketSizes, bucket, libcommon.ByteCount(sz)) + } + + sz, err1 := tx.BucketSize("freelist") + if err1 != nil { + return bucketSizes + } + bucketSizes = append(bucketSizes, "FreeList", libcommon.ByteCount(sz)) + amountOfFreePagesInDb := sz / 4 // page_id encoded as bigEndian_u32 + if db != nil { + bucketSizes = append(bucketSizes, "ReclaimableSpace", libcommon.ByteCount(amountOfFreePagesInDb*db.PageSize())) } tx.CollectMetrics() - return nil + return bucketSizes } -func (s *Sync) runStage(stage *Stage, db kv.RwDB, tx kv.RwTx, firstCycle bool, badBlockUnwind bool) (err error) { +func (s *Sync) runStage(stage *Stage, db kv.RwDB, tx kv.RwTx, firstCycle bool, badBlockUnwind bool, quiet bool) (err error) { start := time.Now() stageState, err := s.StageState(stage.ID, tx, db) if err != nil { return err } - if err = stage.Forward(firstCycle, badBlockUnwind, stageState, s, tx); err != nil { - return fmt.Errorf("[%s] %w", s.LogPrefix(), err) + if err = stage.Forward(firstCycle, badBlockUnwind, stageState, s, tx, quiet); err != nil { + wrappedError := fmt.Errorf("[%s] %w", s.LogPrefix(), err) + log.Debug("Error while executing stage", "err", wrappedError) + return wrappedError } took := time.Since(start) + logPrefix := s.LogPrefix() if took > 60*time.Second { - logPrefix := s.LogPrefix() log.Info(fmt.Sprintf("[%s] DONE", logPrefix), "in", took) + } else { + log.Debug(fmt.Sprintf("[%s] DONE", logPrefix), "in", took) } s.timings = append(s.timings, Timing{stage: stage.ID, took: took}) return nil diff --git a/eth/stagedsync/sync_test.go b/eth/stagedsync/sync_test.go index 2e5358d9fbc..c811c8e640e 100644 --- a/eth/stagedsync/sync_test.go +++ b/eth/stagedsync/sync_test.go @@ -18,7 +18,7 @@ func TestStagesSuccess(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u 
Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Headers) return nil }, @@ -26,7 +26,7 @@ func TestStagesSuccess(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Bodies) return nil }, @@ -34,7 +34,7 @@ func TestStagesSuccess(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Senders) return nil }, @@ -42,7 +42,7 @@ func TestStagesSuccess(t *testing.T) { } state := New(s, nil, nil) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true) + err := state.Run(db, tx, true /* initialCycle */, false /* quiet */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -57,7 +57,7 @@ func TestDisabledStages(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Headers) return nil }, @@ -65,7 +65,7 @@ func TestDisabledStages(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Bodies) return nil }, @@ -74,7 +74,7 @@ func TestDisabledStages(t *testing.T) { { ID: stages.Senders, 
Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Senders) return nil }, @@ -82,7 +82,7 @@ func TestDisabledStages(t *testing.T) { } state := New(s, nil, nil) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true) + err := state.Run(db, tx, true /* initialCycle */, false /* quiet */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -98,7 +98,7 @@ func TestErroredStage(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Headers) return nil }, @@ -106,7 +106,7 @@ func TestErroredStage(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Bodies) return expectedErr }, @@ -114,7 +114,7 @@ func TestErroredStage(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Senders) return nil }, @@ -122,7 +122,7 @@ func TestErroredStage(t *testing.T) { } state := New(s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true) + err := state.Run(db, tx, true /* 
initialCycle */, false /* quiet */) assert.Equal(t, fmt.Errorf("[2/3 Bodies] %w", expectedErr), err) expectedFlow := []stages.SyncStage{ @@ -138,7 +138,7 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Headers) if s.BlockNumber == 0 { return s.Update(tx, 2000) @@ -153,7 +153,7 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Bodies) if s.BlockNumber == 0 { return s.Update(tx, 1000) @@ -168,7 +168,7 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { if s.BlockNumber == 0 { if err := s.Update(tx, 1700); err != nil { return err @@ -190,7 +190,7 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) { { ID: stages.IntermediateHashes, Disabled: true, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.IntermediateHashes) if s.BlockNumber == 0 { return s.Update(tx, 2000) @@ -205,7 +205,7 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) { } state := New(s, 
[]stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true) + err := state.Run(db, tx, true /* initialCycle */, false /* quiet */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -236,7 +236,7 @@ func TestUnwind(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Headers) if s.BlockNumber == 0 { return s.Update(tx, 2000) @@ -251,7 +251,7 @@ func TestUnwind(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Bodies) if s.BlockNumber == 0 { return s.Update(tx, 2000) @@ -266,7 +266,7 @@ func TestUnwind(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Senders) if !unwound { unwound = true @@ -283,7 +283,7 @@ func TestUnwind(t *testing.T) { { ID: stages.IntermediateHashes, Disabled: true, - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.IntermediateHashes) if s.BlockNumber == 0 { return s.Update(tx, 2000) @@ -298,7 +298,7 @@ func TestUnwind(t *testing.T) { } state := New(s, []stages.SyncStage{s[3].ID, 
s[2].ID, s[1].ID, s[0].ID}, nil) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true) + err := state.Run(db, tx, true /* initialCycle */, false /* quiet */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -325,7 +325,7 @@ func TestUnwind(t *testing.T) { flow = flow[:0] state.unwindOrder = []*Stage{s[3], s[2], s[1], s[0]} state.UnwindTo(100, common.Hash{}) - err = state.Run(db, tx, true) + err = state.Run(db, tx, true /* initialCycle */, false /* quiet */) assert.NoError(t, err) expectedFlow = []stages.SyncStage{ @@ -344,7 +344,7 @@ func TestUnwindEmptyUnwinder(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Headers) if s.BlockNumber == 0 { return s.Update(tx, 2000) @@ -359,7 +359,7 @@ func TestUnwindEmptyUnwinder(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Bodies) if s.BlockNumber == 0 { return s.Update(tx, 2000) @@ -370,7 +370,7 @@ func TestUnwindEmptyUnwinder(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Senders) if !unwound { unwound = true @@ -387,7 +387,7 @@ func TestUnwindEmptyUnwinder(t *testing.T) { } state := New(s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil) db, tx := memdb.NewTestTx(t) - err := state.Run(db, 
tx, true) + err := state.Run(db, tx, true /* initialCycle */, false /* quiet */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -418,7 +418,7 @@ func TestSyncDoTwice(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Headers) return s.Update(tx, s.BlockNumber+100) }, @@ -426,7 +426,7 @@ func TestSyncDoTwice(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Bodies) return s.Update(tx, s.BlockNumber+200) }, @@ -434,7 +434,7 @@ func TestSyncDoTwice(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Senders) return s.Update(tx, s.BlockNumber+300) }, @@ -443,11 +443,11 @@ func TestSyncDoTwice(t *testing.T) { state := New(s, nil, nil) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true) + err := state.Run(db, tx, true /* initialCycle */, false /* quiet */) assert.NoError(t, err) state = New(s, nil, nil) - err = state.Run(db, tx, true) + err = state.Run(db, tx, true /* initialCycle */, false /* quiet */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -476,7 +476,7 @@ func TestStateSyncInterruptRestart(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s 
*StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Headers) return nil }, @@ -484,7 +484,7 @@ func TestStateSyncInterruptRestart(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Bodies) return expectedErr }, @@ -492,7 +492,7 @@ func TestStateSyncInterruptRestart(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Senders) return nil }, @@ -501,13 +501,13 @@ func TestStateSyncInterruptRestart(t *testing.T) { state := New(s, nil, nil) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true) + err := state.Run(db, tx, true /* initialCycle */, false /* quiet */) assert.Equal(t, fmt.Errorf("[2/3 Bodies] %w", expectedErr), err) expectedErr = nil state = New(s, nil, nil) - err = state.Run(db, tx, true) + err = state.Run(db, tx, true /* initialCycle */, false /* quiet */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ @@ -528,7 +528,7 @@ func TestSyncInterruptLongUnwind(t *testing.T) { { ID: stages.Headers, Description: "Downloading headers", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Headers) if s.BlockNumber == 0 { return s.Update(tx, 2000) @@ -543,7 +543,7 @@ func 
TestSyncInterruptLongUnwind(t *testing.T) { { ID: stages.Bodies, Description: "Downloading block bodiess", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Bodies) if s.BlockNumber == 0 { return s.Update(tx, 2000) @@ -558,7 +558,7 @@ func TestSyncInterruptLongUnwind(t *testing.T) { { ID: stages.Senders, Description: "Recovering senders from tx signatures", - Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, quiet bool) error { flow = append(flow, stages.Senders) if !unwound { unwound = true @@ -580,7 +580,7 @@ func TestSyncInterruptLongUnwind(t *testing.T) { } state := New(s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil) db, tx := memdb.NewTestTx(t) - err := state.Run(db, tx, true) + err := state.Run(db, tx, true /* initialCycle */, false /* quiet */) assert.Error(t, errInterrupted, err) //state = NewState(s) @@ -588,7 +588,7 @@ func TestSyncInterruptLongUnwind(t *testing.T) { //err = state.LoadUnwindInfo(tx) //assert.NoError(t, err) //state.UnwindTo(500, common.Hash{}) - err = state.Run(db, tx, true) + err = state.Run(db, tx, true /* initialCycle */, false /* quiet */) assert.NoError(t, err) expectedFlow := []stages.SyncStage{ diff --git a/eth/stagedsync/testutil.go b/eth/stagedsync/testutil.go index e0364978744..cf1c4dfd21a 100644 --- a/eth/stagedsync/testutil.go +++ b/eth/stagedsync/testutil.go @@ -63,6 +63,80 @@ func plainWriterGen(tx kv.RwTx) stateWriterGen { return state.NewPlainStateWriter(tx, tx, blockNum) } } + +type testGenHook func(n, from, numberOfBlocks uint64) + +func generateBlocks2(t *testing.T, from uint64, numberOfBlocks uint64, blockWriter state.StateWriter, beforeBlock, afterBlock testGenHook, difficulty int) { 
+ acc1 := accounts.NewAccount() + acc1.Incarnation = 1 + acc1.Initialised = true + acc1.Balance.SetUint64(0) + + acc2 := accounts.NewAccount() + acc2.Incarnation = 0 + acc2.Initialised = true + acc2.Balance.SetUint64(0) + + testAccounts := []*accounts.Account{ + &acc1, + &acc2, + } + + for blockNumber := uint64(1); blockNumber < from+numberOfBlocks; blockNumber++ { + beforeBlock(blockNumber, from, numberOfBlocks) + updateIncarnation := difficulty != staticCodeStaticIncarnations && blockNumber%10 == 0 + + for i, oldAcc := range testAccounts { + addr := common.HexToAddress(fmt.Sprintf("0x1234567890%d", i)) + + newAcc := oldAcc.SelfCopy() + newAcc.Balance.SetUint64(blockNumber) + if updateIncarnation && oldAcc.Incarnation > 0 /* only update for contracts */ { + newAcc.Incarnation = oldAcc.Incarnation + 1 + } + + if blockNumber == 1 && newAcc.Incarnation > 0 { + if blockNumber >= from { + if err := blockWriter.CreateContract(addr); err != nil { + t.Fatal(err) + } + } + } + if blockNumber == 1 || updateIncarnation || difficulty == changeCodeIndepenentlyOfIncarnations { + if newAcc.Incarnation > 0 { + code := []byte(fmt.Sprintf("acc-code-%v", blockNumber)) + codeHash, _ := common.HashData(code) + if blockNumber >= from { + if err := blockWriter.UpdateAccountCode(addr, newAcc.Incarnation, codeHash, code); err != nil { + t.Fatal(err) + } + } + newAcc.CodeHash = codeHash + } + } + + if newAcc.Incarnation > 0 { + var oldValue, newValue uint256.Int + newValue.SetOne() + var location common.Hash + location.SetBytes(big.NewInt(int64(blockNumber)).Bytes()) + if blockNumber >= from { + if err := blockWriter.WriteAccountStorage(addr, newAcc.Incarnation, &location, &oldValue, &newValue); err != nil { + t.Fatal(err) + } + } + } + if blockNumber >= from { + if err := blockWriter.UpdateAccountData(addr, oldAcc, newAcc); err != nil { + t.Fatal(err) + } + } + testAccounts[i] = newAcc + } + afterBlock(blockNumber, from, numberOfBlocks) + } +} + func generateBlocks(t *testing.T, from 
uint64, numberOfBlocks uint64, stateWriterGen stateWriterGen, difficulty int) { acc1 := accounts.NewAccount() acc1.Incarnation = 1 diff --git a/eth/tracers/internal/tracers/4byte_tracer.js b/eth/tracers/internal/tracers/4byte_tracer.js index 462b4ad4cb5..e4714b8bfb7 100644 --- a/eth/tracers/internal/tracers/4byte_tracer.js +++ b/eth/tracers/internal/tracers/4byte_tracer.js @@ -46,7 +46,7 @@ return false; }, - // store save the given indentifier and datasize. + // store save the given identifier and datasize. store: function(id, size){ var key = "" + toHex(id) + "-" + size; this.ids[key] = this.ids[key] + 1 || 1; diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go index fa4e2df9433..a8d9a90d8be 100644 --- a/eth/tracers/internal/tracers/assets.go +++ b/eth/tracers/internal/tracers/assets.go @@ -366,11 +366,13 @@ const AssetDebug = false // directory embedded in the file by go-bindata. // For example if you run go-bindata on data/... and data contains the // following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png +// +// data/ +// foo.txt +// img/ +// a.png +// b.png +// // then AssetDir("data") would return []string{"foo.txt", "img"}, // AssetDir("data/img") would return []string{"a.png", "b.png"}, // AssetDir("foo.txt") and AssetDir("notexist") would return an error, and diff --git a/eth/tracers/internal/tracers/evmdis_tracer.js b/eth/tracers/internal/tracers/evmdis_tracer.js index bb19777aba9..3134bbb8aaf 100644 --- a/eth/tracers/internal/tracers/evmdis_tracer.js +++ b/eth/tracers/internal/tracers/evmdis_tracer.js @@ -71,7 +71,7 @@ opinfo["ops"] = []; this.stack.push(opinfo); break; - case "RETURN": + case "RETURN": case "REVERT": var out = log.stack.peek(0).valueOf(); var outsize = log.stack.peek(1).valueOf(); frame.return = log.memory.slice(out, out + outsize); diff --git a/eth/tracers/internal/tracers/prestate_tracer.js b/eth/tracers/internal/tracers/prestate_tracer.js index 084c04ec46b..77f25209cd9 100644 
--- a/eth/tracers/internal/tracers/prestate_tracer.js +++ b/eth/tracers/internal/tracers/prestate_tracer.js @@ -47,6 +47,13 @@ // result is invoked when all the opcodes have been iterated over and returns // the final result of the tracing. result: function(ctx, db) { + if (this.prestate === null) { + this.prestate = {}; + // If tx is transfer-only, the recipient account + // hasn't been populated. + this.lookupAccount(ctx.to, db); + } + // At this point, we need to deduct the 'value' from the // outer transaction, and move it back to the origin this.lookupAccount(ctx.from, db); @@ -79,7 +86,7 @@ } // Whenever new state is accessed, add it to the prestate switch (log.op.toString()) { - case "EXTCODECOPY": case "EXTCODESIZE": case "BALANCE": + case "EXTCODECOPY": case "EXTCODESIZE": case "EXTCODEHASH": case "BALANCE": this.lookupAccount(toAddress(log.stack.peek(0).toString(16)), db); break; case "CREATE": diff --git a/eth/tracers/jsvm.go b/eth/tracers/jsvm.go index 073900b968d..10ae7ed9908 100644 --- a/eth/tracers/jsvm.go +++ b/eth/tracers/jsvm.go @@ -1,8 +1,9 @@ package tracers import ( - "github.com/dop251/goja" "unsafe" + + "github.com/dop251/goja" ) type JSVM struct { @@ -22,9 +23,7 @@ func (vm *JSVM) Pop() { } func (vm *JSVM) Swap(index1 int, index2 int) { - t := vm.stack[len(vm.stack)+index1] - vm.stack[len(vm.stack)+index1] = vm.stack[len(vm.stack)+index2] - vm.stack[len(vm.stack)+index2] = t + vm.stack[len(vm.stack)+index1], vm.stack[len(vm.stack)+index2] = vm.stack[len(vm.stack)+index2], vm.stack[len(vm.stack)+index1] } func (vm *JSVM) pushAny(val interface{}) { diff --git a/eth/tracers/jsvm_test.go b/eth/tracers/jsvm_test.go index 00b7d881b6b..4adc0f07df2 100644 --- a/eth/tracers/jsvm_test.go +++ b/eth/tracers/jsvm_test.go @@ -1,8 +1,9 @@ package tracers import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/testify/assert" ) func TestSwap(t *testing.T) { diff --git a/eth/tracers/tracer.go b/eth/tracers/tracer.go index 
21a1e823a36..81c6ebaa250 100644 --- a/eth/tracers/tracer.go +++ b/eth/tracers/tracer.go @@ -20,12 +20,13 @@ import ( "encoding/json" "errors" "fmt" - "github.com/ledgerwatch/erigon/core" "math/big" "sync/atomic" "time" "unsafe" + "github.com/ledgerwatch/erigon/core" + "github.com/holiman/uint256" "github.com/ledgerwatch/erigon/common" diff --git a/eth/tracers/tracer_test.go b/eth/tracers/tracer_test.go index 1365b257a8a..fb22ed097ea 100644 --- a/eth/tracers/tracer_test.go +++ b/eth/tracers/tracer_test.go @@ -58,8 +58,7 @@ type vmContext struct { func testCtx() *vmContext { return &vmContext{blockCtx: vm.BlockContext{ - BlockNumber: 1, - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, + BlockNumber: 1, }, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}} } @@ -69,7 +68,7 @@ func runTrace(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) { startGas uint64 = 10000 value = uint256.NewInt(0) ) - contract := vm.NewContract(account{}, account{}, value, startGas, false, false) + contract := vm.NewContract(account{}, account{}, value, startGas, false) contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0} tracer.CaptureStart(env, 0, contract.Caller(), contract.Address(), false, false, vm.CallType(0), []byte{}, startGas, big.NewInt(int64(value.Uint64())), contract.Code) @@ -85,8 +84,7 @@ func TestTracer(t *testing.T) { execTracer := func(code string) ([]byte, string) { t.Helper() ctx := &vmContext{blockCtx: vm.BlockContext{ - BlockNumber: 1, - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, + BlockNumber: 1, }, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}} tracer, err := New(code, new(Context)) if err != nil { @@ -158,10 +156,9 @@ func TestHaltBetweenSteps(t *testing.T) { t.Fatal(err) } env := vm.NewEVM(vm.BlockContext{ - BlockNumber: 1, - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, + BlockNumber: 1, }, vm.TxContext{}, &dummyStatedb{}, params.TestChainConfig, 
vm.Config{Debug: true, Tracer: tracer}) - contract := vm.NewContract(&account{}, &account{}, uint256.NewInt(0), 0, false, false) + contract := vm.NewContract(&account{}, &account{}, uint256.NewInt(0), 0, false) tracer.CaptureState(env, 0, 0, 0, 0, &vm.ScopeContext{Contract: contract}, nil, 0, nil) //nolint:errcheck timeout := errors.New("stahp") @@ -179,7 +176,7 @@ func TestNoStepExec(t *testing.T) { runEmptyTrace := func(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) { env := vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) startGas := uint64(10000) - contract := vm.NewContract(account{}, account{}, uint256.NewInt(1), startGas, true, false) + contract := vm.NewContract(account{}, account{}, uint256.NewInt(1), startGas, true) tracer.CaptureStart(env, 0, contract.Caller(), contract.Address(), false, false, vm.CALLT, nil, 0, big.NewInt(0), nil) tracer.CaptureEnd(0, nil, startGas-contract.Gas, 1, 0, nil) return tracer.GetResult() diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index 36b31c447c3..f0e751518d0 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -152,14 +152,13 @@ func TestPrestateTracerCreate2(t *testing.T) { GasPrice: big.NewInt(1), } context := vm.BlockContext{ - CanTransfer: core.CanTransfer, - Transfer: core.Transfer, - Coinbase: common.Address{}, - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, - BlockNumber: 8000000, - Time: 5, - Difficulty: big.NewInt(0x30000), - GasLimit: uint64(6000000), + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: common.Address{}, + BlockNumber: 8000000, + Time: 5, + Difficulty: big.NewInt(0x30000), + GasLimit: uint64(6000000), } alloc := core.GenesisAlloc{} @@ -245,14 +244,13 @@ func TestCallTracer(t *testing.T) { GasPrice: big.NewInt(int64(txn.GetPrice().Uint64())), } context := vm.BlockContext{ - CanTransfer: core.CanTransfer, - Transfer: 
core.Transfer, - Coinbase: test.Context.Miner, - BlockNumber: uint64(test.Context.Number), - Time: uint64(test.Context.Time), - Difficulty: (*big.Int)(test.Context.Difficulty), - GasLimit: uint64(test.Context.GasLimit), - ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: test.Context.Miner, + BlockNumber: uint64(test.Context.Number), + Time: uint64(test.Context.Time), + Difficulty: (*big.Int)(test.Context.Difficulty), + GasLimit: uint64(test.Context.GasLimit), } _, tx := memdb.NewTestTx(t) diff --git a/ethdb/bitmapdb/dbutils.go b/ethdb/bitmapdb/dbutils.go index 0631a4f5788..c54d9f5513b 100644 --- a/ethdb/bitmapdb/dbutils.go +++ b/ethdb/bitmapdb/dbutils.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "sort" + "sync" "github.com/RoaringBitmap/roaring" "github.com/RoaringBitmap/roaring/roaring64" @@ -14,6 +15,36 @@ import ( "github.com/ledgerwatch/erigon/ethdb" ) +var roaringPool = sync.Pool{ + New: func() any { + return roaring.New() + }, +} + +func NewBitmap() *roaring.Bitmap { + a := roaringPool.Get().(*roaring.Bitmap) + a.Clear() + return a +} +func ReturnToPool(a *roaring.Bitmap) { + roaringPool.Put(a) +} + +var roaring64Pool = sync.Pool{ + New: func() any { + return roaring64.New() + }, +} + +func NewBitmap64() *roaring64.Bitmap { + a := roaring64Pool.Get().(*roaring64.Bitmap) + a.Clear() + return a +} +func ReturnToPool64(a *roaring64.Bitmap) { + roaring64Pool.Put(a) +} + const ChunkLimit = uint64(1950 * datasize.B) // threshold beyond which MDBX overflow pages appear: 4096 / 2 - (keySize + 8) // CutLeft - cut from bitmap `targetSize` bytes from left @@ -137,7 +168,8 @@ func Get(db kv.Tx, bucket string, key []byte, from, to uint32) (*roaring.Bitmap, if !bytes.HasPrefix(k, key) { break } - bm := roaring.New() + bm := NewBitmap() + defer ReturnToPool(bm) if _, err := bm.ReadFrom(bytes.NewReader(v)); err != nil { return nil, err } @@ -146,7 +178,6 @@ func Get(db 
kv.Tx, bucket string, key []byte, from, to uint32) (*roaring.Bitmap, break } } - if len(chunks) == 0 { return roaring.New(), nil } @@ -154,6 +185,7 @@ func Get(db kv.Tx, bucket string, key []byte, from, to uint32) (*roaring.Bitmap, } // SeekInBitmap - returns value in bitmap which is >= n +// //nolint:deadcode func SeekInBitmap(m *roaring.Bitmap, n uint32) (found uint32, ok bool) { i := m.Iterator() @@ -292,7 +324,8 @@ func Get64(db kv.Tx, bucket string, key []byte, from, to uint64) (*roaring64.Bit if !bytes.HasPrefix(k, key) { break } - bm := roaring64.New() + bm := NewBitmap64() + defer ReturnToPool64(bm) _, err := bm.ReadFrom(bytes.NewReader(v)) if err != nil { return nil, err diff --git a/ethdb/kv_util.go b/ethdb/kv_util.go index c64656521ce..430d9d73860 100644 --- a/ethdb/kv_util.go +++ b/ethdb/kv_util.go @@ -2,11 +2,8 @@ package ethdb import ( "bytes" - "errors" - "fmt" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" ) func Walk(c kv.Cursor, startkey []byte, fixedbits int, walker func(k, v []byte) (bool, error)) error { @@ -31,38 +28,6 @@ func Walk(c kv.Cursor, startkey []byte, fixedbits int, walker func(k, v []byte) return nil } -// todo: return TEVM code and use it -func GetHasTEVM(db kv.Has) func(contractHash common.Hash) (bool, error) { - contractsWithTEVM := map[common.Hash]struct{}{} - var ok bool - - return func(contractHash common.Hash) (bool, error) { - if contractHash == (common.Hash{}) { - return false, nil - } - - if _, ok = contractsWithTEVM[contractHash]; ok { - return true, nil - } - - ok, err := db.Has(kv.ContractTEVMCode, contractHash.Bytes()) - if err != nil && !errors.Is(err, ErrKeyNotFound) { - return false, fmt.Errorf("can't check TEVM bucket by contract %q hash: %w", - contractHash.String(), err) - } - - if errors.Is(err, ErrKeyNotFound) { - return false, nil - } - - if ok { - contractsWithTEVM[contractHash] = struct{}{} - } - - return true, nil - } -} - func Bytesmask(fixedbits int) (fixedbytes int, mask 
byte) { fixedbytes = (fixedbits + 7) / 8 shiftbits := fixedbits & 7 diff --git a/ethdb/olddb/mapmutation.go b/ethdb/olddb/mapmutation.go index 8755385abcd..a88a971b6ea 100644 --- a/ethdb/olddb/mapmutation.go +++ b/ethdb/olddb/mapmutation.go @@ -111,9 +111,6 @@ func (m *mapmutation) ReadSequence(bucket string) (res uint64, err error) { // Can only be called from the worker thread func (m *mapmutation) GetOne(table string, key []byte) ([]byte, error) { if value, ok := m.getMem(table, key); ok { - if value == nil { - return nil, nil - } return value, nil } if m.db != nil { @@ -285,5 +282,9 @@ func (m *mapmutation) panicOnEmptyDB() { } func (m *mapmutation) SetRwKV(kv kv.RwDB) { - m.db.(ethdb.HasRwKV).SetRwKV(kv) + hasRwKV, ok := m.db.(ethdb.HasRwKV) + if !ok { + log.Warn("Failed to convert mapmutation type to HasRwKV interface") + } + hasRwKV.SetRwKV(kv) } diff --git a/ethdb/olddb/mutation.go b/ethdb/olddb/mutation.go index 4e1f7d61a7f..f395bbd1384 100644 --- a/ethdb/olddb/mutation.go +++ b/ethdb/olddb/mutation.go @@ -57,7 +57,10 @@ func NewBatch(tx kv.RwTx, quit <-chan struct{}) *mutation { } func (mi *MutationItem) Less(than btree.Item) bool { - i := than.(*MutationItem) + i, ok := than.(*MutationItem) + if !ok { + log.Warn("Failed to convert btree.Item to MutationItem pointer") + } c := strings.Compare(mi.table, i.table) if c != 0 { return c < 0 @@ -192,7 +195,7 @@ func (m *mutation) Put(table string, k, v []byte) error { m.size += int(unsafe.Sizeof(newMi)) + len(k) + len(v) if i != nil { oldMi := i.(*MutationItem) - m.size -= (int(unsafe.Sizeof(oldMi)) + len(oldMi.key) + len(oldMi.value)) + m.size -= int(unsafe.Sizeof(oldMi)) + len(oldMi.key) + len(oldMi.value) } return nil } diff --git a/ethdb/olddb/object_db.go b/ethdb/olddb/object_db.go index a8252f4ab61..24d03523175 100644 --- a/ethdb/olddb/object_db.go +++ b/ethdb/olddb/object_db.go @@ -33,7 +33,7 @@ type ObjectDatabase struct { } // NewObjectDatabase returns a AbstractDB wrapper. 
-//Deprecated +// Deprecated func NewObjectDatabase(kv kv.RwDB) *ObjectDatabase { return &ObjectDatabase{ kv: kv, diff --git a/ethdb/olddb/tx_db.go b/ethdb/olddb/tx_db.go index 5dd1ce38bc6..f54727ba04e 100644 --- a/ethdb/olddb/tx_db.go +++ b/ethdb/olddb/tx_db.go @@ -14,8 +14,8 @@ import ( // TxDb not usable after .Commit()/.Rollback() call, but usable after .CommitAndBegin() call // you can put unlimited amount of data into this class // Walk and MultiWalk methods - work outside of Tx object yet, will implement it later -//Deprecated -//nolint +// Deprecated +// nolint type TxDb struct { db ethdb.Database tx kv.Tx @@ -24,7 +24,7 @@ type TxDb struct { len uint64 } -//nolint +// nolint func WrapIntoTxDB(tx kv.RwTx) *TxDb { return &TxDb{tx: tx, cursors: map[string]kv.Cursor{}} } diff --git a/ethdb/privateapi/engine_test.go b/ethdb/privateapi/engine_test.go index a2c5eb9c1fd..21737b0d3b8 100644 --- a/ethdb/privateapi/engine_test.go +++ b/ethdb/privateapi/engine_test.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/engineapi" + "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/stretchr/testify/require" ) @@ -27,7 +28,7 @@ var ( // Payloads var ( - mockPayload1 *types2.ExecutionPayload = &types2.ExecutionPayload{ + mockPayload1 = &types2.ExecutionPayload{ ParentHash: gointerfaces.ConvertHashToH256(common.HexToHash("0x2")), BlockHash: gointerfaces.ConvertHashToH256(payload1Hash), ReceiptRoot: gointerfaces.ConvertHashToH256(common.HexToHash("0x3")), @@ -43,7 +44,7 @@ var ( Coinbase: gointerfaces.ConvertAddressToH160(common.HexToAddress("0x1")), Transactions: make([][]byte, 0), } - mockPayload2 *types2.ExecutionPayload = &types2.ExecutionPayload{ + mockPayload2 = &types2.ExecutionPayload{ ParentHash: gointerfaces.ConvertHashToH256(payload1Hash), BlockHash: gointerfaces.ConvertHashToH256(payload2Hash), 
ReceiptRoot: gointerfaces.ConvertHashToH256(common.HexToHash("0x3")), @@ -91,8 +92,8 @@ func TestMockDownloadRequest(t *testing.T) { makeTestDb(ctx, db) hd := headerdownload.NewHeaderDownload(0, 0, nil, nil) - - events := NewEvents() + hd.SetPOSSync(true) + events := shards.NewEvents() backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, nil, hd, false) var err error @@ -149,8 +150,9 @@ func TestMockValidExecution(t *testing.T) { makeTestDb(ctx, db) hd := headerdownload.NewHeaderDownload(0, 0, nil, nil) + hd.SetPOSSync(true) - events := NewEvents() + events := shards.NewEvents() backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, nil, hd, false) var err error @@ -184,8 +186,9 @@ func TestMockInvalidExecution(t *testing.T) { makeTestDb(ctx, db) hd := headerdownload.NewHeaderDownload(0, 0, nil, nil) + hd.SetPOSSync(true) - events := NewEvents() + events := shards.NewEvents() backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, nil, hd, false) var err error @@ -220,7 +223,7 @@ func TestNoTTD(t *testing.T) { hd := headerdownload.NewHeaderDownload(0, 0, nil, nil) - events := NewEvents() + events := shards.NewEvents() backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{}, nil, hd, false) var err error diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index fe9b93dfbb7..678885eea25 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -13,6 +13,10 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/log/v3" + "golang.org/x/exp/slices" + "google.golang.org/protobuf/types/known/emptypb" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus/serenity" 
"github.com/ledgerwatch/erigon/core" @@ -24,10 +28,8 @@ import ( "github.com/ledgerwatch/erigon/turbo/builder" "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" - "github.com/ledgerwatch/log/v3" - "golang.org/x/exp/slices" - "google.golang.org/protobuf/types/known/emptypb" ) // EthBackendAPIVersion @@ -49,7 +51,7 @@ type EthBackendServer struct { ctx context.Context eth EthBackend - events *Events + events *shards.Events db kv.RoDB blockReader services.BlockAndTxnReader config *params.ChainConfig @@ -72,7 +74,7 @@ type EthBackend interface { Peers(ctx context.Context) (*remote.PeersReply, error) } -func NewEthBackendServer(ctx context.Context, eth EthBackend, db kv.RwDB, events *Events, blockReader services.BlockAndTxnReader, +func NewEthBackendServer(ctx context.Context, eth EthBackend, db kv.RwDB, events *shards.Events, blockReader services.BlockAndTxnReader, config *params.ChainConfig, builderFunc builder.BlockBuilderFunc, hd *headerdownload.HeaderDownload, proposing bool, ) *EthBackendServer { s := &EthBackendServer{ctx: ctx, eth: eth, events: events, db: db, blockReader: blockReader, config: config, @@ -228,6 +230,28 @@ func (s *EthBackendServer) Block(ctx context.Context, req *remote.BlockRequest) return &remote.BlockReply{BlockRlp: blockRlp, Senders: sendersBytes}, nil } +func (s *EthBackendServer) PendingBlock(_ context.Context, _ *emptypb.Empty) (*remote.PendingBlockReply, error) { + s.lock.Lock() + defer s.lock.Unlock() + + b := s.builders[s.payloadId] + if b == nil { + return nil, nil + } + + pendingBlock := b.Block() + if pendingBlock == nil { + return nil, nil + } + + blockRlp, err := rlp.EncodeToBytes(pendingBlock) + if err != nil { + return nil, err + } + + return &remote.PendingBlockReply{BlockRlp: blockRlp}, nil +} + func convertPayloadStatus(payloadStatus *engineapi.PayloadStatus) 
*remote.EnginePayloadStatus { reply := remote.EnginePayloadStatus{Status: payloadStatus.Status} if payloadStatus.Status != remote.EngineStatus_SYNCING { @@ -313,23 +337,14 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E } block := types.NewBlockFromStorage(blockHash, &header, transactions, nil) - possibleStatus, err := s.getPayloadStatusFromHashIfPossible(blockHash, req.BlockNumber, header.ParentHash, true) + possibleStatus, err := s.getQuickPayloadStatusIfPossible(blockHash, req.BlockNumber, header.ParentHash, nil, true) if err != nil { return nil, err } if possibleStatus != nil { return convertPayloadStatus(possibleStatus), nil } - // If another payload is already commissioned then we just reply with syncing - if s.stageLoopIsBusy() { - // We are still syncing a commissioned payload - // TODO(yperbasis): not entirely correct since per the spec: - // The process of validating a payload on the canonical chain MUST NOT be affected by an active sync process on a side branch of the block tree. - // For example, if side branch B is SYNCING but the requisite data for validating a payload from canonical branch A is available, client software MUST initiate the validation process. - // https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.6/src/engine/specification.md#payload-validation - log.Debug("[NewPayload] stage loop is busy") - return &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, nil - } + s.lock.Lock() defer s.lock.Unlock() @@ -346,8 +361,8 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E return convertPayloadStatus(&payloadStatus), nil } -// Check if we can make out a status from the payload hash/head hash. 
-func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.Hash, blockNumber uint64, parentHash common.Hash, newPayload bool) (*engineapi.PayloadStatus, error) { +// Check if we can quickly determine the status of a newPayload or forkchoiceUpdated. +func (s *EthBackendServer) getQuickPayloadStatusIfPossible(blockHash common.Hash, blockNumber uint64, parentHash common.Hash, forkchoiceMessage *engineapi.ForkChoiceMessage, newPayload bool) (*engineapi.PayloadStatus, error) { // Determine which prefix to use for logs var prefix string if newPayload { @@ -361,7 +376,7 @@ func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.H } if s.hd == nil { - return nil, nil + return nil, fmt.Errorf("headerdownload is nil") } tx, err := s.db.BeginRo(s.ctx) @@ -369,6 +384,14 @@ func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.H return nil, err } defer tx.Rollback() + // Some Consensus layer clients sometimes sends us repeated FCUs and make Erigon print a gazillion logs. + // E.G teku sometimes will end up spamming fcu on the terminal block if it has not synced to that point. 
+ if forkchoiceMessage != nil && + forkchoiceMessage.FinalizedBlockHash == rawdb.ReadForkchoiceFinalized(tx) && + forkchoiceMessage.HeadBlockHash == rawdb.ReadForkchoiceHead(tx) && + forkchoiceMessage.SafeBlockHash == rawdb.ReadForkchoiceSafe(tx) { + return &engineapi.PayloadStatus{Status: remote.EngineStatus_VALID, LatestValidHash: blockHash}, nil + } header, err := rawdb.ReadHeaderByHash(tx, blockHash) if err != nil { @@ -378,7 +401,6 @@ func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.H var parent *types.Header var td *big.Int if newPayload { - // Obtain TD parent, err = rawdb.ReadHeaderByHash(tx, parentHash) if err != nil { return nil, err @@ -390,12 +412,17 @@ func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.H if err != nil { return nil, err } - // Check if we already reached TTD. + if td != nil && td.Cmp(s.config.TerminalTotalDifficulty) < 0 { - log.Warn(fmt.Sprintf("[%s] TTD not reached yet", prefix), "hash", common.Hash(blockHash)) + log.Warn(fmt.Sprintf("[%s] Beacon Chain request before TTD", prefix), "hash", blockHash) return &engineapi.PayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: common.Hash{}}, nil } + if !s.hd.POSSync() { + log.Info(fmt.Sprintf("[%s] Still in PoW sync", prefix), "hash", blockHash) + return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil + } + var canonicalHash common.Hash if header != nil { canonicalHash, err = rawdb.ReadCanonicalHash(tx, header.Number.Uint64()) @@ -434,31 +461,33 @@ func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.H return &engineapi.PayloadStatus{Status: remote.EngineStatus_VALID, LatestValidHash: blockHash}, nil } - if parent == nil && s.hd.PosStatus() == headerdownload.Syncing { + if parent == nil && s.hd.PosStatus() != headerdownload.Idle { + log.Debug(fmt.Sprintf("[%s] Downloading some other PoS blocks", prefix), "hash", blockHash) return &engineapi.PayloadStatus{Status: 
remote.EngineStatus_SYNCING}, nil } - - return nil, nil - } - - if header == nil { - if s.hd.PosStatus() == headerdownload.Syncing { + } else { + if header == nil && s.hd.PosStatus() != headerdownload.Idle { + log.Debug(fmt.Sprintf("[%s] Downloading some other PoS stuff", prefix), "hash", blockHash) return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil - } - return nil, nil - } + // Following code ensures we skip the fork choice state update if if forkchoiceState.headBlockHash references an ancestor of the head of canonical chain + headHash := rawdb.ReadHeadBlockHash(tx) + if err != nil { + return nil, err + } - headHash := rawdb.ReadHeadBlockHash(tx) - if err != nil { - return nil, err + // We add the extra restriction blockHash != headHash for the FCU case of canonicalHash == blockHash + // because otherwise (when FCU points to the head) we want go to stage headers + // so that it calls writeForkChoiceHashes. + if blockHash != headHash && canonicalHash == blockHash { + return &engineapi.PayloadStatus{Status: remote.EngineStatus_VALID, LatestValidHash: blockHash}, nil + } } - // We add the extra restriction blockHash != headHash for the FCU case of canonicalHash == blockHash - // because otherwise (when FCU points to the head) we want go to stage headers - // so that it calls writeForkChoiceHashes. 
- if blockHash != headHash && canonicalHash == blockHash { - return &engineapi.PayloadStatus{Status: remote.EngineStatus_VALID, LatestValidHash: blockHash}, nil + // If another payload is already commissioned then we just reply with syncing + if s.stageLoopIsBusy() { + log.Debug(fmt.Sprintf("[%s] stage loop is busy", prefix)) + return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } return nil, nil @@ -498,7 +527,13 @@ func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.E if err != nil { return nil, err } - log.Info("Block request successful", "hash", block.Header().Hash(), "transactions count", len(encodedTransactions), "number", block.NumberU64()) + + blockRlp, err := rlp.EncodeToBytes(block) + if err != nil { + return nil, err + } + log.Info("PoS block built successfully", "hash", block.Header().Hash(), + "transactions count", len(encodedTransactions), "number", block.NumberU64(), "rlp", common.Bytes2Hex(blockRlp)) return &types2.ExecutionPayload{ ParentHash: gointerfaces.ConvertHashToH256(block.Header().ParentHash), @@ -526,18 +561,11 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r FinalizedBlockHash: gointerfaces.ConvertH256ToHash(req.ForkchoiceState.FinalizedBlockHash), } - status, err := s.getPayloadStatusFromHashIfPossible(forkChoice.HeadBlockHash, 0, common.Hash{}, false) + status, err := s.getQuickPayloadStatusIfPossible(forkChoice.HeadBlockHash, 0, common.Hash{}, &forkChoice, false) if err != nil { return nil, err } - if status == nil && s.stageLoopIsBusy() { - log.Debug("[ForkChoiceUpdated] stage loop is busy") - return &remote.EngineForkChoiceUpdatedReply{ - PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, - }, nil - } - s.lock.Lock() defer s.lock.Unlock() @@ -606,9 +634,11 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r Timestamp: req.PayloadAttributes.Timestamp, PrevRandao: emptyHeader.MixDigest, 
SuggestedFeeRecipient: emptyHeader.Coinbase, + PayloadId: s.payloadId, } s.builders[s.payloadId] = builder.NewBlockBuilder(s.builderFunc, ¶m, emptyHeader) + log.Debug("BlockBuilder added", "payload", s.payloadId) return &remote.EngineForkChoiceUpdatedReply{ PayloadStatus: &remote.EnginePayloadStatus{ diff --git a/ethdb/privateapi/logsfilter.go b/ethdb/privateapi/logsfilter.go index 93b6a09a730..0c8f5098073 100644 --- a/ethdb/privateapi/logsfilter.go +++ b/ethdb/privateapi/logsfilter.go @@ -9,6 +9,7 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/gointerfaces/types" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/turbo/shards" ) type LogsFilterAggregator struct { @@ -16,7 +17,7 @@ type LogsFilterAggregator struct { logsFilters map[uint64]*LogsFilter // Filter for each subscriber, keyed by filterID logsFilterLock sync.Mutex nextFilterId uint64 - events *Events + events *shards.Events } // LogsFilter is used for both representing log filter for a specific subscriber (RPC daemon usually) @@ -32,7 +33,7 @@ type LogsFilter struct { sender remote.ETHBACKEND_SubscribeLogsServer // nil for aggregate subscriber, for appropriate stream server otherwise } -func NewLogsFilterAggregator(events *Events) *LogsFilterAggregator { +func NewLogsFilterAggregator(events *shards.Events) *LogsFilterAggregator { return &LogsFilterAggregator{ aggLogsFilter: LogsFilter{ addrs: make(map[common.Address]int), diff --git a/ethdb/privateapi/logsfilter_test.go b/ethdb/privateapi/logsfilter_test.go new file mode 100644 index 00000000000..519398b4007 --- /dev/null +++ b/ethdb/privateapi/logsfilter_test.go @@ -0,0 +1,246 @@ +package privateapi + +import ( + "context" + "testing" + + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + "google.golang.org/grpc" + + "github.com/ledgerwatch/erigon/common" + 
"github.com/ledgerwatch/erigon/turbo/shards" +) + +var ( + address1 = common.HexToHash("0xdac17f958d2ee523a2206206994597c13d831ec7") + topic1 = common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") + address160 *types2.H160 + topic1H256 *types2.H256 +) + +func init() { + var a common.Address + a.SetBytes(address1.Bytes()) + address160 = gointerfaces.ConvertAddressToH160(a) + topic1H256 = gointerfaces.ConvertHashToH256(topic1) +} + +type testServer struct { + received chan *remote.LogsFilterRequest + receiveCompleted chan struct{} + sent []*remote.SubscribeLogsReply + ctx context.Context + grpc.ServerStream +} + +func (ts *testServer) Send(m *remote.SubscribeLogsReply) error { + ts.sent = append(ts.sent, m) + return nil +} + +func (ts *testServer) Recv() (*remote.LogsFilterRequest, error) { + // notify complete when the last request has been processed + defer func() { + if len(ts.received) == 0 { + ts.receiveCompleted <- struct{}{} + } + }() + + return <-ts.received, nil +} + +func createLog() *remote.SubscribeLogsReply { + return &remote.SubscribeLogsReply{ + Address: gointerfaces.ConvertAddressToH160([20]byte{}), + BlockHash: gointerfaces.ConvertHashToH256([32]byte{}), + BlockNumber: 0, + Data: []byte{}, + LogIndex: 0, + Topics: []*types2.H256{gointerfaces.ConvertHashToH256([32]byte{99, 99})}, + TransactionHash: gointerfaces.ConvertHashToH256([32]byte{}), + TransactionIndex: 0, + Removed: false, + } +} + +func TestLogsFilter_EmptyFilter_DoesNotDistributeAnything(t *testing.T) { + events := shards.NewEvents() + agg := NewLogsFilterAggregator(events) + + srv := &testServer{ + received: make(chan *remote.LogsFilterRequest, 256), + receiveCompleted: make(chan struct{}, 1), + sent: make([]*remote.SubscribeLogsReply, 0), + ctx: context.Background(), + ServerStream: nil, + } + + req1 := &remote.LogsFilterRequest{ + AllAddresses: false, + Addresses: nil, + AllTopics: false, + Topics: nil, + } + srv.received <- req1 + + go func() { + err := 
agg.subscribeLogs(srv) + if err != nil { + t.Error(err) + } + }() + + <-srv.receiveCompleted + + // now see if a log would be sent or not + log := createLog() + agg.distributeLogs([]*remote.SubscribeLogsReply{log}) + + if len(srv.sent) != 0 { + t.Error("expected the sent slice to be empty") + } +} + +func TestLogsFilter_AllAddressesAndTopicsFilter_DistributesLogRegardless(t *testing.T) { + events := shards.NewEvents() + agg := NewLogsFilterAggregator(events) + + srv := &testServer{ + received: make(chan *remote.LogsFilterRequest, 256), + receiveCompleted: make(chan struct{}, 1), + sent: make([]*remote.SubscribeLogsReply, 0), + ctx: context.Background(), + ServerStream: nil, + } + + req1 := &remote.LogsFilterRequest{ + AllAddresses: true, + Addresses: nil, + AllTopics: true, + Topics: nil, + } + srv.received <- req1 + + go func() { + err := agg.subscribeLogs(srv) + if err != nil { + t.Error(err) + } + }() + + <-srv.receiveCompleted + + // now see if a log would be sent or not + log := createLog() + agg.distributeLogs([]*remote.SubscribeLogsReply{log}) + + if len(srv.sent) != 1 { + t.Error("expected the sent slice to have the log present") + } + + log = createLog() + log.Topics = []*types2.H256{topic1H256} + agg.distributeLogs([]*remote.SubscribeLogsReply{log}) + if len(srv.sent) != 2 { + t.Error("expected any topic to be allowed through the filter") + } + + log = createLog() + log.Address = address160 + agg.distributeLogs([]*remote.SubscribeLogsReply{log}) + if len(srv.sent) != 3 { + t.Error("expected any address to be allowed through the filter") + } +} + +func TestLogsFilter_TopicFilter_OnlyAllowsThatTopicThrough(t *testing.T) { + events := shards.NewEvents() + agg := NewLogsFilterAggregator(events) + + srv := &testServer{ + received: make(chan *remote.LogsFilterRequest, 256), + receiveCompleted: make(chan struct{}, 1), + sent: make([]*remote.SubscribeLogsReply, 0), + ctx: context.Background(), + ServerStream: nil, + } + + req1 := &remote.LogsFilterRequest{ + 
AllAddresses: true, // need to allow all addresses on the request else it will filter on them + Addresses: nil, + AllTopics: false, + Topics: []*types2.H256{topic1H256}, + } + srv.received <- req1 + + go func() { + err := agg.subscribeLogs(srv) + if err != nil { + t.Error(err) + } + }() + + <-srv.receiveCompleted + + // now see if a log would be sent or not + log := createLog() + agg.distributeLogs([]*remote.SubscribeLogsReply{log}) + + if len(srv.sent) != 0 { + t.Error("the sent slice should be empty as the topic didn't match") + } + + log = createLog() + log.Topics = []*types2.H256{topic1H256} + agg.distributeLogs([]*remote.SubscribeLogsReply{log}) + if len(srv.sent) != 1 { + t.Error("expected the log to be distributed as the topic matched") + } +} + +func TestLogsFilter_AddressFilter_OnlyAllowsThatAddressThrough(t *testing.T) { + events := shards.NewEvents() + agg := NewLogsFilterAggregator(events) + + srv := &testServer{ + received: make(chan *remote.LogsFilterRequest, 256), + receiveCompleted: make(chan struct{}, 1), + sent: make([]*remote.SubscribeLogsReply, 0), + ctx: context.Background(), + ServerStream: nil, + } + + req1 := &remote.LogsFilterRequest{ + AllAddresses: false, + Addresses: []*types2.H160{address160}, + AllTopics: true, + Topics: []*types2.H256{}, + } + srv.received <- req1 + + go func() { + err := agg.subscribeLogs(srv) + if err != nil { + t.Error(err) + } + }() + + <-srv.receiveCompleted + + // now see if a log would be sent or not + log := createLog() + agg.distributeLogs([]*remote.SubscribeLogsReply{log}) + + if len(srv.sent) != 0 { + t.Error("the sent slice should be empty as the address didn't match") + } + + log = createLog() + log.Address = address160 + agg.distributeLogs([]*remote.SubscribeLogsReply{log}) + if len(srv.sent) != 1 { + t.Error("expected the log to be distributed as the address matched") + } +} diff --git a/ethdb/privateapi/mining.go b/ethdb/privateapi/mining.go index 883673a44f0..39fc6dde687 100644 --- 
a/ethdb/privateapi/mining.go +++ b/ethdb/privateapi/mining.go @@ -132,6 +132,7 @@ func (s *MiningServer) OnMinedBlock(req *proto_txpool.OnMinedBlockRequest, reply } func (s *MiningServer) BroadcastMinedBlock(block *types.Block) error { + log.Debug("BroadcastMinedBlock", "block hash", block.Hash(), "block number", block.Number(), "root", block.Root(), "gas", block.GasUsed()) var buf bytes.Buffer if err := block.EncodeRLP(&buf); err != nil { return err diff --git a/ethdb/prune/storage_mode.go b/ethdb/prune/storage_mode.go index 29607850046..ac44bdafec3 100644 --- a/ethdb/prune/storage_mode.go +++ b/ethdb/prune/storage_mode.go @@ -1,6 +1,7 @@ package prune import ( + "bytes" "encoding/binary" "errors" "fmt" @@ -10,6 +11,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/log/v3" ) var DefaultMode = Mode{ @@ -21,15 +23,20 @@ var DefaultMode = Mode{ Experiments: Experiments{}, // all off } +var ( + mainnetDepositContractBlock uint64 = 11052984 + sepoliaDepositContractBlock uint64 = 1273020 + goerliDepositContractBlock uint64 = 4367322 +) + type Experiments struct { - TEVM bool } -func FromCli(flags string, exactHistory, exactReceipts, exactTxIndex, exactCallTraces, +func FromCli(chainId uint64, flags string, exactHistory, exactReceipts, exactTxIndex, exactCallTraces, beforeH, beforeR, beforeT, beforeC uint64, experiments []string) (Mode, error) { mode := DefaultMode + if flags != "default" && flags != "disabled" { - mode.Initialised = true for _, flag := range flags { switch flag { case 'h': @@ -46,55 +53,67 @@ func FromCli(flags string, exactHistory, exactReceipts, exactTxIndex, exactCallT } } + pruneBlockBefore := pruneBlockDefault(chainId) + if exactHistory > 0 { - mode.Initialised = true mode.History = Distance(exactHistory) } if exactReceipts > 0 { - mode.Initialised = true mode.Receipts = Distance(exactReceipts) } if exactTxIndex > 0 { - mode.Initialised = true mode.TxIndex = Distance(exactTxIndex) 
} if exactCallTraces > 0 { - mode.Initialised = true mode.CallTraces = Distance(exactCallTraces) } if beforeH > 0 { - mode.Initialised = true mode.History = Before(beforeH) } if beforeR > 0 { - mode.Initialised = true + if pruneBlockBefore != 0 { + log.Warn("specifying prune.before.r might break CL compatibility") + if beforeR > pruneBlockBefore { + log.Warn("the specified prune.before.r block number is higher than the deposit contract contract block number", "highest block number", pruneBlockBefore) + } + } mode.Receipts = Before(beforeR) + } else if exactReceipts == 0 && mode.Receipts.Enabled() && pruneBlockBefore != 0 { + // Default --prune=r to pruning receipts before the Beacon Chain genesis + mode.Receipts = Before(pruneBlockBefore) } if beforeT > 0 { - mode.Initialised = true mode.TxIndex = Before(beforeT) } if beforeC > 0 { - mode.Initialised = true mode.CallTraces = Before(beforeC) } for _, ex := range experiments { switch ex { - case "tevm": - mode.Initialised = true - mode.Experiments.TEVM = true case "": // skip default: return DefaultMode, fmt.Errorf("unexpected experiment found: %s", ex) } } - return mode, nil } +func pruneBlockDefault(chainId uint64) uint64 { + switch chainId { + case 1 /* mainnet */ : + return mainnetDepositContractBlock + case 11155111 /* sepolia */ : + return sepoliaDepositContractBlock + case 5 /* goerli */ : + return goerliDepositContractBlock + } + + return 0 +} + func Get(db kv.Getter) (Mode, error) { prune := DefaultMode prune.Initialised = true @@ -131,12 +150,6 @@ func Get(db kv.Getter) (Mode, error) { prune.CallTraces = blockAmount } - v, err := db.GetOne(kv.DatabaseInfo, kv.StorageModeTEVM) - if err != nil { - return prune, err - } - prune.Experiments.TEVM = len(v) == 1 && v[0] == 1 - return prune, nil } @@ -160,7 +173,9 @@ type BlockAmount interface { // Distance amount of blocks to keep in DB // but manual manipulation with such distance is very unsafe // for example: -// deleteUntil := currentStageProgress - 
pruningDistance +// +// deleteUntil := currentStageProgress - pruningDistance +// // may delete whole db - because of uint64 underflow when pruningDistance > currentStageProgress type Distance uint64 @@ -230,9 +245,6 @@ func (m Mode) String() string { long += fmt.Sprintf(" --prune.c.%s=%d", m.CallTraces.dbType(), m.CallTraces.toValue()) } } - if m.Experiments.TEVM { - long += " --experiments.tevm=enabled" - } return strings.TrimLeft(short+long, " ") } @@ -262,11 +274,6 @@ func Override(db kv.RwTx, sm Mode) error { return err } - err = setMode(db, kv.StorageModeTEVM, sm.Experiments.TEVM) - if err != nil { - return err - } - return nil } @@ -285,6 +292,10 @@ func EnsureNotChanged(tx kv.GetPut, pruneMode Mode) (Mode, error) { if pruneMode.Initialised { // If storage mode is not explicitly specified, we take whatever is in the database if !reflect.DeepEqual(pm, pruneMode) { + if bytes.Equal(pm.Receipts.dbType(), kv.PruneTypeOlder) && bytes.Equal(pruneMode.Receipts.dbType(), kv.PruneTypeBefore) { + log.Error("--prune=r flag has been changed to mean pruning of receipts before the Beacon Chain genesis. Please re-sync Erigon from scratch. 
" + + "Alternatively, enforce the old behaviour explicitly by --prune.r.older=90000 flag at the risk of breaking the Consensus Layer.") + } return pm, errors.New("not allowed change of --prune flag, last time you used: " + pm.String()) } } @@ -313,11 +324,6 @@ func setIfNotExist(db kv.GetPut, pm Mode) error { } } - err = setModeOnEmpty(db, kv.StorageModeTEVM, pm.Experiments.TEVM) - if err != nil { - return err - } - return nil } diff --git a/ethdb/prune/storage_mode_test.go b/ethdb/prune/storage_mode_test.go index 7afc5c81c5c..a5aeca248ac 100644 --- a/ethdb/prune/storage_mode_test.go +++ b/ethdb/prune/storage_mode_test.go @@ -15,16 +15,16 @@ func TestSetStorageModeIfNotExist(t *testing.T) { prune, err := Get(tx) assert.NoError(t, err) assert.Equal(t, Mode{true, Distance(math.MaxUint64), Distance(math.MaxUint64), - Distance(math.MaxUint64), Distance(math.MaxUint64), Experiments{TEVM: false}}, prune) + Distance(math.MaxUint64), Distance(math.MaxUint64), Experiments{}}, prune) err = setIfNotExist(tx, Mode{true, Distance(1), Distance(2), - Before(3), Before(4), Experiments{TEVM: false}}) + Before(3), Before(4), Experiments{}}) assert.NoError(t, err) prune, err = Get(tx) assert.NoError(t, err) assert.Equal(t, Mode{true, Distance(1), Distance(2), - Before(3), Before(4), Experiments{TEVM: false}}, prune) + Before(3), Before(4), Experiments{}}, prune) } var distanceTests = []struct { diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go index bc440e34238..8cc85089b7b 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -62,7 +62,7 @@ type Service struct { host string // Remote address of the monitoring service quitCh <-chan struct{} - headCh <-chan *types.Block + headCh <-chan [][]byte pongCh chan struct{} // Pong notifications are fed into this channel histCh chan []uint64 // History request block numbers are fed into this channel @@ -73,13 +73,14 @@ type Service struct { // websocket. 
// // From Gorilla websocket docs: -// Connections support one concurrent reader and one concurrent writer. -// Applications are responsible for ensuring that no more than one goroutine calls the write methods -// - NextWriter, SetWriteDeadline, WriteMessage, WriteJSON, EnableWriteCompression, SetCompressionLevel -// concurrently and that no more than one goroutine calls the read methods -// - NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler -// concurrently. -// The Close and WriteControl methods can be called concurrently with all other methods. +// +// Connections support one concurrent reader and one concurrent writer. +// Applications are responsible for ensuring that no more than one goroutine calls the write methods +// - NextWriter, SetWriteDeadline, WriteMessage, WriteJSON, EnableWriteCompression, SetCompressionLevel +// concurrently and that no more than one goroutine calls the read methods +// - NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler +// concurrently. +// The Close and WriteControl methods can be called concurrently with all other methods. type connWrapper struct { conn *websocket.Conn @@ -118,7 +119,7 @@ func (w *connWrapper) Close() error { } // New returns a monitoring service ready for stats reporting. 
-func New(node *node.Node, servers []*sentry.GrpcServer, chainDB kv.RoDB, engine consensus.Engine, url string, networkid uint64, quitCh <-chan struct{}, headCh chan *types.Block) error { +func New(node *node.Node, servers []*sentry.GrpcServer, chainDB kv.RoDB, engine consensus.Engine, url string, networkid uint64, quitCh <-chan struct{}, headCh chan [][]byte) error { // Parse the netstats connection url re := regexp.MustCompile("([^:@]*)(:([^@]*))?@(.+)") parts := re.FindStringSubmatch(url) @@ -226,8 +227,8 @@ func (s *Service) loop() { if err = s.reportHistory(conn, list); err != nil { log.Warn("Requested history report failed", "err", err) } - case head := <-s.headCh: - if err = s.reportBlock(conn, head); err != nil { + case <-s.headCh: + if err = s.reportBlock(conn); err != nil { log.Warn("Block stats report failed", "err", err) } @@ -248,7 +249,12 @@ func (s *Service) loop() { // unknown packets. func (s *Service) readLoop(conn *connWrapper) { // If the read loop exists, close the connection - defer conn.Close() + defer func(conn *connWrapper) { + closeErr := conn.Close() + if closeErr != nil { + log.Warn("Failed to close connection", "err", closeErr) + } + }(conn) for { // Retrieve the next generic network packet and bail out on error @@ -359,7 +365,7 @@ func (s *Service) login(conn *connWrapper) error { // Construct and send the login authentication // infos := s.server.NodeInfo() - var protocols []string + protocols := make([]string, 0, len(s.servers)) for _, srv := range s.servers { protocols = append(protocols, fmt.Sprintf("%s/%d", srv.Protocol.Name, srv.Protocol.Version)) } @@ -409,7 +415,7 @@ func (s *Service) report(conn *connWrapper) error { if err := s.reportLatency(conn); err != nil { return err } - if err := s.reportBlock(conn, nil); err != nil { + if err := s.reportBlock(conn); err != nil { return err } if err := s.reportPending(conn); err != nil { @@ -492,20 +498,16 @@ func (s uncleStats) MarshalJSON() ([]byte, error) { } // reportBlock retrieves 
the current chain head and reports it to the stats server. -func (s *Service) reportBlock(conn *connWrapper, block *types.Block) error { +func (s *Service) reportBlock(conn *connWrapper) error { roTx, err := s.chaindb.BeginRo(context.Background()) if err != nil { return err } defer roTx.Rollback() + + block := rawdb.ReadCurrentBlock(roTx) if block == nil { - block, err = rawdb.ReadLastBlockSynced(roTx) - if err != nil { - return err - } - if block == nil { - return nil - } + return nil } td, err := rawdb.ReadTd(roTx, block.Hash(), block.NumberU64()) @@ -536,9 +538,7 @@ func (s *Service) assembleBlockStats(block *types.Block, td *big.Int) *blockStat td = common.Big0 } // Gather the block infos from the local blockchain - var ( - txs []txStats - ) + txs := make([]txStats, 0, len(block.Transactions())) for _, tx := range block.Transactions() { txs = append(txs, txStats{tx.Hash()}) } diff --git a/go.mod b/go.mod index 74b3c4f255e..69f2edfe891 100644 --- a/go.mod +++ b/go.mod @@ -3,32 +3,36 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220729020228-73ce1c43eaa7 - github.com/ledgerwatch/erigon-snapshot v1.0.0 - github.com/ledgerwatch/log/v3 v3.4.1 + github.com/gballet/go-verkle v0.0.0-20220923150140-6c08cd337774 + github.com/ledgerwatch/erigon-lib v0.0.0-20221006052028-22049904649b + github.com/ledgerwatch/erigon-snapshot v1.0.1-0.20220913092204-de54ee30c7b9 + github.com/ledgerwatch/log/v3 v3.4.2 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/ledgerwatch/trackerslist v1.0.0 ) require ( github.com/RoaringBitmap/roaring v1.2.1 - github.com/VictoriaMetrics/fastcache v1.10.0 - github.com/VictoriaMetrics/metrics v1.20.1 + github.com/VictoriaMetrics/fastcache v1.12.0 + github.com/VictoriaMetrics/metrics v1.22.2 github.com/anacrolix/go-libutp v1.2.0 - github.com/anacrolix/log v0.13.2-0.20220427063716-a4894bb521c6 - github.com/anacrolix/torrent v1.46.1-0.20220713100403-caa9400c52fe + github.com/anacrolix/log 
v0.13.2-0.20220711050817-613cb738ef30 + github.com/anacrolix/sync v0.4.0 + github.com/anacrolix/torrent v1.47.0 github.com/benesch/cgosymbolizer v0.0.0-20190515212042-bec6fe6e597b github.com/btcsuite/btcd v0.22.0-beta + github.com/btcsuite/btcd/btcec/v2 v2.2.1 github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b - github.com/consensys/gnark-crypto v0.4.0 + github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f + github.com/crate-crypto/go-ipa v0.0.0-20220916134416-c5abbdbdf644 github.com/davecgh/go-spew v1.1.1 - github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea - github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 + github.com/deckarep/golang-set v1.8.0 + github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf github.com/edsrzf/mmap-go v1.1.0 github.com/emicklei/dot v1.0.0 github.com/emirpasic/gods v1.18.1 + github.com/ferranbt/fastssz v0.1.2 github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c - github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/goccy/go-json v0.9.7 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.4.1 @@ -38,16 +42,22 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d - github.com/holiman/uint256 v1.2.0 + github.com/holiman/uint256 v1.2.1 github.com/huin/goupnp v1.0.3 github.com/jackpal/go-nat-pmp v1.0.2 github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible + github.com/libp2p/go-libp2p v0.23.2 + github.com/libp2p/go-libp2p-core v0.20.1 + github.com/libp2p/go-libp2p-pubsub v0.8.1 + github.com/multiformats/go-multiaddr v0.7.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pelletier/go-toml v1.9.5 - github.com/pelletier/go-toml/v2 v2.0.2 + github.com/pelletier/go-toml/v2 v2.0.5 
github.com/pion/stun v0.3.5 + github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 github.com/quasilyte/go-ruleguard/dsl v0.3.21 github.com/rs/cors v1.8.2 github.com/spf13/cobra v1.5.0 @@ -55,31 +65,106 @@ require ( github.com/stretchr/testify v1.8.0 github.com/tendermint/go-amino v0.14.1 github.com/tendermint/tendermint v0.31.11 - github.com/torquem-ch/mdbx-go v0.25.1-0.20220720103744-b96489e94ece + github.com/torquem-ch/mdbx-go v0.26.0 github.com/ugorji/go/codec v1.1.13 github.com/ugorji/go/codec/codecgen v1.1.13 github.com/urfave/cli v1.22.9 github.com/valyala/fastjson v1.6.3 github.com/xsleonard/go-merkle v1.1.0 - go.uber.org/atomic v1.9.0 + go.uber.org/atomic v1.10.0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e + golang.org/x/exp v0.0.0-20220921164117-439092de6870 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 - golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f + golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 golang.org/x/time v0.0.0-20220609170525-579cf78fd858 google.golang.org/grpc v1.48.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 - google.golang.org/protobuf v1.28.0 + google.golang.org/protobuf v1.28.1 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c - modernc.org/sqlite v1.17.3 + gopkg.in/yaml.v2 v2.4.0 + modernc.org/sqlite v1.19.1 pgregory.net/rapid v0.4.7 ) +require ( + github.com/benbjohnson/clock v1.3.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/containerd/cgroups v1.0.4 // indirect + github.com/coreos/go-systemd/v22 v22.4.0 // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/elastic/gosigar v0.14.2 // indirect + github.com/flynn/noise v1.0.0 // indirect + github.com/francoispqt/gojay v1.2.13 // indirect + github.com/go-logr/logr v1.2.3 // indirect + 
github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/ipfs/go-cid v0.3.2 // indirect + github.com/ipfs/go-log v1.0.5 // indirect + github.com/ipfs/go-log/v2 v2.5.1 // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/klauspost/compress v1.15.10 // indirect + github.com/klauspost/cpuid/v2 v2.1.1 // indirect + github.com/koron/go-ssdp v0.0.3 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect + github.com/libp2p/go-mplex v0.7.0 // indirect + github.com/libp2p/go-msgio v0.2.0 // indirect + github.com/libp2p/go-nat v0.1.0 // indirect + github.com/libp2p/go-netroute v0.2.0 // indirect + github.com/libp2p/go-openssl v0.1.0 // indirect + github.com/libp2p/go-reuseport v0.2.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.0 // indirect + github.com/lucas-clemente/quic-go v0.29.1 // indirect + github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect + github.com/marten-seemann/qtls-go1-19 v0.1.0 // indirect + github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect + github.com/mattn/go-pointer v0.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/miekg/dns v1.1.50 // indirect + github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect + github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/sha256-simd v1.0.0 // indirect + github.com/mitchellh/mapstructure v1.3.2 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.1.0 // indirect + 
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multibase v0.1.1 // indirect + github.com/multiformats/go-multicodec v0.6.0 // indirect + github.com/multiformats/go-multihash v0.2.1 // indirect + github.com/multiformats/go-multistream v0.3.3 // indirect + github.com/multiformats/go-varint v0.0.6 // indirect + github.com/onsi/ginkgo v1.16.5 // indirect + github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/prometheus/client_golang v1.13.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect + github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect + go.opentelemetry.io/otel v1.8.0 // indirect + go.opentelemetry.io/otel/trace v1.8.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.23.0 // indirect + lukechampine.com/blake3 v1.1.7 // indirect +) + require ( crawshaw.io/sqlite v0.3.3-0.20210127221821-98b1f83c5508 // indirect github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 // indirect + github.com/alecthomas/atomic v0.1.0-alpha2 // indirect github.com/anacrolix/chansync v0.3.0 // indirect - github.com/anacrolix/dht/v2 v2.18.0 // indirect + github.com/anacrolix/dht/v2 v2.19.0 // indirect github.com/anacrolix/envpprof v1.2.1 // indirect github.com/anacrolix/generics v0.0.0-20220618083756-f99e35403a60 // indirect github.com/anacrolix/missinggo v1.3.0 // indirect @@ -88,7 +173,6 @@ require ( github.com/anacrolix/mmsg v1.0.0 // indirect github.com/anacrolix/multiless v0.3.0 // indirect github.com/anacrolix/stm v0.4.0 // indirect - github.com/anacrolix/sync v0.4.0 // indirect 
github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 // indirect github.com/anacrolix/utp v0.1.0 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect @@ -100,11 +184,11 @@ require ( github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect github.com/docker/docker v20.10.17+incompatible github.com/dustin/go-humanize v1.0.0 // indirect - github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect github.com/go-kit/kit v0.10.0 // indirect - github.com/go-logfmt/logfmt v0.5.0 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-stack/stack v1.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect @@ -118,7 +202,7 @@ require ( github.com/lispad/go-generics-tools v1.1.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mattn/go-colorable v0.1.11 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mschoch/smat v0.2.0 // indirect @@ -138,7 +222,7 @@ require ( github.com/pion/turn/v2 v2.0.8 // indirect github.com/pion/udp v0.1.1 // indirect github.com/pion/webrtc/v3 v3.1.42 // indirect - github.com/pkg/errors v0.9.1 // indirect + github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/rogpeppe/go-internal v1.8.1 // indirect @@ -149,26 +233,21 @@ require ( github.com/valyala/fastrand v1.1.0 // indirect github.com/valyala/histogram v1.2.0 // indirect go.etcd.io/bbolt v1.3.6 // indirect - golang.org/x/mod 
v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/net v0.0.0-20220630215102-69896b714898 // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5 // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.10 // indirect - golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect - google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 // indirect + golang.org/x/tools v0.1.12 // indirect + google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/uint128 v1.1.1 // indirect - modernc.org/cc/v3 v3.36.0 // indirect - modernc.org/ccgo/v3 v3.16.6 // indirect - modernc.org/libc v1.16.7 // indirect - modernc.org/mathutil v1.4.1 // indirect - modernc.org/memory v1.1.1 // indirect - modernc.org/opt v0.1.1 // indirect - modernc.org/strutil v1.1.1 // indirect - modernc.org/token v1.0.0 // indirect -) - -require ( - github.com/alecthomas/atomic v0.1.0-alpha2 // indirect + gopkg.in/yaml.v3 v3.0.1 gotest.tools/v3 v3.3.0 // indirect + lukechampine.com/uint128 v1.1.1 // indirect + modernc.org/cc/v3 v3.38.1 // indirect + modernc.org/ccgo/v3 v3.16.9 // indirect + modernc.org/libc v1.19.0 // indirect + modernc.org/mathutil v1.5.0 // indirect + modernc.org/memory v1.4.0 // indirect + modernc.org/opt v0.1.3 // indirect + modernc.org/strutil v1.1.3 // indirect + modernc.org/token v1.0.1 // indirect ) diff --git a/go.sum b/go.sum index d1d421ceeb8..b5aa32d55ca 100644 --- a/go.sum +++ b/go.sum @@ -1,13 +1,53 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod 
h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub 
v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797 h1:yDf7ARQc637HoxDho7xjqdvO5ZA2Yb+xzv/fOnnvZzw= crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797/go.mod h1:sXBiorCo8c46JlQV3oXPKINnZ8mcqnye1EkVkqsectk= crawshaw.io/sqlite v0.3.2/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4= crawshaw.io/sqlite v0.3.3-0.20210127221821-98b1f83c5508 h1:fILCBBFnjnrQ0whVJlGhfv1E/QiaFDNtGFBObEVRnYg= crawshaw.io/sqlite v0.3.3-0.20210127221821-98b1f83c5508/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod 
h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w= github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= @@ -15,10 +55,10 @@ github.com/RoaringBitmap/roaring v1.2.1 h1:58/LJlg/81wfEHd5L9qsHduznOIhyv4qb1yWc github.com/RoaringBitmap/roaring v1.2.1/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY= -github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= -github.com/VictoriaMetrics/metrics v1.20.1 h1:XqQbRKYzwkmo0DKKDbvp6V7upUqErlqd0vXPoeBsEbU= -github.com/VictoriaMetrics/metrics v1.20.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA= +github.com/VictoriaMetrics/fastcache v1.12.0 h1:vnVi/y9yKDcD9akmc4NqAoqgQhJrOwUF+j9LTgn4QDE= +github.com/VictoriaMetrics/fastcache v1.12.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= +github.com/VictoriaMetrics/metrics v1.22.2 h1:A6LsNidYwkAHetxsvNFaUWjtzu5ltdgNEoS6i7Bn+6I= +github.com/VictoriaMetrics/metrics v1.22.2/go.mod h1:rAr/llLpEnAdTehiNlUxKgnjcOuROSzpw0GvjpEbvFc= github.com/VividCortex/gohistogram v1.0.0/go.mod 
h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= @@ -32,12 +72,13 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/anacrolix/chansync v0.3.0 h1:lRu9tbeuw3wl+PhMu/r+JJCRu5ArFXIluOgdF0ao6/U= github.com/anacrolix/chansync v0.3.0/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k= -github.com/anacrolix/dht/v2 v2.18.0 h1:btjVjzjKqO5nKGbJHJ2UmuwiRx+EgX3e+OCHC9+WRz8= -github.com/anacrolix/dht/v2 v2.18.0/go.mod h1:mxrSeP/LIY429SgWMO9o6UdjBjB8ZjBh6HHCmd8Ly1g= +github.com/anacrolix/dht/v2 v2.19.0 h1:A9oMHWRGbLmCyx1JlYzg79bDrur8V60+0ts8ZwEVYt4= +github.com/anacrolix/dht/v2 v2.19.0/go.mod h1:0h83KnnAQ2AUYhpQ/CkoZP45K41pjDAlPR9zGHgFjQE= github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= @@ -51,8 +92,8 @@ github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgw 
github.com/anacrolix/log v0.6.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= github.com/anacrolix/log v0.10.0/go.mod h1:s5yBP/j046fm9odtUTbHOfDUq/zh1W8OkPpJtnX0oQI= github.com/anacrolix/log v0.10.1-0.20220123034749-3920702c17f8/go.mod h1:GmnE2c0nvz8pOIPUSC9Rawgefy1sDXqposC2wgtBZE4= -github.com/anacrolix/log v0.13.2-0.20220427063716-a4894bb521c6 h1:WH/Xcok0GpNID/NUV80CfTwUYXdbhR3pX/DXboxGhNI= -github.com/anacrolix/log v0.13.2-0.20220427063716-a4894bb521c6/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= +github.com/anacrolix/log v0.13.2-0.20220711050817-613cb738ef30 h1:bAgFzUxN1K3U8KwOzqCOhiygOr5NqYO3kNlV9tvp2Rc= +github.com/anacrolix/log v0.13.2-0.20220711050817-613cb738ef30/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62 h1:P04VG6Td13FHMgS5ZBcJX23NPC/fiC4cp9bXwYujdYM= github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62/go.mod h1:66cFKPCO7Sl4vbFnAaSq7e4OXtdMhRSBagJGWgmpJbM= github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s= @@ -83,12 +124,13 @@ github.com/anacrolix/sync v0.4.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DC github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= -github.com/anacrolix/torrent v1.46.1-0.20220713100403-caa9400c52fe h1:Unun5w67tVuGWK8Z5ERcHFCYOeOVgeWUIgHmp7Z8Apw= -github.com/anacrolix/torrent v1.46.1-0.20220713100403-caa9400c52fe/go.mod h1:/XEFAKfEx08Eng4vZRqnthqPB7ZqTH6DSDEHzGpDJRk= +github.com/anacrolix/torrent v1.47.0 h1:aDUnhQZ8+kfStLICHiXOGGYVFgDENK+kz4q96linyRg= +github.com/anacrolix/torrent v1.47.0/go.mod h1:SYPxEUjMwqhDr3kWGzyQLkFMuAb1bgJ57JRMpuD3ZzE= github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 
h1:QAVZ3pN/J4/UziniAhJR2OZ9Ox5kOY2053tBbbqUPYA= github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96/go.mod h1:Wa6n8cYIdaG35x15aH3Zy6d03f7P728QfdcDeD/IEOs= github.com/anacrolix/utp v0.1.0 h1:FOpQOmIwYsnENnz7tAGohA+r6iXpRjrq8ssKSre2Cp4= github.com/anacrolix/utp v0.1.0/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -101,6 +143,9 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/immutable v0.3.0 h1:TVRhuZx2wG9SZ0LRdqlbs9S5BZ6Y24hJEHTCgWHZEIw= github.com/benbjohnson/immutable v0.3.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= @@ -108,11 +153,13 @@ github.com/benesch/cgosymbolizer v0.0.0-20190515212042-bec6fe6e597b h1:5JgaFtHFR github.com/benesch/cgosymbolizer v0.0.0-20190515212042-bec6fe6e597b/go.mod h1:eMD2XUcPsHYbakFEocKrWZp47G0MRJYoC60qFblGjpA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bits-and-blooms/bitset v1.2.2 h1:J5gbX05GpMdBjCvQ9MteIg2KKDExr7DrgK+Yc15FvIk= github.com/bits-and-blooms/bitset v1.2.2/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8= @@ -120,6 +167,8 @@ github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67 github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo= github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= +github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E= +github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ= @@ 
-131,50 +180,75 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY= github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/consensys/bavard v0.1.8-0.20210329205436-c3e862ba4e5f/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= -github.com/consensys/gnark-crypto v0.4.0 h1:KHf7Ta876Ys6L8+i0DLRRKOAa3PfJ8oobAX1CEeIa4A= -github.com/consensys/gnark-crypto v0.4.0/go.mod h1:wK/gpXP9B06qTzTVML71GhKD1ygP9xOzukbI68NJqsQ= +github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= +github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f h1:C43yEtQ6NIf4ftFXD/V55gnGFgPbMQobd//YlnLjUJ8= +github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= 
+github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU= +github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-ipa v0.0.0-20220916134416-c5abbdbdf644 h1:1BOsVjUetPH2Lqv71Dh6uKLVj9WKdDr5KY57KZBbsWU= +github.com/crate-crypto/go-ipa v0.0.0-20220916134416-c5abbdbdf644/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0= -github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= +github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod 
h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ= -github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3vYNQgR+/ZIy0RmeUDZo7Y8zeQ= +github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -186,6 +260,9 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= +github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/emicklei/dot v1.0.0 h1:yyObALINBOuI1GdCRwVea2IPtGtVgh0NQgJDrE03Tqc= github.com/emicklei/dot v1.0.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= @@ -195,13 +272,19 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane 
v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/ferranbt/fastssz v0.1.2 h1:Dky6dXlngF6Qjc+EfDipAkE83N5I5DE68bY6O0VLNPk= +github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs= github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0= github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= -github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee h1:CaVlPeoz5kJQ+cAOV+ZDdlr3J2FmKyNkGu9LY+x7cDM= -github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee/go.mod h1:/sUSVgDcbjsisuW42GPDgaMqvJ0McZERNICnD7b1nRA= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= 
github.com/frankban/quicktest v1.9.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= @@ -213,36 +296,57 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 h1:IZqZOB2fydHte3kUgxrzK5E1fW7RQGeDwE8F/ZZnUYc= github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= +github.com/gballet/go-verkle v0.0.0-20220923150140-6c08cd337774 h1:e6wjiFtgxpaYdBVYpsbUPPlW6JCGjNYwShXSBoPi43Q= +github.com/gballet/go-verkle v0.0.0-20220923150140-6c08cd337774/go.mod h1:A3FwOP19ARP2LMaO9gN/KrkiDTbmBOvCHlUy15YIXx0= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= 
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1 
h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/goccy/go-json v0.9.7 h1:IcB+Aqpx/iMHu5Yooh7jEzJk1JZ7Pjtmys2ukPr7EeM= github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= @@ -250,12 +354,24 @@ github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache 
v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -280,19 +396,41 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.19 
h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs 
v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -303,10 +441,12 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -332,8 +472,8 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= 
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= -github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= -github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/holiman/uint256 v1.2.1 h1:XRtyuda/zw2l+Bq/38n5XUoEF72aSOu/77Thd9pPp2o= +github.com/holiman/uint256 v1.2.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= @@ -342,27 +482,46 @@ github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/ianlancetaylor/cgosymbolizer v0.0.0-20220405231054-a1ae3e4bba26 h1:UT3hQ6+5hwqUT83cKhKlY5I0W/kqsl6lpn3iFb3Gtqs= github.com/ianlancetaylor/cgosymbolizer v0.0.0-20220405231054-a1ae3e4bba26/go.mod h1:DvXTE/K/RtHehxU8/GtDs4vFtfw64jJ3PaCnFri8CRg= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= +github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= +github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod 
h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -373,10 +532,21 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kevinburke/go-bindata v3.21.0+incompatible h1:baK7hwFJDlAHrOqmE9U3u8tow1Uc5ihN9E/b7djcK2g= github.com/kevinburke/go-bindata v3.21.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream 
v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.15.10 h1:Ai8UzuomSCDw90e1qNMtb15msBXsNpH6gzkkENQNcJo= +github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.1.1 h1:t0wUqjowdm8ezddV5k0tLWVklVuvLJpoHeb4WBdydm0= +github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= +github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -384,42 +554,103 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220729020228-73ce1c43eaa7 h1:4yqgpRL2pJCrod0juCMepUMiXr10MKSASCBzxoUE7bw= -github.com/ledgerwatch/erigon-lib v0.0.0-20220729020228-73ce1c43eaa7/go.mod h1:19wwSb5qbagorz9a4QN9FzNqSPjmOJkwa5TezGjloks= -github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= -github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= -github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= -github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= +github.com/ledgerwatch/erigon-lib v0.0.0-20221006052028-22049904649b h1:nuOeJjIwx9uf/PTDlPR083teR0Ogey4r8yZ2ZLZiuh0= +github.com/ledgerwatch/erigon-lib v0.0.0-20221006052028-22049904649b/go.mod h1:YDP7ECNyjKo1dE7J5n8GXKBIYOWnmchvGCfALuwhBQg= +github.com/ledgerwatch/erigon-snapshot v1.0.1-0.20220913092204-de54ee30c7b9 h1:iWjzYLtOsp/Wpo9ZWV/eMIlnFzk8bm7POSzrXAILw24= +github.com/ledgerwatch/erigon-snapshot v1.0.1-0.20220913092204-de54ee30c7b9/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/log/v3 v3.4.2 h1:chvjB7c100rlIFgPv+Col2eerxIrHL88OiZRuPZDkxw= +github.com/ledgerwatch/log/v3 v3.4.2/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= github.com/ledgerwatch/secp256k1 v1.0.0/go.mod h1:SPmqJFciiF/Q0mPt2jVs2dTr/1TZBTIA+kPMmKgBAak= github.com/ledgerwatch/trackerslist v1.0.0 h1:6gnQu93WCTL4jPcdmc8UEmw56Cb8IFQHLGnevfIeLwo= 
github.com/ledgerwatch/trackerslist v1.0.0/go.mod h1:pCC+eEw8izNcnBBiSwvIq8kKsxDLInAafSW275jqFrg= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= +github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= +github.com/libp2p/go-libp2p v0.23.2 h1:yqyTeKQJyofWXxEv/eEVUvOrGdt/9x+0PIQ4N1kaxmE= +github.com/libp2p/go-libp2p v0.23.2/go.mod h1:s9DEa5NLR4g+LZS+md5uGU4emjMWFiqkZr6hBTY8UxI= +github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= +github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= +github.com/libp2p/go-libp2p-core v0.20.1 h1:fQz4BJyIFmSZAiTbKV8qoYhEH5Dtv/cVhZbG3Ib/+Cw= +github.com/libp2p/go-libp2p-core v0.20.1/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY= +github.com/libp2p/go-libp2p-pubsub v0.8.1 h1:hSw09NauFUaA0FLgQPBJp6QOy0a2n+HSkb8IeOx8OnY= +github.com/libp2p/go-libp2p-pubsub v0.8.1/go.mod h1:e4kT+DYjzPUYGZeWk4I+oxCSYTXizzXii5LDRRhjKSw= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-mplex v0.7.0 h1:BDhFZdlk5tbr0oyFq/xv/NPGfjbnrsDam1EvutpBDbY= +github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU= +github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU= +github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY= +github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= +github.com/libp2p/go-nat v0.1.0/go.mod 
h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= +github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.2.0 h1:0FpsbsvuSnAhXFnCY0VLFbJOzaK0VnP0r1QT/o4nWRE= +github.com/libp2p/go-netroute v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI= +github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo= +github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= +github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= +github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= +github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ= +github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lispad/go-generics-tools v1.1.0 h1:mbSgcxdFVmpoyso1X/MJHXbSbSL3dD+qhRryyxk+/XY= github.com/lispad/go-generics-tools v1.1.0/go.mod h1:2csd1EJljo/gy5qG4khXol7ivCPptNjG5Uv2X8MgK84= +github.com/lucas-clemente/quic-go v0.29.1 h1:Z+WMJ++qMLhvpFkRZA+jl3BTxUjm415YBmWanXB8zP0= +github.com/lucas-clemente/quic-go v0.29.1/go.mod h1:CTcNfLYJS2UuRNB+zcNlgvkjBhxX6Hm3WUxxAQx2mgE= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson 
v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.2.1 h1:jvTsT/HpCn2UZJdP+UUB53FfUUgeOyG5K1ns0OJOGVs= +github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM= +github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK5df3GufyYYU= +github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/marten-seemann/webtransport-go v0.1.1 h1:TnyKp3pEXcDooTaNn4s9dYpMJ7kMnTp7k5h+SgYP/mc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0= +github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= github.com/mattn/go-runewidth 
v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-sqlite3 v1.14.12 h1:TJ1bhYJPV44phC+IMu1u2K/i5RriLTPe+yc68XDJ1Z0= -github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= 
+github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -427,6 +658,8 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -434,10 +667,38 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mschoch/smat 
v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr v0.7.0 h1:gskHcdaCyPtp9XskVwtvEeQOG465sCohbQIirSyqxrc= +github.com/multiformats/go-multiaddr v0.7.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= +github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= +github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multicodec v0.6.0 h1:KhH2kSuCARyuJraYMFxrNO3DqIaYhOdS039kbhgVwpE= +github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.2.1 
h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= +github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o= +github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= @@ -445,6 +706,8 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 
h1:iZ5rEHU561k2tdi/atkIsrP5/3AX3BjyhYtC96nJ260= @@ -465,21 +728,28 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pbnjay/memory 
v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= -github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= +github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= @@ -532,28 +802,52 @@ github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6J github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod 
h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= 
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 h1:0tVE4tdWQK9ZpYygoV7+vS6QkDvQVySboMVEIxBJmXw= +github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4= +github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48 h1:cSo6/vk8YpvkLbk9v3FO97cakNmUoxwi2KMP8hd5WIw= github.com/quasilyte/go-ruleguard/dsl v0.3.21 h1:vNkC6fC6qMLzCOGbnIHOd5ixUGgTbp3Z4fGnUgULlDA= github.com/quasilyte/go-ruleguard/dsl v0.3.21/go.mod 
h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= +github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -568,6 +862,7 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 h1:Lt9DzQALzHoDwMBGJ6v8ObDPR0dzr2a6sXTB1Fq7IHs= github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -577,9 +872,34 @@ github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5P github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod 
h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= 
+github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= @@ -587,6 +907,11 @@ github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:K github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight 
v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -610,9 +935,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk= github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= github.com/tendermint/tendermint v0.31.11 h1:TIs//4WfEAG4TOZc2eUfJPI3T8KrywXQCCPnGAaM1Wo= @@ -623,8 +948,8 @@ github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDW github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod 
h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/torquem-ch/mdbx-go v0.25.1-0.20220720103744-b96489e94ece h1:jwLF5BKBWPb00kMfRmSHJl0Hwe52HonOVpNkBJZR+XI= -github.com/torquem-ch/mdbx-go v0.25.1-0.20220720103744-b96489e94ece/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= +github.com/torquem-ch/mdbx-go v0.26.0 h1:d8ph2MsVZoBZr0eFWHRiSYjoCXggED6XzcspUX/HsZM= +github.com/torquem-ch/mdbx-go v0.26.0/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc= github.com/ugorji/go/codec v1.1.13 h1:013LbFhocBoIqgHeIHKlV4JWYhqogATYWZhIcH0WHn4= github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU= @@ -632,6 +957,7 @@ github.com/ugorji/go/codec/codecgen v1.1.13 h1:rGpZ4Q63VcWA3DMBbIHvg+SQweUkfXBBa github.com/ugorji/go/codec/codecgen v1.1.13/go.mod h1:EhCxlc7Crov+HLygD4+hBCitXNrrGKRrRWj+pRsyJGg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.9 h1:cv3/KhXGBGjEXLC4bH0sLuJ9BewaAbpk5oyMOveu4pw= github.com/urfave/cli v1.22.9/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc= @@ -640,41 +966,75 @@ github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ= github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY= +github.com/viant/assertly v0.4.8/go.mod 
h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= +github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xsleonard/go-merkle v1.1.0 h1:fHe1fuhJjGH22ZzVTAH0jqHLhTGhOq3wQjJN+8P0jQg= github.com/xsleonard/go-merkle v1.1.0/go.mod h1:cW4z+UZ/4f2n9IJgIiyDCdYguchoDyDAPmpuOWGxdGg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg= +go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= +go.opentelemetry.io/otel/trace v1.8.0 h1:cSy0DF9eGI5WIfNwZ1q2iUyGj00tGzP24dE1lOlHrfY= +go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr 
v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -683,25 +1043,50 @@ golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= 
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220921164117-439092de6870 h1:j8b6j9gzSigH28O5SjSpQSSh9lFd6f5D/q0aHjNTulc= +golang.org/x/exp v0.0.0-20220921164117-439092de6870/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint 
v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -709,149 +1094,319 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220531201128-c960675eff93/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220630215102-69896b714898 h1:K7wO6V1IrczY9QOQ2WkVpw4JQSwCd52UsxVEirZUfiw= -golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5 h1:KafLifaRFIuSJ5C+7CyFJOF9haxKNC1CEIDk8GX6X0k= +golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210326220804-49726bf1d181/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc= +golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine 
v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 h1:PDIOdWxZ8eRizhKa1AAvY53xsvLB1cWorMjslvY3VA8= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= @@ -869,8 +1424,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= 
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -881,6 +1436,7 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -891,54 +1447,68 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= lukechampine.com/uint128 v1.1.1 h1:pnxCASz787iMf+02ssImqk6OLt+Z5QHMoZyUXR4z6JU= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.36.0 h1:0kmRkTmqNidmu3c7BNDSdVHCxXCkWLmWmCIVX4LUboo= -modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/ccgo/v3 
v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= -modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= -modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.6 h1:3l18poV+iUemQ98O3X5OMr97LOqlzis+ytivU4NqGhA= -modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.38.1 h1:Yu2IiiRpustRFUgMDZKwVn2RvyJzpfYSOw7zHeKtSi4= +modernc.org/cc/v3 v3.38.1/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= +modernc.org/ccgo/v3 v3.16.9 h1:AXquSwg7GuMk11pIdw7fmO1Y/ybgazVkMhsZWCV0mHM= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= -modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= -modernc.org/libc v1.16.7 h1:qzQtHhsZNpVPpeCu+aMIQldXeV1P0vRhSqCL0nOIJOA= -modernc.org/libc v1.16.7/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.19.0 h1:bXyVhGQg6KIClTr8FMVIDPl7jtbcs7aS5WP7vLDaxPs= +modernc.org/libc v1.19.0/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8= 
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU= -modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A= +modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.4.0 h1:crykUfNSnMAXaOJnnxcSzbUGMqkLWjklJKkBK2nwZwk= +modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.17.3 h1:iE+coC5g17LtByDYDWKpR6m2Z9022YrSh3bumwOnIrI= -modernc.org/sqlite v1.17.3/go.mod h1:10hPVYar9C0kfXuTWGz8s0XtB8uAGymUy51ZzStYe3k= -modernc.org/strutil v1.1.1 h1:xv+J1BXY3Opl2ALrBwyfEikFAj8pmqcpnfmuwUwcozs= +modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.19.1 h1:8xmS5oLnZtAK//vnd4aTVj8VOeTAccEFOtUnIzfSw+4= +modernc.org/sqlite v1.19.1/go.mod h1:UfQ83woKMaPW/ZBruK0T7YaFCrI+IE0LeWVY6pmnVms= modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/tcl v1.13.1 h1:npxzTwFTZYM8ghWicVIX1cRWzj7Nd8i6AqqX2p+IYao= -modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= -modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk= +modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.14.0 h1:cO7oyRWEXweSJmjdbs1L86P52D9QmBy/CPFKmFvNYTU= modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.5.1 
h1:RTNHdsrOpeoSeOF4FbzTo8gBYByaJ5xT7NgZ9ZqRiJM= -modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.6.0 h1:gLwAw6aS973K/k9EOJGlofauyMk4YOUiPDYzWnq/oXo= pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/hive/exclusions.json b/hive/exclusions.json new file mode 100644 index 00000000000..9093d1c3087 --- /dev/null +++ b/hive/exclusions.json @@ -0,0 +1,50 @@ +{ + "testSuites": [ + { + "name": "engine-api", + "testCases": [ + "Sidechain Reorg", + "Inconsistent Head in ForkchoiceState", + "Invalid Ancestor Chain Re-Org, Invalid StateRoot, Invalid P9'", + "Invalid Ancestor Chain Sync, Invalid StateRoot, Invalid P9'", + "Invalid Ancestor Chain Re-Org, Invalid StateRoot, Empty Txs, Invalid P9', Reveal using sync", + "Invalid Ancestor Chain Sync, Invalid StateRoot, Empty Txs, Invalid P9'", + "Invalid Ancestor Chain Re-Org, Invalid ReceiptsRoot, Invalid P8', Reveal using sync", + "Invalid Ancestor Chain Sync, Invalid ReceiptsRoot, Invalid P8'", + "Invalid Ancestor Chain Re-Org, Invalid GasLimit, Invalid P9', Reveal 
using sync", + "Invalid Ancestor Chain Sync, Invalid GasLimit, Invalid P9'", + "Invalid Ancestor Chain Re-Org, Invalid GasUsed, Invalid P9', Reveal using sync", + "Invalid Ancestor Chain Sync, Invalid GasUsed, Invalid P9'", + "Invalid Ancestor Chain Re-Org, Invalid Timestamp, Invalid P9', Reveal using sync", + "Invalid Ancestor Chain Sync, Invalid Timestamp, Invalid P9'", + "Invalid Ancestor Chain Re-Org, Incomplete Transactions, Invalid P9', Reveal using sync", + "Invalid Ancestor Chain Sync, Incomplete Transactions, Invalid P9'", + "Invalid Ancestor Chain Re-Org, Invalid Transaction Signature, Invalid P9', Reveal using sync", + "Invalid Ancestor Chain Sync, Invalid Transaction Signature, Invalid P9'", + "Invalid Ancestor Chain Re-Org, Invalid Transaction Nonce, Invalid P9', Reveal using sync", + "Invalid Ancestor Chain Sync, Invalid Transaction Nonce, Invalid P9'", + "Invalid Ancestor Chain Re-Org, Invalid Transaction Gas, Invalid P9', Reveal using sync", + "Invalid Ancestor Chain Sync, Invalid Transaction Gas, Invalid P9'", + "Invalid Ancestor Chain Re-Org, Invalid Transaction GasPrice, Invalid P9', Reveal using sync", + "Invalid Ancestor Chain Sync, Invalid Transaction GasPrice, Invalid P9'", + "Invalid Ancestor Chain Re-Org, Invalid Transaction Value, Invalid P9', Reveal using sync", + "Invalid Ancestor Chain Sync, Invalid Transaction Value, Invalid P9'", + "Invalid Ancestor Chain Re-Org, Invalid Ommers, Invalid P9', Reveal using sync", + "Invalid Ancestor Chain Sync, Invalid Ommers, Invalid P9'" + ] + }, + { + "Name": "engine-transition", + "testCases": [ + "Two Block PoW Re-org to Lower-Height Chain, Transaction Overwrite", + "Syncing on an Invalid Terminal Execution - Difficulty", + "Syncing on an Invalid Terminal Execution - Sealed MixHash", + "Syncing on an Invalid Terminal Execution - Sealed Nonce", + "Syncing on an Invalid Terminal Execution - Balance Mismatch", + "Stop processing gossiped Post-TTD PoW blocks", + "Terminal blocks are gossiped (Common 
Ancestor Depth 5", + "Long PoW Chain Sync" + ] + } + ] +} \ No newline at end of file diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go index b837c9c399c..fa3bbdd7490 100644 --- a/internal/cmdtest/test_cmd.go +++ b/internal/cmdtest/test_cmd.go @@ -32,6 +32,8 @@ import ( "text/template" "time" + "github.com/ledgerwatch/log/v3" + "github.com/docker/docker/pkg/reexec" ) @@ -83,7 +85,7 @@ func (tt *TestCmd) Run(name string, args ...string) { // InputLine writes the given text to the child's stdin. // This method can also be called from an expect template, e.g.: // -// geth.expect(`Passphrase: {{.InputLine "password"}}`) +// geth.expect(`Passphrase: {{.InputLine "password"}}`) func (tt *TestCmd) InputLine(s string) string { io.WriteString(tt.stdin, s+"\n") return "" @@ -206,7 +208,10 @@ func (tt *TestCmd) Interrupt() { // It will only return a valid value after the process has finished. func (tt *TestCmd) ExitStatus() int { if tt.Err != nil { - exitErr := tt.Err.(*exec.ExitError) + exitErr, ok := tt.Err.(*exec.ExitError) + if !ok { + log.Warn("Failed to type convert testCmd.Error to exec.ExitError") + } if exitErr != nil { if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { return status.ExitStatus() diff --git a/internal/debug/flags.go b/internal/debug/flags.go index 3eab6e518dd..eae6970ae5f 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -249,7 +249,7 @@ func StartPProf(address string, withMetrics bool) { heapMsg := fmt.Sprintf("go tool pprof -lines -http=: http://%s/%s", address, "debug/pprof/heap") log.Info("Starting pprof server", "cpu", cpuMsg, "heap", heapMsg) go func() { - if err := http.ListenAndServe(address, nil); err != nil { + if err := http.ListenAndServe(address, nil); err != nil { // nolint:gosec log.Error("Failure in running pprof server", "err", err) } }() diff --git a/internal/debug/signal.go b/internal/debug/signal.go index ed1e7bd28e8..85e1088eab4 100644 --- a/internal/debug/signal.go +++ 
b/internal/debug/signal.go @@ -37,7 +37,7 @@ func ListenSignals(stack io.Closer) { Exit() // ensure trace and CPU profile data is flushed. LoudPanic("boom") case <-usr1: - pprof.Lookup("goroutine").WriteTo(os.Stdout, 2) + pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) } } } diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 85f3d6b470c..2c8d96ad5c5 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -289,6 +289,9 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]i func RPCMarshalBlockEx(block *types.Block, inclTx bool, fullTx bool, borTx types.Transaction, borTxHash common.Hash) (map[string]interface{}, error) { fields := RPCMarshalHeader(block.Header()) fields["size"] = hexutil.Uint64(block.Size()) + if _, ok := fields["transactions"]; !ok { + fields["transactions"] = make([]interface{}, 0) + } if inclTx { formatTx := func(tx types.Transaction, index int) (interface{}, error) { @@ -459,6 +462,9 @@ func newRPCBorTransaction(opaqueTx types.Transaction, txHash common.Hash, blockH From: common.Address{}, To: tx.GetTo(), Value: (*hexutil.Big)(tx.GetValue().ToBig()), + V: (*hexutil.Big)(big.NewInt(0)), + R: (*hexutil.Big)(big.NewInt(0)), + S: (*hexutil.Big)(big.NewInt(0)), } if blockHash != (common.Hash{}) { result.BlockHash = &blockHash diff --git a/internal/flags/helpers.go b/internal/flags/helpers.go index 55fe72fc272..e7545d01d14 100644 --- a/internal/flags/helpers.go +++ b/internal/flags/helpers.go @@ -36,7 +36,7 @@ type FlagGroup struct { Flags []cli.Flag } -// byCategory sorts an array of FlagGroup by Name in the order +// ByCategory sorts an array of FlagGroup by Name in the order // defined in AppHelpFlagGroups. 
type ByCategory []FlagGroup @@ -58,17 +58,6 @@ func (a ByCategory) Less(i, j int) bool { return iIdx < jIdx } -func FlagCategory(flag cli.Flag, flagGroups []FlagGroup) string { - for _, category := range flagGroups { - for _, flg := range category.Flags { - if flg.GetName() == flag.GetName() { - return category.Name - } - } - } - return "MISC" -} - // NewApp creates an app with sane defaults. func NewApp(gitCommit, gitDate, usage string) *cli.App { app := cli.NewApp() diff --git a/k8s/base/eth66-peering-tcp.yaml b/k8s/base/eth66-peering-tcp.yaml new file mode 100644 index 00000000000..e78717488a4 --- /dev/null +++ b/k8s/base/eth66-peering-tcp.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: erigon + name: eth66-peering-tcp +spec: + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: eth66-pr-tcp + port: 30303 + protocol: TCP + targetPort: 30303 + selector: + app: erigon + type: LoadBalancer diff --git a/k8s/base/eth66-peering-udp.yaml b/k8s/base/eth66-peering-udp.yaml new file mode 100644 index 00000000000..612141d7383 --- /dev/null +++ b/k8s/base/eth66-peering-udp.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: erigon + name: eth66-peering-udp +spec: + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: eth66-pr-udp + port: 30303 + protocol: UDP + targetPort: 30303 + selector: + app: erigon + type: LoadBalancer diff --git a/k8s/base/http.yaml b/k8s/base/http.yaml new file mode 100644 index 00000000000..fff0733b6eb --- /dev/null +++ b/k8s/base/http.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: erigon + name: http +spec: + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: http + port: 8545 + protocol: TCP + targetPort: 8545 + selector: + app: erigon + type: LoadBalancer diff --git a/k8s/base/kustomization.yaml b/k8s/base/kustomization.yaml new file mode 100644 index 00000000000..54effbae0fb --- /dev/null +++ 
b/k8s/base/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- eth66-peering-tcp.yaml +- eth66-peering-udp.yaml +- http.yaml +- metrics.yaml +- snap-sync-tcp.yaml +- snap-sync-udp.yaml +- statefulset.yaml diff --git a/k8s/base/metrics.yaml b/k8s/base/metrics.yaml new file mode 100644 index 00000000000..3c910a725bc --- /dev/null +++ b/k8s/base/metrics.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: erigon + name: metrics +spec: + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: metrics + port: 6060 + protocol: TCP + targetPort: 6060 + selector: + app: erigon diff --git a/k8s/base/snap-sync-tcp.yaml b/k8s/base/snap-sync-tcp.yaml new file mode 100644 index 00000000000..7f800dd0133 --- /dev/null +++ b/k8s/base/snap-sync-tcp.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: erigon + name: snap-sync-tcp +spec: + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: snap-sync-tcp + port: 42069 + protocol: TCP + targetPort: 42069 + selector: + app: erigon + type: LoadBalancer diff --git a/k8s/base/snap-sync-udp.yaml b/k8s/base/snap-sync-udp.yaml new file mode 100644 index 00000000000..b1a41e1e9b9 --- /dev/null +++ b/k8s/base/snap-sync-udp.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: erigon + name: snap-sync-udp +spec: + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: snap-sync-udp + port: 42069 + protocol: UDP + targetPort: 42069 + selector: + app: erigon + type: LoadBalancer diff --git a/k8s/base/statefulset.yaml b/k8s/base/statefulset.yaml new file mode 100644 index 00000000000..1896dfdac52 --- /dev/null +++ b/k8s/base/statefulset.yaml @@ -0,0 +1,81 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: erigon + name: erigon +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: erigon + serviceName: erigon + template: + metadata: + labels: + app: erigon + spec: + 
containers: + - args: + - '--authrpc.vhosts=*' + - '--datadir=/home/erigon/.local/share/erigon' + - '--healthcheck' + - '--log.json' + - '--metrics' + - '--metrics.addr=0.0.0.0' + - '--metrics.port=6060' + - '--nat=none' + - '--private.api.ratelimit=50000' + - '--private.api.addr=0.0.0.0:9090' + - '--torrent.download.rate=3000mb' + - '--torrent.download.slots=200' + - '--verbosity=3' + - —-batchSize=8000M + command: + - erigon + image: erigon-image + livenessProbe: + initialDelaySeconds: 1800 + periodSeconds: 20 + tcpSocket: + port: 9090 + name: erigon + ports: + - containerPort: 9090 + name: private-api + - containerPort: 8551 + name: engine + - containerPort: 30303 + name: eth66-pr-tcp + protocol: TCP + - containerPort: 30303 + name: eth66-pr-udp + protocol: UDP + - containerPort: 6060 + name: metrics + - containerPort: 6070 + name: pprof + - containerPort: 42069 + name: snap-sync-tcp + protocol: TCP + - containerPort: 42069 + name: snap-sync-udp + protocol: UDP + readinessProbe: + initialDelaySeconds: 1800 + periodSeconds: 20 + tcpSocket: + port: 9090 + resources: + requests: + cpu: 2462m + memory: 16Gi + securityContext: + allowPrivilegeEscalation: false + runAsGroup: 1000 + runAsUser: 1000 + volumeMounts: [] + initContainers: [] + volumes: [] + volumeClaimTemplates: [] \ No newline at end of file diff --git a/libmdbx b/libmdbx deleted file mode 160000 index 0018164fef0..00000000000 --- a/libmdbx +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 0018164fef048b68dd84d503fde95dab5fdea94b diff --git a/metrics/exp/exp.go b/metrics/exp/exp.go index 6a2ebf7ae62..763dc41efe5 100644 --- a/metrics/exp/exp.go +++ b/metrics/exp/exp.go @@ -23,7 +23,7 @@ func Setup(address string) { //})) log.Info("Starting metrics server", "addr", fmt.Sprintf("http://%s/debug/metrics/prometheus", address)) go func() { - if err := http.ListenAndServe(address, nil); err != nil { + if err := http.ListenAndServe(address, nil); err != nil { // nolint:gosec log.Error("Failure in running metrics 
server", "err", err) } }() diff --git a/migrations/migrations.go b/migrations/migrations.go index 1890bca35ca..b5994461a00 100644 --- a/migrations/migrations.go +++ b/migrations/migrations.go @@ -21,20 +21,20 @@ import ( // // Idempotency is expected // Best practices to achieve Idempotency: -// - in dbutils/bucket.go add suffix for existing bucket variable, create new bucket with same variable name. -// Example: -// - SyncStageProgress = []byte("SSP1") -// + SyncStageProgressOld1 = []byte("SSP1") -// + SyncStageProgress = []byte("SSP2") -// - in the beginning of migration: check that old bucket exists, clear new bucket -// - in the end:drop old bucket (not in defer!). -// - if you need migrate multiple buckets - create separate migration for each bucket -// - write test - and check that it's safe to apply same migration twice +// - in dbutils/bucket.go add suffix for existing bucket variable, create new bucket with same variable name. +// Example: +// - SyncStageProgress = []byte("SSP1") +// - SyncStageProgressOld1 = []byte("SSP1") +// - SyncStageProgress = []byte("SSP2") +// - in the beginning of migration: check that old bucket exists, clear new bucket +// - in the end:drop old bucket (not in defer!). 
+// - if you need migrate multiple buckets - create separate migration for each bucket +// - write test - and check that it's safe to apply same migration twice var migrations = map[kv.Label][]Migration{ kv.ChainDB: { dbSchemaVersion5, txsBeginEnd, - resetBlocks, + resetBlocks4, }, kv.TxPoolDB: {}, kv.SentryDB: {}, diff --git a/migrations/reset_blocks.go b/migrations/reset_blocks.go index 30a187a1237..c42ed0fb7f0 100644 --- a/migrations/reset_blocks.go +++ b/migrations/reset_blocks.go @@ -2,18 +2,22 @@ package migrations import ( "context" + "encoding/binary" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/rawdb/rawdbreset" + "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" "github.com/ledgerwatch/log/v3" ) -var resetBlocks = Migration{ - Name: "reset_blocks_3", +var resetBlocks4 = Migration{ + Name: "reset_blocks_4", Up: func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { @@ -32,27 +36,81 @@ var resetBlocks = Migration{ } return tx.Commit() } - genesisBlock := rawdb.ReadHeaderByNumber(tx, 0) - if genesisBlock == nil { + // Detect whether the correction is required + snaps := snapshotsync.NewRoSnapshots(ethconfig.Snapshot{ + Enabled: true, + KeepBlocks: true, + Produce: false, + }, dirs.Snap) + snaps.ReopenFolder() + var lastFound bool + var lastBlockNum, lastBaseTxNum, lastAmount uint64 + if err := snaps.Bodies.View(func(sns []*snapshotsync.BodySegment) error { + // Take the last snapshot + if len(sns) == 0 { + return nil + } + sn := sns[len(sns)-1] + sn.Iterate(func(blockNum uint64, baseTxNum uint64, txAmount uint64) error { + lastBlockNum = 
blockNum + lastBaseTxNum = baseTxNum + lastAmount = txAmount + lastFound = true + return nil + }) + return nil + }); err != nil { + return err + } + if !lastFound { if err := BeforeCommit(tx, nil, true); err != nil { return err } return tx.Commit() } - chainConfig, err := rawdb.ReadChainConfig(tx, genesisBlock.Hash()) + c, err := tx.Cursor(kv.BlockBody) if err != nil { return err } + defer c.Close() + var fixNeeded bool + for k, _, err := c.First(); k != nil; k, _, err = c.Next() { + if err != nil { + return err + } + blockNumber := binary.BigEndian.Uint64(k[:8]) + if blockNumber != lastBlockNum+1 { + continue + } + blockHash := common.BytesToHash(k[8:]) + var hash common.Hash + if hash, err = rawdb.ReadCanonicalHash(tx, blockNumber); err != nil { + return err + } + // ReadBody is not returning baseTxId which is written into the DB record, but that value + 1 + _, baseTxId, _ := rawdb.ReadBody(tx, blockHash, blockNumber) + if hash != blockHash { + continue + } + if lastBaseTxNum+lastAmount+1 != baseTxId { + log.Info("Fix required, last block in seg files", "height", lastBlockNum, "baseTxNum", lastBaseTxNum, "txAmount", lastAmount, "first txId in DB", baseTxId, "expected", lastBaseTxNum+lastAmount+1) + fixNeeded = true + } + } + if !fixNeeded { + log.Info("Fix is not required") + if err := BeforeCommit(tx, nil, true); err != nil { + return err + } + return tx.Commit() + } + headersProgress, _ := stages.GetStageProgress(tx, stages.Headers) if headersProgress > 0 { log.Warn("NOTE: this migration will remove recent blocks (and senders) to fix several recent bugs. 
Your node will re-download last ~400K blocks, should not take very long") } - if err := snap.RemoveNonPreverifiedFiles(chainConfig.ChainName, dirs.Snap); err != nil { - return err - } - - if err := rawdbreset.ResetBlocks(tx); err != nil { + if err := rawdbreset.ResetBlocks(tx, db, nil, nil, dirs.Tmp); err != nil { return err } diff --git a/node/doc.go b/node/doc.go index e7de9491c02..7c9efeaae95 100644 --- a/node/doc.go +++ b/node/doc.go @@ -21,25 +21,22 @@ In the model exposed by this package, a node is a collection of services which u resources to provide RPC APIs. Services can also offer devp2p protocols, which are wired up to the devp2p network when the node instance is started. - -Node Lifecycle +# Node Lifecycle The Node object has a lifecycle consisting of three basic states, INITIALIZING, RUNNING and CLOSED. - - ●───────┐ - New() - │ - ▼ - INITIALIZING ────Start()─┐ - │ │ - │ ▼ - Close() RUNNING - │ │ - ▼ │ - CLOSED ◀──────Close()─┘ - + ●───────┐ + New() + │ + ▼ + INITIALIZING ────Start()─┐ + │ │ + │ ▼ + Close() RUNNING + │ │ + ▼ │ + CLOSED ◀──────Close()─┘ Creating a Node allocates basic resources such as the data directory and returns the node in its INITIALIZING state. Lifecycle objects, RPC APIs and peer-to-peer networking @@ -58,8 +55,7 @@ objects and shuts down RPC and peer-to-peer networking. You must always call Close on Node, even if the node was not started. - -Resources Managed By Node +# Resources Managed By Node All file-system resources used by a node instance are located in a directory called the data directory. The location of each resource can be overridden through additional node @@ -83,8 +79,7 @@ without a data directory, databases are opened in memory instead. Node also creates the shared store of encrypted Ethereum account keys. Services can access the account manager through the service context. 
- -Sharing Data Directory Among Instances +# Sharing Data Directory Among Instances Multiple node instances can share a single data directory if they have distinct instance names (set through the Name config option). Sharing behaviour depends on the type of @@ -99,25 +94,24 @@ Databases are also stored within the instance subdirectory. If multiple node instances use the same data directory, opening the databases with identical names will create one database for each instance. - -Data Directory Sharing Example +# Data Directory Sharing Example In this example, two node instances named A and B are started with the same data directory. Node instance A opens the database "db", node instance B opens the databases "db" and "db-2". The following files will be created in the data directory: - data-directory/ - A/ - nodekey -- devp2p node key of instance A - nodes/ -- devp2p discovery knowledge database of instance A - db/ -- data for "db" - A.ipc -- JSON-RPC UNIX domain socket endpoint of instance A - B/ - nodekey -- devp2p node key of node B - nodes/ -- devp2p discovery knowledge database of instance B - static-nodes.json -- devp2p static node list of instance B - db/ -- data for "db" - db-2/ -- data for "db-2" - B.ipc -- JSON-RPC UNIX domain socket endpoint of instance B + data-directory/ + A/ + nodekey -- devp2p node key of instance A + nodes/ -- devp2p discovery knowledge database of instance A + db/ -- data for "db" + A.ipc -- JSON-RPC UNIX domain socket endpoint of instance A + B/ + nodekey -- devp2p node key of node B + nodes/ -- devp2p discovery knowledge database of instance B + static-nodes.json -- devp2p static node list of instance B + db/ -- data for "db" + db-2/ -- data for "db-2" + B.ipc -- JSON-RPC UNIX domain socket endpoint of instance B */ package node diff --git a/node/endpoints.go b/node/endpoints.go index 61f8a139bec..eb1708721c8 100644 --- a/node/endpoints.go +++ b/node/endpoints.go @@ -40,12 +40,18 @@ func StartHTTPEndpoint(endpoint string, timeouts 
rpccfg.HTTPTimeouts, handler ht CheckTimeouts(&timeouts) // Bundle and start the HTTP server httpSrv := &http.Server{ - Handler: handler, - ReadTimeout: timeouts.ReadTimeout, - WriteTimeout: timeouts.WriteTimeout, - IdleTimeout: timeouts.IdleTimeout, + Handler: handler, + ReadTimeout: timeouts.ReadTimeout, + WriteTimeout: timeouts.WriteTimeout, + IdleTimeout: timeouts.IdleTimeout, + ReadHeaderTimeout: timeouts.ReadTimeout, } - go httpSrv.Serve(listener) + go func() { + serveErr := httpSrv.Serve(listener) + if serveErr != nil { + log.Warn("Failed to serve http endpoint", "err", serveErr) + } + }() return httpSrv, listener.Addr(), err } diff --git a/node/errors.go b/node/errors.go index 3e9910a2fb2..c6ac5c7aab7 100644 --- a/node/errors.go +++ b/node/errors.go @@ -24,16 +24,15 @@ import ( ) var ( - ErrDataDirUsed = errors.New("datadir already used by another process") - ErrNodeStopped = errors.New("node not started") - ErrNodeRunning = errors.New("node already running") - ErrServiceUnknown = errors.New("unknown service") + ErrDataDirUsed = errors.New("datadir already used by another process") + ErrNodeStopped = errors.New("node not started") + ErrNodeRunning = errors.New("node already running") - datadirInUseErrnos = map[uint]bool{11: true, 32: true, 35: true} + datadirInUseErrNos = map[uint]bool{11: true, 32: true, 35: true} ) func convertFileLockError(err error) error { - if errno, ok := err.(syscall.Errno); ok && datadirInUseErrnos[uint(errno)] { + if errno, ok := err.(syscall.Errno); ok && datadirInUseErrNos[uint(errno)] { return ErrDataDirUsed } return err diff --git a/node/node.go b/node/node.go index f43713acc31..b4700321b5e 100644 --- a/node/node.go +++ b/node/node.go @@ -30,6 +30,7 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon/node/nodecfg" "github.com/ledgerwatch/erigon/params" + "golang.org/x/sync/semaphore" "github.com/gofrs/flock" "github.com/ledgerwatch/erigon-lib/kv" @@ -131,8 +132,14 @@ func (n *Node) Start() error { } // 
Check if any lifecycle failed to start. if err != nil { - n.stopServices(started) //nolint:errcheck - n.doClose(nil) + stopErr := n.stopServices(started) + if stopErr != nil { + n.log.Warn("Failed to doClose for this node", "err", stopErr) + } //nolint:errcheck + closeErr := n.doClose(nil) + if closeErr != nil { + n.log.Warn("Failed to doClose for this node", "err", closeErr) + } } return err } @@ -238,7 +245,7 @@ func (n *Node) openDataDir() error { return convertFileLockError(err) } if !locked { - return fmt.Errorf("%w: %s\n", ErrDataDirUsed, instdir) + return fmt.Errorf("%w: %s", ErrDataDirUsed, instdir) } n.dirLock = l return nil @@ -313,12 +320,19 @@ func OpenDatabase(config *nodecfg.Config, logger log.Logger, label kv.Label) (kv var openFunc func(exclusive bool) (kv.RwDB, error) log.Info("Opening Database", "label", name, "path", dbPath) openFunc = func(exclusive bool) (kv.RwDB, error) { - opts := mdbx.NewMDBX(logger).Path(dbPath).Label(label).DBVerbosity(config.DatabaseVerbosity) + roTxLimit := int64(32) + if config.Http.DBReadConcurrency > 0 { + roTxLimit = int64(config.Http.DBReadConcurrency) + } + roTxsLimiter := semaphore.NewWeighted(roTxLimit) // 1 less than max to allow unlocking to happen + opts := mdbx.NewMDBX(logger).Path(dbPath).Label(label).DBVerbosity(config.DatabaseVerbosity).RoTxsLimiter(roTxsLimiter) if exclusive { opts = opts.Exclusive() } if label == kv.ChainDB { opts = opts.PageSize(config.MdbxPageSize.Bytes()).MapSize(8 * datasize.TB) + } else { + opts = opts.GrowthStep(16 * datasize.MB) } return opts.Open() } diff --git a/node/node_example_test.go b/node/node_example_test.go index 43ffc8cb56a..14ce0ff97ff 100644 --- a/node/node_example_test.go +++ b/node/node_example_test.go @@ -28,8 +28,8 @@ import ( // life cycle management. 
// // The following methods are needed to implement a node.Lifecycle: -// - Start() error - method invoked when the node is ready to start the service -// - Stop() error - method invoked when the node terminates the service +// - Start() error - method invoked when the node is ready to start the service +// - Stop() error - method invoked when the node terminates the service type SampleLifecycle struct{} func (s *SampleLifecycle) Start() error { fmt.Println("Service starting..."); return nil } diff --git a/node/nodecfg/config.go b/node/nodecfg/config.go index da6d2dc8ebf..e73a79b14c6 100644 --- a/node/nodecfg/config.go +++ b/node/nodecfg/config.go @@ -313,7 +313,7 @@ func (c *Config) parsePersistentNodes(w *bool, path string) []*enode.Node { return nil } // Interpret the list as a discovery node array - var nodes []*enode.Node + nodes := make([]*enode.Node, 0, len(nodelist)) for _, url := range nodelist { if url == "" { continue diff --git a/node/nodecfg/datadir/dirs.go b/node/nodecfg/datadir/dirs.go index 81c031c37b9..5b7f33f3a1c 100644 --- a/node/nodecfg/datadir/dirs.go +++ b/node/nodecfg/datadir/dirs.go @@ -14,6 +14,7 @@ type Dirs struct { Chaindata string Tmp string Snap string + SnapHistory string TxPool string Nodes string } @@ -35,6 +36,7 @@ func New(datadir string) Dirs { Chaindata: filepath.Join(datadir, "chaindata"), Tmp: filepath.Join(datadir, "etl-temp"), Snap: filepath.Join(datadir, "snapshots"), + SnapHistory: filepath.Join(datadir, "snapshots", "history"), TxPool: filepath.Join(datadir, "txpool"), Nodes: filepath.Join(datadir, "nodes"), } diff --git a/node/nodecfg/defaults.go b/node/nodecfg/defaults.go index a421c79f235..6bac6dee33c 100644 --- a/node/nodecfg/defaults.go +++ b/node/nodecfg/defaults.go @@ -25,13 +25,13 @@ import ( ) const ( - DefaultHTTPHost = "localhost" // Default host interface for the HTTP RPC server - DefaultHTTPPort = 8545 // Default TCP port for the HTTP RPC server - DefaultEngineHTTPPort = 8551 // Default TCP port for the 
engineApi HTTP RPC server - DefaultWSHost = "localhost" // Default host interface for the websocket RPC server - DefaultWSPort = 8546 // Default TCP port for the websocket RPC server - DefaultGRPCHost = "localhost" // Default host interface for the GRPC server - DefaultGRPCPort = 8547 // Default TCP port for the GRPC server + DefaultHTTPHost = "localhost" // Default host interface for the HTTP RPC server + DefaultHTTPPort = 8545 // Default TCP port for the HTTP RPC server + DefaultAuthRpcPort = 8551 // Default TCP port for the Engine API HTTP RPC server + DefaultWSHost = "localhost" // Default host interface for the websocket RPC server + DefaultWSPort = 8546 // Default TCP port for the websocket RPC server + DefaultGRPCHost = "localhost" // Default host interface for the GRPC server + DefaultGRPCPort = 8547 // Default TCP port for the GRPC server ) // DefaultConfig contains reasonable default settings. diff --git a/node/rpcstack.go b/node/rpcstack.go index e2ca438e382..86240416d51 100644 --- a/node/rpcstack.go +++ b/node/rpcstack.go @@ -125,7 +125,7 @@ func (h *httpServer) start() error { } // Initialize the server. - h.server = &http.Server{Handler: h} + h.server = &http.Server{Handler: h} // nolint if h.timeouts != (rpccfg.HTTPTimeouts{}) { CheckTimeouts(&h.timeouts) h.server.ReadTimeout = h.timeouts.ReadTimeout @@ -311,18 +311,6 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig, allowList rpc.All return nil } -// stopWS disables JSON-RPC over WebSocket and also stops the server if it only serves WebSocket. -func (h *httpServer) stopWS() { - h.mu.Lock() - defer h.mu.Unlock() - - if h.disableWS() { - if !h.rpcAllowed() { - h.doStop() - } - } -} - // disableWS disables the WebSocket handler. This is internal, the caller must hold h.mu. 
func (h *httpServer) disableWS() bool { ws := h.wsHandler.Load().(*rpcHandler) diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go index 7ce9be3b76e..c07779c03b7 100644 --- a/node/rpcstack_test.go +++ b/node/rpcstack_test.go @@ -324,7 +324,7 @@ func rpcRequest(t *testing.T, url string, extraHeaders ...string) *http.Response } for i := 0; i < len(extraHeaders); i += 2 { key, value := extraHeaders[i], extraHeaders[i+1] - if strings.ToLower(key) == "host" { + if strings.EqualFold(key, "host") { req.Host = value } else { req.Header.Set(key, value) diff --git a/p2p/dial.go b/p2p/dial.go index d1ce4d57d6d..cadb821d5ef 100644 --- a/p2p/dial.go +++ b/p2p/dial.go @@ -86,13 +86,12 @@ var ( // dialer creates outbound connections and submits them into Server. // Two types of peer connections can be created: // -// - static dials are pre-configured connections. The dialer attempts -// keep these nodes connected at all times. -// -// - dynamic dials are created from node discovery results. The dialer -// continuously reads candidate nodes from its input iterator and attempts -// to create peer connections to nodes arriving through the iterator. +// - static dials are pre-configured connections. The dialer attempts +// keep these nodes connected at all times. // +// - dynamic dials are created from node discovery results. The dialer +// continuously reads candidate nodes from its input iterator and attempts +// to create peer connections to nodes arriving through the iterator. type dialScheduler struct { dialConfig setupFunc dialSetupFunc @@ -158,7 +157,7 @@ func (cfg dialConfig) withDefaults() dialConfig { panic(err) } seed := int64(binary.BigEndian.Uint64(seedb)) - cfg.rand = mrand.New(mrand.NewSource(seed)) + cfg.rand = mrand.New(mrand.NewSource(seed)) // nolint: gosec } return cfg } @@ -336,7 +335,7 @@ func (d *dialScheduler) readNodes(it enode.Iterator) { } // or comes back online. 
-//nolint +// nolint func (d *dialScheduler) logStats() { now := d.clock.Now() if d.lastStatsLog.Add(dialStatsLogInterval) > now { diff --git a/p2p/discover/table.go b/p2p/discover/table.go index 77e36a4c8cc..638159ec885 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -116,7 +116,7 @@ func newTable( initDone: make(chan struct{}), closeReq: make(chan struct{}), closed: make(chan struct{}), - rand: mrand.New(mrand.NewSource(0)), + rand: mrand.New(mrand.NewSource(0)), // nolint: gosec ips: netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit}, revalidateInterval: revalidateInterval, @@ -159,7 +159,7 @@ func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) { tab.mutex.Lock() defer tab.mutex.Unlock() - var nodes []*enode.Node + nodes := make([]*enode.Node, 0, len(&tab.buckets)) for _, b := range &tab.buckets { for _, n := range b.entries { nodes = append(nodes, unwrapNode(n)) diff --git a/p2p/discover/v5_udp.go b/p2p/discover/v5_udp.go index f27a075dc7d..1a88744d581 100644 --- a/p2p/discover/v5_udp.go +++ b/p2p/discover/v5_udp.go @@ -756,6 +756,7 @@ func (t *UDPv5) handleWhoareyou(p *v5wire.Whoareyou, fromID enode.ID, fromAddr * } // matchWithCall checks whether a handshake attempt matches the active call. +// //nolint:unparam func (t *UDPv5) matchWithCall(fromID enode.ID, nonce v5wire.Nonce) (*callV5, error) { c := t.activeCallByAuth[nonce] @@ -789,7 +790,7 @@ func (t *UDPv5) handleFindnode(p *v5wire.Findnode, fromID enode.ID, fromAddr *ne // collectTableNodes creates a FINDNODE result set for the given distances. func (t *UDPv5) collectTableNodes(rip net.IP, distances []uint, limit int) []*enode.Node { - var nodes []*enode.Node + nodes := make([]*enode.Node, 0, len(distances)) var processed = make(map[uint]struct{}) for _, dist := range distances { // Reject duplicate / invalid distances. 
diff --git a/p2p/discover/v5_udp_integration_test.go b/p2p/discover/v5_udp_integration_test.go index 58d432bbc62..ddb7193f4d4 100644 --- a/p2p/discover/v5_udp_integration_test.go +++ b/p2p/discover/v5_udp_integration_test.go @@ -4,11 +4,12 @@ package discover import ( "context" - "github.com/ledgerwatch/erigon/p2p/discover/v5wire" "net" "runtime" "testing" "time" + + "github.com/ledgerwatch/erigon/p2p/discover/v5wire" ) // This test checks that calls with n replies may take up to n * respTimeout. diff --git a/p2p/discover/v5_udp_test.go b/p2p/discover/v5_udp_test.go index a94f1f91e9c..75340491891 100644 --- a/p2p/discover/v5_udp_test.go +++ b/p2p/discover/v5_udp_test.go @@ -172,7 +172,7 @@ func TestUDPv5_findnodeHandling(t *testing.T) { // This request gets all the distance-249 nodes and some more at 248 because // the bucket at 249 is not full. test.packetIn(&v5wire.Findnode{ReqID: []byte{5}, Distances: []uint{249, 248}}) - var nodes []*enode.Node + nodes := make([]*enode.Node, 0, len(nodes249)+len(nodes248[:10])) nodes = append(nodes, nodes249...) nodes = append(nodes, nodes248[:10]...) test.expectNodes([]byte{5}, 5, nodes) diff --git a/p2p/discover/v5wire/encoding.go b/p2p/discover/v5wire/encoding.go index 5e983d0b9a3..3b8023022c0 100644 --- a/p2p/discover/v5wire/encoding.go +++ b/p2p/discover/v5wire/encoding.go @@ -232,6 +232,7 @@ func (c *Codec) writeHeaders(head *Header) { } // makeHeader creates a packet header. +// //nolint:unparam func (c *Codec) makeHeader(toID enode.ID, flag byte, authsizeExtra int) Header { var authsize int @@ -279,6 +280,7 @@ func (c *Codec) encodeRandom(toID enode.ID) (Header, []byte, error) { } // encodeWhoareyou encodes a WHOAREYOU packet. +// //nolint:unparam func (c *Codec) encodeWhoareyou(toID enode.ID, packet *Whoareyou) (Header, error) { // Sanity check node field to catch misbehaving callers. 
@@ -340,6 +342,7 @@ func (c *Codec) encodeHandshakeHeader(toID enode.ID, addr string, challenge *Who } // encodeAuthHeader creates the auth header on a request packet following WHOAREYOU. +// //nolint:unparam func (c *Codec) makeHandshakeAuth(toID enode.ID, addr string, challenge *Whoareyou) (*handshakeAuthData, *session, error) { auth := new(handshakeAuthData) diff --git a/p2p/discover/v5wire/encoding_test.go b/p2p/discover/v5wire/encoding_test.go index 0fcc1bc86cb..b143c4e228b 100644 --- a/p2p/discover/v5wire/encoding_test.go +++ b/p2p/discover/v5wire/encoding_test.go @@ -39,8 +39,7 @@ import ( // To regenerate discv5 test vectors, run // -// go test -run TestVectors -write-test-vectors -// +// go test -run TestVectors -write-test-vectors var writeTestVectorsFlag = flag.Bool("write-test-vectors", false, "Overwrite discv5 test vectors in testdata/") var ( diff --git a/p2p/dnsdisc/tree.go b/p2p/dnsdisc/tree.go index 1da6858f1e4..63f2207bd22 100644 --- a/p2p/dnsdisc/tree.go +++ b/p2p/dnsdisc/tree.go @@ -104,7 +104,7 @@ func (t *Tree) Links() []string { // Nodes returns all nodes contained in the tree. func (t *Tree) Nodes() []*enode.Node { - var nodes []*enode.Node + nodes := make([]*enode.Node, 0, len(t.entries)) for _, e := range t.entries { if ee, ok := e.(*enrEntry); ok { nodes = append(nodes, ee.node) diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go index 61c3836e094..58fb1548ff6 100644 --- a/p2p/enode/nodedb.go +++ b/p2p/enode/nodedb.go @@ -101,7 +101,7 @@ func bucketsConfig(_ kv.TableCfg) kv.TableCfg { func newMemoryDB(logger log.Logger) (*DB, error) { db := &DB{quit: make(chan struct{})} var err error - db.kv, err = mdbx.NewMDBX(logger).InMem().Label(kv.SentryDB).WithTablessCfg(bucketsConfig).Open() + db.kv, err = mdbx.NewMDBX(logger).InMem().Label(kv.SentryDB).WithTableCfg(bucketsConfig).Open() if err != nil { return nil, err } @@ -116,8 +116,9 @@ func newPersistentDB(logger log.Logger, path string) (*DB, error) { db, err = mdbx.NewMDBX(logger). 
Path(path). Label(kv.SentryDB). - WithTablessCfg(bucketsConfig). + WithTableCfg(bucketsConfig). MapSize(1024 * datasize.MB). + GrowthStep(16 * datasize.MB). Flags(func(f uint) uint { return f ^ mdbx1.Durable | mdbx1.SafeNoSync }). SyncPeriod(2 * time.Second). Open() diff --git a/p2p/enode/urlv4.go b/p2p/enode/urlv4.go index 724806e7ca7..6145178bc7f 100644 --- a/p2p/enode/urlv4.go +++ b/p2p/enode/urlv4.go @@ -53,8 +53,8 @@ func MustParseV4(rawurl string) *Node { // // For incomplete nodes, the designator must look like one of these // -// enode:// -// +// enode:// +// // // For complete nodes, the node ID is encoded in the username portion // of the URL, separated from the host by an @ sign. The hostname can @@ -67,7 +67,7 @@ func MustParseV4(rawurl string) *Node { // a node with IP address 10.3.58.6, TCP listening port 30303 // and UDP discovery port 30301. // -// enode://@10.3.58.6:30303?discport=30301 +// enode://@10.3.58.6:30303?discport=30301 func ParseV4(rawurl string) (*Node, error) { if m := incompleteNodeURL.FindStringSubmatch(rawurl); m != nil { id, err := parsePubkey(m[1]) diff --git a/p2p/enr/enr.go b/p2p/enr/enr.go index 10f2f79c528..f28b233f217 100644 --- a/p2p/enr/enr.go +++ b/p2p/enr/enr.go @@ -19,7 +19,7 @@ // stored in key/value pairs. To store and retrieve key/values in a record, use the Entry // interface. // -// Signature Handling +// # Signature Handling // // Records must be signed before transmitting them to another node. // diff --git a/p2p/message.go b/p2p/message.go index b277ce39dac..ed1067c9a43 100644 --- a/p2p/message.go +++ b/p2p/message.go @@ -111,12 +111,11 @@ func Send(w MsgWriter, msgcode uint64, data interface{}) error { // SendItems writes an RLP with the given code and data elements. 
// For a call such as: // -// SendItems(w, code, e1, e2, e3) +// SendItems(w, code, e1, e2, e3) // // the message payload will be an RLP list containing the items: // -// [e1, e2, e3] -// +// [e1, e2, e3] func SendItems(w MsgWriter, msgcode uint64, elems ...interface{}) error { defer debug.LogPanic() return Send(w, msgcode, elems) diff --git a/p2p/nat/nat.go b/p2p/nat/nat.go index 05aaacdb209..9e328ecf672 100644 --- a/p2p/nat/nat.go +++ b/p2p/nat/nat.go @@ -55,14 +55,14 @@ type Interface interface { // The following formats are currently accepted. // Note that mechanism names are not case-sensitive. // -// "" or "none" return nil -// "extip:77.12.33.4" will assume the local machine is reachable on the given IP -// "any" uses the first auto-detected mechanism -// "upnp" uses the Universal Plug and Play protocol -// "pmp" uses NAT-PMP with an auto-detected gateway address -// "pmp:192.168.0.1" uses NAT-PMP with the given gateway address -// "stun" uses STUN to detect an external IP using a default server -// "stun:" uses STUN to detect an external IP using the given server (host:port) +// "" or "none" return nil +// "extip:77.12.33.4" will assume the local machine is reachable on the given IP +// "any" uses the first auto-detected mechanism +// "upnp" uses the Universal Plug and Play protocol +// "pmp" uses NAT-PMP with an auto-detected gateway address +// "pmp:192.168.0.1" uses NAT-PMP with the given gateway address +// "stun" uses STUN to detect an external IP using a default server +// "stun:" uses STUN to detect an external IP using the given server (host:port) func Parse(spec string) (Interface, error) { var ( parts = strings.SplitN(spec, ":", 2) diff --git a/p2p/nat/nat_stun.go b/p2p/nat/nat_stun.go index 03e4a5e5ccb..321eca17606 100644 --- a/p2p/nat/nat_stun.go +++ b/p2p/nat/nat_stun.go @@ -2,9 +2,10 @@ package nat import ( "fmt" - "github.com/pion/stun" "net" "time" + + "github.com/pion/stun" ) const STUNDefaultServerAddr = "stun.l.google.com:19302" diff 
--git a/p2p/nat/natupnp_test.go b/p2p/nat/natupnp_test.go index e468f910ed5..7788aacace4 100644 --- a/p2p/nat/natupnp_test.go +++ b/p2p/nat/natupnp_test.go @@ -220,7 +220,7 @@ func (dev *fakeIGD) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (dev *fakeIGD) replaceListenAddr(resp string) string { - return strings.Replace(resp, "{{listenAddr}}", dev.listener.Addr().String(), -1) + return strings.ReplaceAll(resp, "{{listenAddr}}", dev.listener.Addr().String()) } func (dev *fakeIGD) listen() (err error) { diff --git a/p2p/node_key_config.go b/p2p/node_key_config.go index dd46aee8f7d..ad32a58ef7d 100644 --- a/p2p/node_key_config.go +++ b/p2p/node_key_config.go @@ -3,9 +3,10 @@ package p2p import ( "crypto/ecdsa" "fmt" - "github.com/ledgerwatch/erigon/crypto" "os" "path" + + "github.com/ledgerwatch/erigon/crypto" ) type NodeKeyConfig struct { diff --git a/p2p/peer.go b/p2p/peer.go index 93222940e26..abc67526cc7 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -490,7 +490,7 @@ type PeerInfo struct { // Info gathers and returns a collection of metadata known about a peer. func (p *Peer) Info() *PeerInfo { // Gather the protocol capabilities - var caps []string + caps := make([]string, 0, len(p.Caps())) for _, cap := range p.Caps() { caps = append(caps, cap.String()) } diff --git a/p2p/rlpx/rlpx.go b/p2p/rlpx/rlpx.go index ae253e1a785..a094bb8df79 100644 --- a/p2p/rlpx/rlpx.go +++ b/p2p/rlpx/rlpx.go @@ -544,7 +544,7 @@ func (h *handshakeState) runInitiator(conn io.ReadWriter, prv *ecdsa.PrivateKey, func (h *handshakeState) makeAuthMsg(prv *ecdsa.PrivateKey) (*authMsgV4, error) { // Generate random initiator nonce. h.initNonce = make([]byte, shaLen) - _, err := rand.Read(h.initNonce) + _, err := rand.Read(h.initNonce) // nolint: gosec if err != nil { return nil, err } @@ -582,7 +582,7 @@ func (h *handshakeState) handleAuthResp(msg *authRespV4) (err error) { func (h *handshakeState) makeAuthResp() (msg *authRespV4, err error) { // Generate random nonce. 
h.respNonce = make([]byte, shaLen) - if _, err = rand.Read(h.respNonce); err != nil { + if _, err = rand.Read(h.respNonce); err != nil { // nolint: gosec return nil, err } diff --git a/p2p/simulations/adapters/types.go b/p2p/simulations/adapters/types.go index d6b59c22d77..af275e91a7a 100644 --- a/p2p/simulations/adapters/types.go +++ b/p2p/simulations/adapters/types.go @@ -41,7 +41,6 @@ import ( // * SimNode - An in-memory node // * ExecNode - A child process node // * DockerNode - A Docker container node -// type Node interface { // Addr returns the node's address (e.g. an Enode URL) Addr() []byte @@ -286,30 +285,3 @@ func RegisterLifecycles(lifecycles LifecycleConstructors) { lifecycleConstructorFuncs[name] = f } } - -// adds the host part to the configuration's ENR, signs it -// creates and the corresponding enode object to the configuration -func (n *NodeConfig) initEnode(ip net.IP, tcpport int, udpport int) error { - enrIp := enr.IP(ip) - n.Record.Set(&enrIp) - enrTcpPort := enr.TCP(tcpport) - n.Record.Set(&enrTcpPort) - enrUdpPort := enr.UDP(udpport) - n.Record.Set(&enrUdpPort) - - err := enode.SignV4(&n.Record, n.PrivateKey) - if err != nil { - return fmt.Errorf("unable to generate ENR: %v", err) - } - nod, err := enode.New(enode.V4ID{}, &n.Record) - if err != nil { - return fmt.Errorf("unable to create enode: %v", err) - } - log.Trace("simnode new", "record", n.Record) - n.node = nod - return nil -} - -func (n *NodeConfig) initDummyEnode() error { - return n.initEnode(net.IPv4(127, 0, 0, 1), int(n.Port), 0) -} diff --git a/p2p/simulations/mocker.go b/p2p/simulations/mocker.go index 9c3f76d2e28..5e97094a5ef 100644 --- a/p2p/simulations/mocker.go +++ b/p2p/simulations/mocker.go @@ -29,20 +29,20 @@ import ( "github.com/ledgerwatch/log/v3" ) -//a map of mocker names to its function +// a map of mocker names to its function var mockerList = map[string]func(net *Network, quit chan struct{}, nodeCount int){ "startStop": startStop, "probabilistic": probabilistic, 
"boot": boot, } -//Lookup a mocker by its name, returns the mockerFn +// Lookup a mocker by its name, returns the mockerFn func LookupMocker(mockerType string) func(net *Network, quit chan struct{}, nodeCount int) { return mockerList[mockerType] } -//Get a list of mockers (keys of the map) -//Useful for frontend to build available mocker selection +// Get a list of mockers (keys of the map) +// Useful for frontend to build available mocker selection func GetMockerList() []string { list := make([]string, 0, len(mockerList)) for k := range mockerList { @@ -51,7 +51,7 @@ func GetMockerList() []string { return list } -//The boot mockerFn only connects the node in a ring and doesn't do anything else +// The boot mockerFn only connects the node in a ring and doesn't do anything else func boot(net *Network, quit chan struct{}, nodeCount int) { _, err := connectNodesInRing(net, nodeCount) if err != nil { @@ -59,7 +59,7 @@ func boot(net *Network, quit chan struct{}, nodeCount int) { } } -//The startStop mockerFn stops and starts nodes in a defined period (ticker) +// The startStop mockerFn stops and starts nodes in a defined period (ticker) func startStop(net *Network, quit chan struct{}, nodeCount int) { nodes, err := connectNodesInRing(net, nodeCount) if err != nil { @@ -96,10 +96,10 @@ func startStop(net *Network, quit chan struct{}, nodeCount int) { } } -//The probabilistic mocker func has a more probabilistic pattern -//(the implementation could probably be improved): -//nodes are connected in a ring, then a varying number of random nodes is selected, -//mocker then stops and starts them in random intervals, and continues the loop +// The probabilistic mocker func has a more probabilistic pattern +// (the implementation could probably be improved): +// nodes are connected in a ring, then a varying number of random nodes is selected, +// mocker then stops and starts them in random intervals, and continues the loop func probabilistic(net *Network, quit chan struct{}, 
nodeCount int) { nodes, err := connectNodesInRing(net, nodeCount) if err != nil { @@ -168,7 +168,7 @@ func probabilistic(net *Network, quit chan struct{}, nodeCount int) { } -//connect nodeCount number of nodes in a ring +// connect nodeCount number of nodes in a ring func connectNodesInRing(net *Network, nodeCount int) ([]enode.ID, error) { ids := make([]enode.ID, nodeCount) for i := 0; i < nodeCount; i++ { diff --git a/params/bootnodes.go b/params/bootnodes.go index 66614bf8801..382360325d3 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -76,13 +76,6 @@ var GoerliBootnodes = []string{ "enode://d2b720352e8216c9efc470091aa91ddafc53e222b32780f505c817ceef69e01d5b0b0797b69db254c586f493872352f5a022b4d8479a00fc92ec55f9ad46a27e@88.99.70.182:30303", } -var KilnDevNetBootNodes = []string{ - "enode://c354db99124f0faf677ff0e75c3cbbd568b2febc186af664e0c51ac435609badedc67a18a63adb64dacc1780a28dcefebfc29b83fd1a3f4aa3c0eb161364cf94@164.92.130.5:30303", - "enode://d41af1662434cad0a88fe3c7c92375ec5719f4516ab6d8cb9695e0e2e815382c767038e72c224e04040885157da47422f756c040a9072676c6e35c5b1a383cce@138.68.66.103:30303", - "enode://91a745c3fb069f6b99cad10b75c463d527711b106b622756e9ef9f12d2631b6cb885f831d1c8731b9bc7177cae5e1ea1f1be087f86d7d30b590a91f22bc041b0@165.232.180.230:30303", - "enode://b74bd2e8a9f0c53f0c93bcce80818f2f19439fd807af5c7fbc3efb10130c6ee08be8f3aaec7dc0a057ad7b2a809c8f34dc62431e9b6954b07a6548cc59867884@164.92.140.200:30303", -} - var BscBootnodes = []string{ "enode://1cc4534b14cfe351ab740a1418ab944a234ca2f702915eadb7e558a02010cb7c5a8c295a3b56bcefa7701c07752acd5539cb13df2aab8ae2d98934d712611443@52.71.43.172:30311", "enode://28b1d16562dac280dacaaf45d54516b85bc6c994252a9825c5cc4e080d3e53446d05f63ba495ea7d44d6c316b54cd92b245c5c328c37da24605c4a93a0d099c4@34.246.65.14:30311", @@ -254,8 +247,6 @@ func BootnodeURLsOfChain(chain string) []string { return RinkebyBootnodes case networkname.GoerliChainName: return GoerliBootnodes - case networkname.KilnDevnetChainName: 
- return KilnDevNetBootNodes case networkname.BSCChainName: return BscBootnodes case networkname.ChapelChainName: diff --git a/params/chainspecs/bor-devnet.json b/params/chainspecs/bor-devnet.json index bdf9eb7ba21..005fb7d73f2 100644 --- a/params/chainspecs/bor-devnet.json +++ b/params/chainspecs/bor-devnet.json @@ -1,43 +1,41 @@ { - "ChainName": "bor-devnet", - "chainId": 1337, - "consensus": "bor", - "homesteadBlock": 0, - "daoForkSupport": true, - "eip150Block": 0, - "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "eip155Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "muirGlacierBlock": 0, - "berlinBlock": 0, - "londonBlock": 0, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "bor": { - "period": { - "0": 5 - }, - "producerDelay": 6, - "sprint": 64, - "backupMultiplier": { - "0": 5 - }, - "validatorContract": "0x0000000000000000000000000000000000001000", - "stateReceiverContract": "0x0000000000000000000000000000000000001001", - "blockAlloc": { - "22156660": { - "0000000000000000000000000000000000001010": { - "balance": "0x0", - "code": 
"0x60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed57600080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610caa565b005b3480156103eb5
7600080fd5b506103f4610dfc565b604051808260ff1660ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610e05565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610fc1565b005b3480156104e857600080fd5b506104f1611090565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b506105486110b6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506110dc565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b506106046110fd565b005b34801561061257600080fd5b5061061b6111cd565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506111d3565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b50610758611358565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af611381565b604051808215151515815260200191505060405180910390f35b348015610
7d557600080fd5b506107de6113d8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e611411565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061144e565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b50610964611474565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b8101908080359060200190929190505050611501565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019092919080359060200190929190505050611521565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a65611541565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a90611546565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb61154c565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506115d9565b005b348015610b2e57600080fd5b50610b376115f6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565
b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b60006040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b60003390506000610cba826110dc565b9050610cd18360065461161c90919063ffffffff16565b600681905550600083118015610ce657508234145b610d58576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610dd4876110dc565b60405180848152602001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b610e0d611381565b610e1657600080fd5b600081118015610e535750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b610ea8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611da76023913960400191505060405180910390fd5b6000610eb3836110dc565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f19350505050158015610f00573d6000803e3d6000fd5b50610f168360065461163c90919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f68585610f98896110dc565b60405180848
152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611027576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611d846023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555061108c8261165b565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b611105611381565b61110e57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b60008060008060418551146111ee5760009350505050611352565b602085015192506040850151915060ff6041860151169050601b8160ff16101561121957601b810190505b601b8160ff16141580156112315750601c8160ff1614155b156112425760009350505050611352565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa15801561129f573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141561134e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b600080600
09054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600181526020017f890000000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b6000813414611460576000905061146e565b61146b338484611753565b90505b92915050565b6040518060800160405280605b8152602001611e1c605b91396040516020018082805190602001908083835b602083106114c357805182526020820191506020810190506020830392506114a0565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061153761153286868686611b10565b611be6565b9050949350505050565b608981565b60015481565b604051806080016040528060528152602001611dca605291396040516020018082805190602001908083835b6020831061159b5780518252602082019150602081019050602083039250611578565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b6115e1611381565b6115ea57600080fd5b6115f38161165b565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008282111561162b57600080fd5b600082840390508091505092915050565b60008082840190508381101561165157600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141561169557600080fd5b8073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000803073ffffffffffffffffffffffffffffffffffffffff166
370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117d357600080fd5b505afa1580156117e7573d6000803e3d6000fd5b505050506040513d60208110156117fd57600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b15801561188f57600080fd5b505afa1580156118a3573d6000803e3d6000fd5b505050506040513d60208110156118b957600080fd5b810190808051906020019092919050505090506118d7868686611c30565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156119df57600080fd5b505afa1580156119f3573d6000803e3d6000fd5b505050506040513d6020811015611a0957600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a9757600080fd5b505afa158015611aab573d6000803e3d6000fd5b505050506040513d6020811015611ac157600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b6000806040518060800160405280605b8152602001611e1c605b91396040516020018082805190602001908083835b60208310611b625780518252602082019150602081019
050602083039250611b3f565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff8716602082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b3073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415611cd2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f63616e27742073656e6420746f204d524332300000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611d18573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a72315820a4a6f71a98ac3fc613c3a8f1e2e11b9eb9b6b39f125f7d9508916c2b8fb02c7164736f6c63430005100032" - } + "ChainName": "bor-devnet", + "chainId": 1337, + "consensus": "bor", + "homesteadBlock": 0, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "byzantiumBlock": 0, + 
"constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "bor": { + "period": { + "0": 5 + }, + "producerDelay": 6, + "sprint": 64, + "backupMultiplier": { + "0": 5 + }, + "validatorContract": "0x0000000000000000000000000000000000001000", + "stateReceiverContract": "0x0000000000000000000000000000000000001001", + "blockAlloc": { + "22156660": { + "0000000000000000000000000000000000001010": { + "balance": "0x0", + "code": "0x60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed576
00080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610caa565b005b3480156103eb57600080fd5b506103f4610dfc565b604051808260ff1660ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610e05565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610fc1565b005b3480156104e857600080fd5b506104f1611090565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b506105486110b6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506110dc565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b506106046110fd565b005b34801561061257600080fd5b5061061b6111cd565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526
02001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506111d3565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b50610758611358565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af611381565b604051808215151515815260200191505060405180910390f35b3480156107d557600080fd5b506107de6113d8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e611411565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061144e565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b50610964611474565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b8101908080359060200190929190505050611501565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019092919080359060200190929190505050611521565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a65611541565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a90611546565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb61154
c565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506115d9565b005b348015610b2e57600080fd5b50610b376115f6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b60006040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b60003390506000610cba826110dc565b9050610cd18360065461161c90919063ffffffff16565b600681905550600083118015610ce657508234145b610d58576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610dd4876110dc565b60405180848152602001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b610e0d611381565b610e1657600080fd5b600081118015610e535750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b610ea8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611da760239139604001915050604
05180910390fd5b6000610eb3836110dc565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f19350505050158015610f00573d6000803e3d6000fd5b50610f168360065461163c90919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f68585610f98896110dc565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611027576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611d846023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555061108c8261165b565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b611105611381565b61110e57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b60008060008060418551146111ee5760009350505050611352565b602085015192506040850151915060ff6041860151169050601b8160ff16101561121957601b810190505b601b8160ff16141580156112315750601c8160ff1614155b156112425760009350505050611352565b60018682858560405160008152602001604052604051808581526020018460ff1660ff16815260200183815260200182815260200194505050505060206040516020810390808
40390855afa15801561129f573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141561134e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600181526020017f890000000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b6000813414611460576000905061146e565b61146b338484611753565b90505b92915050565b6040518060800160405280605b8152602001611e1c605b91396040516020018082805190602001908083835b602083106114c357805182526020820191506020810190506020830392506114a0565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061153761153286868686611b10565b611be6565b9050949350505050565b608981565b60015481565b604051806080016040528060528152602001611dca605291396040516020018082805190602001908083835b6020831061159b5780518252602082019150602081019050602083039250611578565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b6115e1611381565b6115ea57600080fd5b6115f38161165b565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008282111561162b57600080fd5b600082840390508091505092915050565b60008082840190508381101561165157600080fd5b8091505092915050565b600073fffffffffffffffffffffffffffffffffffff
fff168173ffffffffffffffffffffffffffffffffffffffff16141561169557600080fd5b8073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117d357600080fd5b505afa1580156117e7573d6000803e3d6000fd5b505050506040513d60208110156117fd57600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b15801561188f57600080fd5b505afa1580156118a3573d6000803e3d6000fd5b505050506040513d60208110156118b957600080fd5b810190808051906020019092919050505090506118d7868686611c30565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156119df57600080fd5b505afa1580156119f3573d6000803e3d6000fd5b505050506040513d6020811015611a0957600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273fffffffffffffffffffffffff
fffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a9757600080fd5b505afa158015611aab573d6000803e3d6000fd5b505050506040513d6020811015611ac157600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b6000806040518060800160405280605b8152602001611e1c605b91396040516020018082805190602001908083835b60208310611b625780518252602082019150602081019050602083039250611b3f565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff8716602082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b3073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415611cd2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f63616e27742073656e6420746f204d524332300000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611d18573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696
e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a72315820a4a6f71a98ac3fc613c3a8f1e2e11b9eb9b6b39f125f7d9508916c2b8fb02c7164736f6c63430005100032" } - }, - "burntContract": { - "23850000": "0x70bca57f4579f58670ab2d18ef16e02c17553c38" - }, - "jaipurBlock": 0 - } + } + }, + "burntContract": { + "23850000": "0x70bca57f4579f58670ab2d18ef16e02c17553c38" + }, + "jaipurBlock": 0 } - \ No newline at end of file +} diff --git a/params/chainspecs/bor-mainnet.json b/params/chainspecs/bor-mainnet.json index df6e684abc4..a0dd2331f4a 100644 --- a/params/chainspecs/bor-mainnet.json +++ b/params/chainspecs/bor-mainnet.json @@ -14,7 +14,6 @@ "muirGlacierBlock": 3395000, "berlinBlock": 14750000, "londonBlock": 23850000, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "bor": { "period": { "0": 2 diff --git a/params/chainspecs/bsc.json b/params/chainspecs/bsc.json index 96edfaf4d00..711d412c8d7 100644 --- a/params/chainspecs/bsc.json +++ b/params/chainspecs/bsc.json @@ -16,7 +16,7 @@ "mirrorSyncBlock": 5184000, "brunoBlock": 13082000, "eulerBlock": 18907621, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nanoBlock": 21962149, "parlia": { "DBPath": "", "InMemory": false, diff --git a/params/chainspecs/chapel.json b/params/chainspecs/chapel.json index 0a1e8fbcf94..03283d7cca0 100644 --- a/params/chainspecs/chapel.json +++ b/params/chainspecs/chapel.json @@ -16,7 +16,8 @@ "mirrorSyncBlock": 5582500, "brunoBlock": 13837000, "eulerBlock": 19203503, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "gibbsBlock": 22800220, + "nanoBlock": 23482428, "parlia": { "DBPath": "", "InMemory": false, diff --git a/params/chainspecs/fermion.json b/params/chainspecs/fermion.json index 856f3f81f25..7872071b198 100644 --- a/params/chainspecs/fermion.json +++ b/params/chainspecs/fermion.json @@ -13,7 
+13,6 @@ "muirGlacierBlock": 0, "berlinBlock": 0, "londonBlock": 0, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "clique": { "period": 15, "epoch": 30000 diff --git a/params/chainspecs/gnosis.json b/params/chainspecs/gnosis.json index 9a0567a029e..68b6652dfe0 100644 --- a/params/chainspecs/gnosis.json +++ b/params/chainspecs/gnosis.json @@ -10,9 +10,9 @@ "constantinopleBlock": 1604400, "petersburgBlock": 2508800, "istanbulBlock": 7298030, + "posdaoBlock": 9186425, "berlinBlock": 16101500, "londonBlock": 19040000, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "aura": { "DBPath": "", "InMemory": false, diff --git a/params/chainspecs/goerli.json b/params/chainspecs/goerli.json index 16a8cfede28..d48db577ecd 100644 --- a/params/chainspecs/goerli.json +++ b/params/chainspecs/goerli.json @@ -14,7 +14,7 @@ "berlinBlock": 4460644, "londonBlock": 5062605, "terminalTotalDifficulty": 10790000, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "terminalTotalDifficultyPassed": true, "clique": { "period": 15, "epoch": 30000 diff --git a/params/chainspecs/kiln-devnet.json b/params/chainspecs/kiln-devnet.json deleted file mode 100644 index 3553e08f056..00000000000 --- a/params/chainspecs/kiln-devnet.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "ChainName": "kiln-devnet", - "chainId": 1337802, - "consensus": "ethash", - "homesteadBlock": 0, - "eip150Block": 0, - "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "eip155Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "berlinBlock": 0, - "londonBlock": 0, - "terminalTotalDifficulty": 20000000000000, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "mergeNetsplitBlock": 1000, - "ethash": {} -} diff --git a/params/chainspecs/mainnet.json 
b/params/chainspecs/mainnet.json index 74e4e2eb363..53c4920b92e 100644 --- a/params/chainspecs/mainnet.json +++ b/params/chainspecs/mainnet.json @@ -17,6 +17,7 @@ "londonBlock": 12965000, "arrowGlacierBlock": 13773000, "grayGlacierBlock": 15050000, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "terminalTotalDifficulty": 58750000000000000000000, + "terminalTotalDifficultyPassed": true, "ethash": {} } diff --git a/params/chainspecs/mumbai.json b/params/chainspecs/mumbai.json index 92f2a6c7c77..78e9bab9935 100644 --- a/params/chainspecs/mumbai.json +++ b/params/chainspecs/mumbai.json @@ -14,7 +14,6 @@ "muirGlacierBlock": 2722000, "berlinBlock": 13996000, "londonBlock": 22640000, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "bor": { "period": { "0": 2, diff --git a/params/chainspecs/rialto.json b/params/chainspecs/rialto.json index e11de7d1443..9f5b7936cb1 100644 --- a/params/chainspecs/rialto.json +++ b/params/chainspecs/rialto.json @@ -15,7 +15,8 @@ "nielsBlock": 0, "mirrorSyncBlock": 400, "brunoBlock": 400, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eulerBlock": 400, + "gibbsBlock": 400, "parlia": { "DBPath": "", "InMemory": false, diff --git a/params/chainspecs/rinkeby.json b/params/chainspecs/rinkeby.json index 6ebecf052a8..4324e89bf2a 100644 --- a/params/chainspecs/rinkeby.json +++ b/params/chainspecs/rinkeby.json @@ -13,7 +13,6 @@ "istanbulBlock": 5435345, "berlinBlock": 8290928, "londonBlock": 8897988, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "clique": { "period": 15, "epoch": 30000 diff --git a/params/chainspecs/ropsten.json b/params/chainspecs/ropsten.json index fd3808b5ae7..2d8043be1de 100644 --- a/params/chainspecs/ropsten.json +++ b/params/chainspecs/ropsten.json @@ -15,6 +15,6 @@ "berlinBlock": 9812189, "londonBlock": 10499401, "terminalTotalDifficulty": 
50000000000000000, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "terminalTotalDifficultyPassed": true, "ethash": {} } diff --git a/params/chainspecs/sepolia.json b/params/chainspecs/sepolia.json index 628a7440ded..ca8ce29a7ec 100644 --- a/params/chainspecs/sepolia.json +++ b/params/chainspecs/sepolia.json @@ -15,6 +15,7 @@ "berlinBlock": 0, "londonBlock": 0, "terminalTotalDifficulty": 17000000000000000, + "terminalTotalDifficultyPassed": true, "mergeNetsplitBlock": 1735371, "ethash": {} } diff --git a/params/chainspecs/sokol.json b/params/chainspecs/sokol.json index ea8d6fc24ff..a69ecba07e3 100644 --- a/params/chainspecs/sokol.json +++ b/params/chainspecs/sokol.json @@ -11,7 +11,6 @@ "petersburgBlock": 7026400, "istanbulBlock": 12095200, "berlinBlock": 21050600, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "aura": { "DBPath": "", "InMemory": false, diff --git a/params/config.go b/params/config.go index 200af283c7f..a4ab648f787 100644 --- a/params/config.go +++ b/params/config.go @@ -65,7 +65,6 @@ var ( RopstenGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177") GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") - KilnDevnetGensisHash = common.HexToHash("0x51c7fe41be669f69c45c33a56982cbde405313342d9e2b00d7c91a7b284dd4f8") SokolGenesisHash = common.HexToHash("0x5b28c1bfd3a15230c9a46b399cd0f9a6920d432e85381cc6a140b06e8410112f") FermionGenesisHash = common.HexToHash("0x0658360d8680ead416900a552b67b84e6d575c7f0ecab3dbe42406f9f8c34c35") BSCGenesisHash = common.HexToHash("0x0d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5b") @@ -84,6 +83,7 @@ var ( var ( SokolGenesisStateRoot = 
common.HexToHash("0xfad4af258fd11939fae0c6c6eec9d340b1caac0b0196fd9a1bc3f489c5bf00b3") FermionGenesisStateRoot = common.HexToHash("0x08982dc16236c51b6d9aff8b76cd0faa7067eb55eba62395d5a82649d8fb73c4") + GnosisGenesisStateRoot = common.HexToHash("0x40cf4430ecaa733787d1a65154a3b9efb560c95d9e324a23b97f0609b539133b") ) var ( @@ -102,8 +102,6 @@ var ( // GoerliChainConfig contains the chain parameters to run a node on the Görli test network. GoerliChainConfig = readChainSpec("chainspecs/goerli.json") - KilnDevnetChainConfig = readChainSpec("chainspecs/kiln-devnet.json") - BSCChainConfig = readChainSpec("chainspecs/bsc.json") ChapelChainConfig = readChainSpec("chainspecs/chapel.json") @@ -246,18 +244,25 @@ type ChainConfig struct { ArrowGlacierBlock *big.Int `json:"arrowGlacierBlock,omitempty"` // EIP-4345 (bomb delay) switch block (nil = no fork, 0 = already activated) GrayGlacierBlock *big.Int `json:"grayGlacierBlock,omitempty"` // EIP-5133 (bomb delay) switch block (nil = no fork, 0 = already activated) + // EIP-3675: Upgrade consensus to Proof-of-Stake + TerminalTotalDifficulty *big.Int `json:"terminalTotalDifficulty,omitempty"` // The merge happens when terminal total difficulty is reached + TerminalTotalDifficultyPassed bool `json:"terminalTotalDifficultyPassed,omitempty"` // Disable PoW sync for networks that have already passed through the Merge + MergeNetsplitBlock *big.Int `json:"mergeNetsplitBlock,omitempty"` // Virtual fork after The Merge to use as a network splitter; see FORK_NEXT_VALUE in EIP-3675 + + ShanghaiBlock *big.Int `json:"shanghaiBlock,omitempty"` // Shanghai switch block (nil = no fork, 0 = already activated) + CancunBlock *big.Int `json:"cancunBlock,omitempty"` // Cancun switch block (nil = no fork, 0 = already activated) + // Parlia fork blocks RamanujanBlock *big.Int `json:"ramanujanBlock,omitempty" toml:",omitempty"` // ramanujanBlock switch block (nil = no fork, 0 = already activated) NielsBlock *big.Int `json:"nielsBlock,omitempty" 
toml:",omitempty"` // nielsBlock switch block (nil = no fork, 0 = already activated) MirrorSyncBlock *big.Int `json:"mirrorSyncBlock,omitempty" toml:",omitempty"` // mirrorSyncBlock switch block (nil = no fork, 0 = already activated) BrunoBlock *big.Int `json:"brunoBlock,omitempty" toml:",omitempty"` // brunoBlock switch block (nil = no fork, 0 = already activated) EulerBlock *big.Int `json:"eulerBlock,omitempty" toml:",omitempty"` // eulerBlock switch block (nil = no fork, 0 = already activated) + GibbsBlock *big.Int `json:"gibbsBlock,omitempty" toml:",omitempty"` // gibbsBlock switch block (nil = no fork, 0 = already activated) + NanoBlock *big.Int `json:"nanoBlock,omitempty" toml:",omitempty"` // nanoBlock switch block (nil = no fork, 0 = already activated) - // EIP-3675: Upgrade consensus to Proof-of-Stake - TerminalTotalDifficulty *big.Int `json:"terminalTotalDifficulty,omitempty"` // The merge happens when terminal total difficulty is reached - TerminalBlockNumber *big.Int `json:"terminalBlockNumber,omitempty"` // Enforce particular terminal block; see TerminalBlockNumber in EIP-3675 - TerminalBlockHash common.Hash `json:"terminalBlockHash,omitempty"` // Enforce particular terminal block; see TERMINAL_BLOCK_HASH in EIP-3675 - MergeNetsplitBlock *big.Int `json:"mergeNetsplitBlock,omitempty"` // Virtual fork after The Merge to use as a network splitter; see FORK_NEXT_VALUE in EIP-3675 + // Gnosis Chain fork blocks + PosdaoBlock *big.Int `json:"posdaoBlock,omitempty"` // Various consensus engines Ethash *EthashConfig `json:"ethash,omitempty"` @@ -394,18 +399,20 @@ func (c *ChainConfig) String() string { // TODO Covalent: Refactor to more generic approach and potentially introduce tag for "ecosystem" field (Ethereum, BSC, etc.) 
if c.Consensus == ParliaConsensus { - return fmt.Sprintf("{ChainID: %v Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Euler: %v, Engine: %v}", + return fmt.Sprintf("{ChainID: %v Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Euler: %v, Gibbs: %v, Nano: %v, Engine: %v}", c.ChainID, c.RamanujanBlock, c.NielsBlock, c.MirrorSyncBlock, c.BrunoBlock, c.EulerBlock, + c.GibbsBlock, + c.NanoBlock, engine, ) } - return fmt.Sprintf("{ChainID: %v, Homestead: %v, DAO: %v, DAO Support: %v, Tangerine Whistle: %v, Spurious Dragon: %v, Byzantium: %v, Constantinople: %v, Petersburg: %v, Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Gray Glacier: %v, Terminal Total Difficulty: %v, Merge Netsplit: %v, Engine: %v}", + return fmt.Sprintf("{ChainID: %v, Homestead: %v, DAO: %v, DAO Support: %v, Tangerine Whistle: %v, Spurious Dragon: %v, Byzantium: %v, Constantinople: %v, Petersburg: %v, Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Gray Glacier: %v, Terminal Total Difficulty: %v, Merge Netsplit: %v, Shanghai: %v, Cancun: %v, Engine: %v}", c.ChainID, c.HomesteadBlock, c.DAOForkBlock, @@ -423,6 +430,8 @@ func (c *ChainConfig) String() string { c.GrayGlacierBlock, c.TerminalTotalDifficulty, c.MergeNetsplitBlock, + c.ShanghaiBlock, + c.CancunBlock, engine, ) } @@ -534,6 +543,24 @@ func (c *ChainConfig) IsOnEuler(num *big.Int) bool { return configNumEqual(c.EulerBlock, num) } +// IsGibbs returns whether num is either equal to the euler fork block or greater. +func (c *ChainConfig) IsGibbs(num *big.Int) bool { + return isForked(c.GibbsBlock, num.Uint64()) +} + +func (c *ChainConfig) IsOnGibbs(num *big.Int) bool { + return configNumEqual(c.GibbsBlock, num) +} + +// IsNano returns whether num is either equal to the euler fork block or greater. 
+func (c *ChainConfig) IsNano(num uint64) bool { + return isForked(c.NanoBlock, num) +} + +func (c *ChainConfig) IsOnNano(num *big.Int) bool { + return configNumEqual(c.NanoBlock, num) +} + // IsMuirGlacier returns whether num is either equal to the Muir Glacier (EIP-2384) fork block or greater. func (c *ChainConfig) IsMuirGlacier(num uint64) bool { return isForked(c.MuirGlacierBlock, num) @@ -571,6 +598,16 @@ func (c *ChainConfig) IsGrayGlacier(num uint64) bool { return isForked(c.GrayGlacierBlock, num) } +// IsShanghai returns whether num is either equal to the Shanghai fork block or greater. +func (c *ChainConfig) IsShanghai(num uint64) bool { + return isForked(c.ShanghaiBlock, num) +} + +// IsCancun returns whether num is either equal to the Cancun fork block or greater. +func (c *ChainConfig) IsCancun(num uint64) bool { + return isForked(c.CancunBlock, num) +} + // CheckCompatible checks whether scheduled fork transitions have been imported // with a mismatching chain configuration. 
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64) *ConfigCompatError { @@ -612,11 +649,14 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "istanbulBlock", block: c.IstanbulBlock}, {name: "muirGlacierBlock", block: c.MuirGlacierBlock, optional: true}, {name: "eulerBlock", block: c.EulerBlock, optional: true}, + {name: "gibbsBlock", block: c.GibbsBlock, optional: true}, {name: "berlinBlock", block: c.BerlinBlock}, {name: "londonBlock", block: c.LondonBlock}, {name: "arrowGlacierBlock", block: c.ArrowGlacierBlock, optional: true}, {name: "grayGlacierBlock", block: c.GrayGlacierBlock, optional: true}, {name: "mergeNetsplitBlock", block: c.MergeNetsplitBlock, optional: true}, + {name: "shanghaiBlock", block: c.ShanghaiBlock}, + {name: "cancunBlock", block: c.CancunBlock}, } { if lastFork.name != "" { // Next one must be higher number @@ -693,6 +733,12 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head uint64) *ConfigC if isForkIncompatible(c.MergeNetsplitBlock, newcfg.MergeNetsplitBlock, head) { return newCompatError("Merge netsplit block", c.MergeNetsplitBlock, newcfg.MergeNetsplitBlock) } + if isForkIncompatible(c.ShanghaiBlock, newcfg.ShanghaiBlock, head) { + return newCompatError("Shanghai fork block", c.ShanghaiBlock, newcfg.ShanghaiBlock) + } + if isForkIncompatible(c.CancunBlock, newcfg.CancunBlock, head) { + return newCompatError("Cancun fork block", c.CancunBlock, newcfg.CancunBlock) + } // Parlia forks if isForkIncompatible(c.RamanujanBlock, newcfg.RamanujanBlock, head) { @@ -710,6 +756,12 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head uint64) *ConfigC if isForkIncompatible(c.EulerBlock, newcfg.EulerBlock, head) { return newCompatError("Euler fork block", c.EulerBlock, newcfg.EulerBlock) } + if isForkIncompatible(c.GibbsBlock, newcfg.GibbsBlock, head) { + return newCompatError("Gibbs fork block", c.GibbsBlock, newcfg.GibbsBlock) + } + if isForkIncompatible(c.NanoBlock, newcfg.NanoBlock, 
head) { + return newCompatError("Nano fork block", c.NanoBlock, newcfg.NanoBlock) + } return nil } @@ -777,8 +829,9 @@ type Rules struct { ChainID *big.Int IsHomestead, IsTangerineWhistle, IsSpuriousDragon bool IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool - IsBerlin, IsLondon bool - IsParlia, IsStarknet bool + IsBerlin, IsLondon, IsShanghai, IsCancun bool + IsParlia, IsStarknet, IsAura bool + IsNano bool } // Rules ensures c's ChainID is not nil. @@ -798,7 +851,11 @@ func (c *ChainConfig) Rules(num uint64) *Rules { IsIstanbul: c.IsIstanbul(num), IsBerlin: c.IsBerlin(num), IsLondon: c.IsLondon(num), + IsShanghai: c.IsShanghai(num), + IsCancun: c.IsCancun(num), + IsNano: c.IsNano(num), IsParlia: c.Parlia != nil, + IsAura: c.Aura != nil, } } @@ -814,8 +871,6 @@ func ChainConfigByChainName(chain string) *ChainConfig { return RinkebyChainConfig case networkname.GoerliChainName: return GoerliChainConfig - case networkname.KilnDevnetChainName: - return KilnDevnetChainConfig case networkname.SokolChainName: return SokolChainConfig case networkname.FermionChainName: @@ -851,8 +906,6 @@ func GenesisHashByChainName(chain string) *common.Hash { return &RinkebyGenesisHash case networkname.GoerliChainName: return &GoerliGenesisHash - case networkname.KilnDevnetChainName: - return &KilnDevnetGensisHash case networkname.SokolChainName: return &SokolGenesisHash case networkname.FermionChainName: @@ -888,8 +941,6 @@ func ChainConfigByGenesisHash(genesisHash common.Hash) *ChainConfig { return RinkebyChainConfig case genesisHash == GoerliGenesisHash: return GoerliChainConfig - case genesisHash == KilnDevnetGensisHash: - return KilnDevnetChainConfig case genesisHash == SokolGenesisHash: return SokolChainConfig case genesisHash == FermionGenesisHash: diff --git a/params/denomination.go b/params/denomination.go index fb4da7f4125..bcedd271e0e 100644 --- a/params/denomination.go +++ b/params/denomination.go @@ -19,8 +19,7 @@ package params // These are the multipliers for 
ether denominations. // Example: To get the wei value of an amount in 'gwei', use // -// new(big.Int).Mul(value, big.NewInt(params.GWei)) -// +// new(big.Int).Mul(value, big.NewInt(params.GWei)) const ( Wei = 1 GWei = 1e9 diff --git a/params/networkname/network_name.go b/params/networkname/network_name.go index 0bb2eab7ebb..82c4832768e 100644 --- a/params/networkname/network_name.go +++ b/params/networkname/network_name.go @@ -6,7 +6,6 @@ const ( RopstenChainName = "ropsten" RinkebyChainName = "rinkeby" GoerliChainName = "goerli" - KilnDevnetChainName = "kiln-devnet" DevChainName = "dev" SokolChainName = "sokol" FermionChainName = "fermion" @@ -25,7 +24,6 @@ var All = []string{ RopstenChainName, RinkebyChainName, GoerliChainName, - KilnDevnetChainName, //DevChainName, SokolChainName, FermionChainName, diff --git a/params/protocol_params.go b/params/protocol_params.go index b2f3c1b3c33..1765aa70981 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -68,7 +68,7 @@ const ( // In EIP-2929: SstoreResetGas was changed to '5000 - COLD_SLOAD_COST'. // In EIP-3529: SSTORE_CLEARS_SCHEDULE is defined as SSTORE_RESET_GAS + ACCESS_LIST_STORAGE_KEY_COST // Which becomes: 5000 - 2100 + 1900 = 4800 - SstoreClearsScheduleRefundEIP3529 uint64 = SstoreResetGasEIP2200 - ColdSloadCostEIP2929 + TxAccessListStorageKeyGas + SstoreClearsScheduleRefundEIP3529 = SstoreResetGasEIP2200 - ColdSloadCostEIP2929 + TxAccessListStorageKeyGas JumpdestGas uint64 = 1 // Once per JUMPDEST operation. EpochDuration uint64 = 30000 // Duration between proof-of-work epochs. 
diff --git a/params/version.go b/params/version.go index 56d8388b83b..72d8a05bad2 100644 --- a/params/version.go +++ b/params/version.go @@ -31,9 +31,9 @@ var ( // see https://calver.org const ( - VersionMajor = 2022 // Major version component of the current release - VersionMinor = 99 // Minor version component of the current release - VersionMicro = 99 // Patch version component of the current release + VersionMajor = 2 // Major version component of the current release + VersionMinor = 27 // Minor version component of the current release + VersionMicro = 0 // Patch version component of the current release VersionModifier = "dev" // Modifier component of the current release VersionKeyCreated = "ErigonVersionCreated" VersionKeyFinished = "ErigonVersionFinished" @@ -70,7 +70,8 @@ var VersionWithMeta = func() string { // ArchiveVersion holds the textual version string used for Geth archives. // e.g. "1.8.11-dea1ce05" for stable releases, or -// "1.8.13-unstable-21c059b6" for unstable releases +// +// "1.8.13-unstable-21c059b6" for unstable releases func ArchiveVersion(gitCommit string) string { vsn := withModifier(Version) if len(gitCommit) >= 8 { diff --git a/rlp/decode.go b/rlp/decode.go index a817dee09d9..1eebcd9ac6e 100644 --- a/rlp/decode.go +++ b/rlp/decode.go @@ -28,6 +28,8 @@ import ( "strings" "sync" + "github.com/ledgerwatch/log/v3" + "github.com/holiman/uint256" ) @@ -101,9 +103,12 @@ type Decoder interface { // Note that Decode does not set an input limit for all readers and may be vulnerable to // panics cause by huge value sizes. 
If you need an input limit, use // -// NewStream(r, limit).Decode(val) +// NewStream(r, limit).Decode(val) func Decode(r io.Reader, val interface{}) error { - stream := streamPool.Get().(*Stream) + stream, ok := streamPool.Get().(*Stream) + if !ok { + log.Warn("Failed to type convert to Stream pointer") + } defer streamPool.Put(stream) stream.Reset(r, 0) @@ -115,7 +120,10 @@ func Decode(r io.Reader, val interface{}) error { func DecodeBytes(b []byte, val interface{}) error { r := bytes.NewReader(b) - stream := streamPool.Get().(*Stream) + stream, ok := streamPool.Get().(*Stream) + if !ok { + log.Warn("Failed to type convert to Stream pointer") + } defer streamPool.Put(stream) stream.Reset(r, uint64(len(b))) @@ -991,18 +999,18 @@ func (s *Stream) readKind() (kind Kind, size uint64, err error) { } s.byteval = 0 switch { - case b < 0x80: + case b < 0x80: //128 // For a single byte whose value is in the [0x00, 0x7F] range, that byte // is its own RLP encoding. s.byteval = b return Byte, 0, nil - case b < 0xB8: + case b < 0xB8: //184 // Otherwise, if a string is 0-55 bytes long, // the RLP encoding consists of a single byte with value 0x80 plus the // length of the string followed by the string. The range of the first // byte is thus [0x80, 0xB7]. return String, uint64(b - 0x80), nil - case b < 0xC0: + case b < 0xC0: //192 // If a string is more than 55 bytes long, the // RLP encoding consists of a single byte with value 0xB7 plus the length // of the length of the string in binary form, followed by the length of @@ -1014,7 +1022,7 @@ func (s *Stream) readKind() (kind Kind, size uint64, err error) { err = ErrCanonSize } return String, size, err - case b < 0xF8: + case b < 0xF8: //248 // If the total payload of a list // (i.e. 
the combined length of all its items) is 0-55 bytes long, the // RLP encoding consists of a single byte with value 0xC0 plus the length @@ -1028,7 +1036,7 @@ func (s *Stream) readKind() (kind Kind, size uint64, err error) { // form, followed by the length of the payload, followed by // the concatenation of the RLP encodings of the items. The // range of the first byte is thus [0xF8, 0xFF]. - size, err = s.readUint(b - 0xF7) + size, err = s.readUint(b - 0xF7) //247 if err == nil && size < 56 { err = ErrCanonSize } diff --git a/rlp/decode_test.go b/rlp/decode_test.go index 856751273fa..84755e36780 100644 --- a/rlp/decode_test.go +++ b/rlp/decode_test.go @@ -1133,7 +1133,7 @@ func encodeTestSlice(n uint) []byte { } func unhex(str string) []byte { - b, err := hex.DecodeString(strings.Replace(str, " ", "", -1)) + b, err := hex.DecodeString(strings.ReplaceAll(str, " ", "")) if err != nil { panic(fmt.Sprintf("invalid hex string: %q", str)) } diff --git a/rlp/doc.go b/rlp/doc.go index 113828e39b9..8dd5c89b8e8 100644 --- a/rlp/doc.go +++ b/rlp/doc.go @@ -27,8 +27,7 @@ value zero equivalent to the empty string). RLP values are distinguished by a type tag. The type tag precedes the value in the input stream and defines the size and kind of the bytes that follow. - -Encoding Rules +# Encoding Rules Package rlp uses reflection and encodes RLP based on the Go type of the value. @@ -58,8 +57,7 @@ An interface value encodes as the value contained in the interface. Floating point numbers, maps, channels and functions are not supported. - -Decoding Rules +# Decoding Rules Decoding uses the following type-dependent rules: @@ -93,30 +91,29 @@ or one (true). To decode into an interface value, one of these types is stored in the value: - []interface{}, for RLP lists - []byte, for RLP strings + []interface{}, for RLP lists + []byte, for RLP strings Non-empty interface types are not supported when decoding. 
Signed integers, floating point numbers, maps, channels and functions cannot be decoded into. - -Struct Tags +# Struct Tags As with other encoding packages, the "-" tag ignores fields. - type StructWithIgnoredField struct{ - Ignored uint `rlp:"-"` - Field uint - } + type StructWithIgnoredField struct{ + Ignored uint `rlp:"-"` + Field uint + } Go struct values encode/decode as RLP lists. There are two ways of influencing the mapping of fields to list elements. The "tail" tag, which may only be used on the last exported struct field, allows slurping up any excess list elements into a slice. - type StructWithTail struct{ - Field uint - Tail []string `rlp:"tail"` - } + type StructWithTail struct{ + Field uint + Tail []string `rlp:"tail"` + } The "optional" tag says that the field may be omitted if it is zero-valued. If this tag is used on a struct field, all subsequent public fields must also be declared optional. @@ -128,11 +125,11 @@ When decoding into a struct, optional fields may be omitted from the end of the list. For the example below, this means input lists of one, two, or three elements are accepted. - type StructWithOptionalFields struct{ - Required uint - Optional1 uint `rlp:"optional"` - Optional2 uint `rlp:"optional"` - } + type StructWithOptionalFields struct{ + Required uint + Optional1 uint `rlp:"optional"` + Optional2 uint `rlp:"optional"` + } The "nil", "nilList" and "nilString" tags apply to pointer-typed fields only, and change the decoding rules for the field type. For regular pointer fields without the "nil" tag, @@ -140,9 +137,9 @@ input values must always match the required input length exactly and the decoder produce nil values. When the "nil" tag is set, input values of size zero decode as a nil pointer. This is especially useful for recursive types. - type StructWithNilField struct { - Field *[3]byte `rlp:"nil"` - } + type StructWithNilField struct { + Field *[3]byte `rlp:"nil"` + } In the example above, Field allows two possible input sizes. 
For input 0xC180 (a list containing an empty string) Field is set to nil after decoding. For input 0xC483000000 (a diff --git a/rpc/doc.go b/rpc/doc.go index b9d0636b9f5..c3370ada13b 100644 --- a/rpc/doc.go +++ b/rpc/doc.go @@ -15,7 +15,6 @@ // along with the go-ethereum library. If not, see . /* - Package rpc implements bi-directional JSON-RPC 2.0 on multiple transports. It provides access to the exported methods of an object across a network or other I/O @@ -23,16 +22,16 @@ connection. After creating a server or client instance, objects can be registere them visible as 'services'. Exported methods that follow specific conventions can be called remotely. It also has support for the publish/subscribe pattern. -RPC Methods +# RPC Methods Methods that satisfy the following criteria are made available for remote access: - - method must be exported - - method returns 0, 1 (response or error) or 2 (response and error) values + - method must be exported + - method returns 0, 1 (response or error) or 2 (response and error) values An example method: - func (s *CalcService) Add(a, b int) (int, error) + func (s *CalcService) Add(a, b int) (int, error) When the returned error isn't nil the returned integer is ignored and the error is sent back to the client. Otherwise the returned integer is sent back to the client. @@ -41,7 +40,7 @@ Optional arguments are supported by accepting pointer values as arguments. E.g. to do the addition in an optional finite field we can accept a mod argument as pointer value. - func (s *CalcService) Add(a, b int, mod *int) (int, error) + func (s *CalcService) Add(a, b int, mod *int) (int, error) This RPC method can be called with 2 integers and a null value as third argument. In that case the mod argument will be nil. Or it can be called with 3 integers, in that case mod @@ -56,40 +55,40 @@ to the client out of order. 
An example server which uses the JSON codec: - type CalculatorService struct {} + type CalculatorService struct {} - func (s *CalculatorService) Add(a, b int) int { - return a + b - } + func (s *CalculatorService) Add(a, b int) int { + return a + b + } - func (s *CalculatorService) Div(a, b int) (int, error) { - if b == 0 { - return 0, errors.New("divide by zero") - } - return a/b, nil - } + func (s *CalculatorService) Div(a, b int) (int, error) { + if b == 0 { + return 0, errors.New("divide by zero") + } + return a/b, nil + } - calculator := new(CalculatorService) - server := NewServer() - server.RegisterName("calculator", calculator) - l, _ := net.ListenUnix("unix", &net.UnixAddr{Net: "unix", Name: "/tmp/calculator.sock"}) - server.ServeListener(l) + calculator := new(CalculatorService) + server := NewServer() + server.RegisterName("calculator", calculator) + l, _ := net.ListenUnix("unix", &net.UnixAddr{Net: "unix", Name: "/tmp/calculator.sock"}) + server.ServeListener(l) -Subscriptions +# Subscriptions The package also supports the publish subscribe pattern through the use of subscriptions. A method that is considered eligible for notifications must satisfy the following criteria: - - method must be exported - - first method argument type must be context.Context - - method must have return types (rpc.Subscription, error) + - method must be exported + - first method argument type must be context.Context + - method must have return types (rpc.Subscription, error) An example method: - func (s *BlockChainService) NewBlocks(ctx context.Context) (rpc.Subscription, error) { - ... - } + func (s *BlockChainService) NewBlocks(ctx context.Context) (rpc.Subscription, error) { + ... + } When the service containing the subscription method is registered to the server, for example under the "blockchain" namespace, a subscription is created by calling the @@ -101,7 +100,7 @@ the client and server. The server will close the connection for any write error. 
For more information about subscriptions, see https://github.com/ledgerwatch/erigon/wiki/RPC-PUB-SUB. -Reverse Calls +# Reverse Calls In any method handler, an instance of rpc.Client can be accessed through the ClientFromContext method. Using this client instance, server-to-client method calls can be diff --git a/rpc/handler.go b/rpc/handler.go index aa871ae085d..a5fe261efc6 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -37,21 +37,20 @@ import ( // // The entry points for incoming messages are: // -// h.handleMsg(message) -// h.handleBatch(message) +// h.handleMsg(message) +// h.handleBatch(message) // // Outgoing calls use the requestOp struct. Register the request before sending it // on the connection: // -// op := &requestOp{ids: ...} -// h.addRequestOp(op) +// op := &requestOp{ids: ...} +// h.addRequestOp(op) // // Now send the request, then wait for the reply to be delivered through handleMsg: // -// if err := op.wait(...); err != nil { -// h.removeRequestOp(op) // timeout, etc. -// } -// +// if err := op.wait(...); err != nil { +// h.removeRequestOp(op) // timeout, etc. 
+// } type handler struct { reg *serviceRegistry unsubscribeCb *callback @@ -79,6 +78,38 @@ type callProc struct { notifiers []*Notifier } +func HandleError(err error, stream *jsoniter.Stream) error { + if err != nil { + //return msg.errorResponse(err) + stream.WriteObjectField("error") + stream.WriteObjectStart() + stream.WriteObjectField("code") + ec, ok := err.(Error) + if ok { + stream.WriteInt(ec.ErrorCode()) + } else { + stream.WriteInt(defaultErrorCode) + } + stream.WriteMore() + stream.WriteObjectField("message") + stream.WriteString(fmt.Sprintf("%v", err)) + de, ok := err.(DataError) + if ok { + stream.WriteMore() + stream.WriteObjectField("data") + data, derr := json.Marshal(de.ErrorData()) + if derr == nil { + stream.Write(data) + } else { + stream.WriteString(fmt.Sprintf("%v", derr)) + } + } + stream.WriteObjectEnd() + } + + return nil +} + func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, allowList AllowList, maxBatchConcurrency uint, traceRequests bool) *handler { rootCtx, cancelRoot := context.WithCancel(connCtx) forbiddenList := newForbiddenList() @@ -193,7 +224,7 @@ func (h *handler) handleMsg(msg *jsonrpcMessage, stream *jsoniter.Stream) { h.addSubscriptions(cp.notifiers) if answer != nil { buffer, _ := json.Marshal(answer) - stream.Write(json.RawMessage(buffer)) + stream.Write(buffer) } if needWriteStream { h.conn.writeJSON(cp.ctx, json.RawMessage(stream.Buffer())) @@ -479,33 +510,9 @@ func (h *handler) runMethod(ctx context.Context, msg *jsonrpcMessage, callb *cal stream.WriteObjectField("result") _, err := callb.call(ctx, msg.Method, args, stream) if err != nil { - //return msg.errorResponse(err) - - stream.WriteMore() - stream.WriteObjectField("error") - stream.WriteObjectStart() - stream.WriteObjectField("code") - ec, ok := err.(Error) - if ok { - stream.WriteInt(ec.ErrorCode()) - } else { - stream.WriteInt(defaultErrorCode) - } + stream.WriteNil() stream.WriteMore() - 
stream.WriteObjectField("message") - stream.WriteString(fmt.Sprintf("%v", err)) - de, ok := err.(DataError) - if ok { - stream.WriteMore() - stream.WriteObjectField("data") - data, derr := json.Marshal(de.ErrorData()) - if derr == nil { - stream.Write(data) - } else { - stream.WriteString(fmt.Sprintf("%v", derr)) - } - } - stream.WriteObjectEnd() + HandleError(err, stream) } stream.WriteObjectEnd() stream.Flush() diff --git a/rpc/rpccfg/rpccfg.go b/rpc/rpccfg/rpccfg.go index 3347b55d08e..f1cde689c78 100644 --- a/rpc/rpccfg/rpccfg.go +++ b/rpc/rpccfg/rpccfg.go @@ -35,3 +35,5 @@ var DefaultHTTPTimeouts = HTTPTimeouts{ WriteTimeout: 30 * time.Minute, IdleTimeout: 120 * time.Second, } + +const DefaultEvmCallTimeout = 5 * time.Minute diff --git a/rpc/subscription.go b/rpc/subscription.go index 233215d792f..84cd8228d3a 100644 --- a/rpc/subscription.go +++ b/rpc/subscription.go @@ -60,7 +60,7 @@ func randomIDGenerator() func() ID { var ( mu sync.Mutex - rng = rand.New(rand.NewSource(seed)) + rng = rand.New(rand.NewSource(seed)) // nolint: gosec ) return func() ID { mu.Lock() diff --git a/rpc/types.go b/rpc/types.go index bf61b327c91..c890ce43e0b 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -71,11 +71,12 @@ type BlockNumber int64 type Timestamp uint64 const ( - FinalizedBlockNumber = BlockNumber(-4) - SafeBlockNumber = BlockNumber(-3) - PendingBlockNumber = BlockNumber(-2) - LatestBlockNumber = BlockNumber(-1) - EarliestBlockNumber = BlockNumber(0) + LatestExecutedBlockNumber = BlockNumber(-5) + FinalizedBlockNumber = BlockNumber(-4) + SafeBlockNumber = BlockNumber(-3) + PendingBlockNumber = BlockNumber(-2) + LatestBlockNumber = BlockNumber(-1) + EarliestBlockNumber = BlockNumber(0) ) // UnmarshalJSON parses the given JSON fragment into a BlockNumber. 
It supports: @@ -106,6 +107,9 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error { case "finalized": *bn = FinalizedBlockNumber return nil + case "latestExecuted": + *bn = LatestExecutedBlockNumber + return nil case "null": *bn = LatestBlockNumber return nil diff --git a/tests/block_test.go b/tests/block_test.go index c5cfbc8cd85..92b16f17b67 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -22,6 +22,7 @@ import ( "runtime" "testing" + "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/log/v3" ) @@ -40,6 +41,12 @@ func TestBlockchain(t *testing.T) { // Currently it fails because SpawnStageHeaders doesn't accept any PoW blocks after PoS transition // TODO(yperbasis): make it work bt.skipLoad(`^TransitionTests/bcArrowGlacierToMerge/powToPosBlockRejection\.json`) + if ethconfig.EnableHistoryV3InTest { + // HistoryV3: doesn't produce receipts on execution by design + bt.skipLoad(`^TestBlockchain/InvalidBlocks/bcInvalidHeaderTest/log1_wrongBloom\.json`) + bt.skipLoad(`^TestBlockchain/InvalidBlocks/bcInvalidHeaderTest/wrongReceiptTrie\.json`) + bt.skipLoad(`^TestBlockchain/InvalidBlocks/bcInvalidHeaderTest/wrongGasUsed\.json`) + } bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { // import pre accounts & construct test genesis block & state root diff --git a/tests/block_test_util.go b/tests/block_test_util.go index 6635656bd2c..a27ef31c26c 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -163,17 +163,18 @@ func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis { } } -/* See https://github.com/ethereum/tests/wiki/Blockchain-Tests-II +/* +See https://github.com/ethereum/tests/wiki/Blockchain-Tests-II - Whether a block is valid or not is a bit subtle, it's defined by presence of - blockHeader, transactions and uncleHeaders fields. If they are missing, the block is - invalid and we must verify that we do not accept it. 
+ Whether a block is valid or not is a bit subtle, it's defined by presence of + blockHeader, transactions and uncleHeaders fields. If they are missing, the block is + invalid and we must verify that we do not accept it. - Since some tests mix valid and invalid blocks we need to check this for every block. + Since some tests mix valid and invalid blocks we need to check this for every block. - If a block is invalid it does not necessarily fail the test, if it's invalidness is - expected we are expected to ignore it and continue processing and then validate the - post state. + If a block is invalid it does not necessarily fail the test, if it's invalidness is + expected we are expected to ignore it and continue processing and then validate the + post state. */ func (t *BlockTest) insertBlocks(m *stages.MockSentry) ([]btBlock, error) { validBlocks := make([]btBlock, 0) @@ -189,7 +190,8 @@ func (t *BlockTest) insertBlocks(m *stages.MockSentry) ([]btBlock, error) { } // RLP decoding worked, try to insert into chain: chain := &core.ChainPack{Blocks: []*types.Block{cb}, Headers: []*types.Header{cb.Header()}, TopBlock: cb} - if err1 := m.InsertChain(chain); err1 != nil { + err1 := m.InsertChain(chain) + if err1 != nil { if b.BlockHeader == nil { continue // OK - block is supposed to be invalid, continue with next block } else { diff --git a/tests/fuzzers/bls12381/precompile_fuzzer.go b/tests/fuzzers/bls12381/precompile_fuzzer.go index 963b157b37f..1308ca3d268 100644 --- a/tests/fuzzers/bls12381/precompile_fuzzer.go +++ b/tests/fuzzers/bls12381/precompile_fuzzer.go @@ -72,8 +72,10 @@ func checkInput(id byte, inputLen int) bool { // The fuzzer functions must return // 1 if the fuzzer should increase priority of the -// given input during subsequent fuzzing (for example, the input is lexically -// correct and was parsed successfully); +// +// given input during subsequent fuzzing (for example, the input is lexically +// correct and was parsed successfully); +// // -1 if the 
input must not be added to corpus even if gives new coverage; and // 0 otherwise // other values are reserved for future use. diff --git a/tests/fuzzers/bn256/bn256_fuzz.go b/tests/fuzzers/bn256/bn256_fuzz.go index 2efbdc8efa6..e0b0a865ad8 100644 --- a/tests/fuzzers/bn256/bn256_fuzz.go +++ b/tests/fuzzers/bn256/bn256_fuzz.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file. +//go:build gofuzz // +build gofuzz package bn256 diff --git a/tests/fuzzers/difficulty/difficulty-fuzz.go b/tests/fuzzers/difficulty/difficulty-fuzz.go index 80528f6e26d..b5bee7d90e5 100644 --- a/tests/fuzzers/difficulty/difficulty-fuzz.go +++ b/tests/fuzzers/difficulty/difficulty-fuzz.go @@ -70,8 +70,10 @@ func (f *fuzzer) readBool() bool { // The function must return // 1 if the fuzzer should increase priority of the -// given input during subsequent fuzzing (for example, the input is lexically -// correct and was parsed successfully); +// +// given input during subsequent fuzzing (for example, the input is lexically +// correct and was parsed successfully); +// // -1 if the input must not be added to corpus even if gives new coverage; and // 0 otherwise // other values are reserved for future use. 
diff --git a/tests/state_test_util.go b/tests/state_test_util.go index fdc473995fe..ab159fb2f90 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -18,6 +18,7 @@ package tests import ( "encoding/binary" + "encoding/hex" "encoding/json" "fmt" "math/big" @@ -60,15 +61,47 @@ func (t *StateTest) UnmarshalJSON(in []byte) error { type stJSON struct { Env stEnv `json:"env"` Pre core.GenesisAlloc `json:"pre"` + Tx stTransactionMarshaling `json:"transaction"` Out hexutil.Bytes `json:"out"` Post map[string][]stPostState `json:"post"` } type stPostState struct { - Root common.Hash `json:"hash"` - Logs common.Hash `json:"logs"` - Tx hexutil.Bytes `json:"txbytes"` - ExpectException string `json:"expectException"` + Root common.UnprefixedHash `json:"hash"` + Logs common.UnprefixedHash `json:"logs"` + Tx hexutil.Bytes `json:"txbytes"` + ExpectException string `json:"expectException"` + Indexes struct { + Data int `json:"data"` + Gas int `json:"gas"` + Value int `json:"value"` + } +} + +type stTransaction struct { + GasPrice *big.Int `json:"gasPrice"` + MaxFeePerGas *big.Int `json:"maxFeePerGas"` + MaxPriorityFeePerGas *big.Int `json:"maxPriorityFeePerGas"` + Nonce uint64 `json:"nonce"` + To string `json:"to"` + Data []string `json:"data"` + AccessLists []*types.AccessList `json:"accessLists,omitempty"` + GasLimit []uint64 `json:"gasLimit"` + Value []string `json:"value"` + PrivateKey []byte `json:"secretKey"` +} + +type stTransactionMarshaling struct { + GasPrice *math.HexOrDecimal256 `json:"gasPrice"` + MaxFeePerGas *math.HexOrDecimal256 `json:"maxFeePerGas"` + MaxPriorityFeePerGas *math.HexOrDecimal256 `json:"maxPriorityFeePerGas"` + Nonce math.HexOrDecimal64 `json:"nonce"` + GasLimit []math.HexOrDecimal64 `json:"gasLimit"` + PrivateKey hexutil.Bytes `json:"secretKey"` + To string `json:"to"` + Data []string `json:"data"` + Value []string `json:"value"` + AccessLists []*types.AccessList `json:"accessLists,omitempty"` } //go:generate gencodec -type stEnv 
-field-override stEnvMarshaling -out gen_stenv.go @@ -180,20 +213,25 @@ func (t *StateTest) RunNoVerify(rules *params.Rules, tx kv.RwTx, subtest StateSu } } post := t.json.Post[subtest.Fork][subtest.Index] - txn, err := types.UnmarshalTransactionFromBinary(post.Tx) + msg, err := toMessage(t.json.Tx, post, baseFee) if err != nil { return nil, common.Hash{}, err } - msg, err := txn.AsMessage(*types.MakeSigner(config, 0), baseFee, config.Rules(0)) - if err != nil { - return nil, common.Hash{}, err + if len(post.Tx) != 0 { + txn, err := types.UnmarshalTransactionFromBinary(post.Tx) + if err != nil { + return nil, common.Hash{}, err + } + msg, err = txn.AsMessage(*types.MakeSigner(config, 0), baseFee, config.Rules(0)) + if err != nil { + return nil, common.Hash{}, err + } } // Prepare the EVM. txContext := core.NewEVMTxContext(msg) - contractHasTEVM := func(common.Hash) (bool, error) { return false, nil } header := block.Header() - context := core.NewEVMBlockContext(header, core.GetHashFn(header, nil), nil, &t.json.Env.Coinbase, contractHasTEVM) + context := core.NewEVMBlockContext(header, core.GetHashFn(header, nil), nil, &t.json.Env.Coinbase) context.GetHash = vmTestBlockHash if baseFee != nil { context.BaseFee = new(uint256.Int) @@ -326,3 +364,103 @@ func rlpHash(x interface{}) (h common.Hash) { func vmTestBlockHash(n uint64) common.Hash { return common.BytesToHash(crypto.Keccak256([]byte(big.NewInt(int64(n)).String()))) } + +func toMessage(tx stTransactionMarshaling, ps stPostState, baseFee *big.Int) (core.Message, error) { + // Derive sender from private key if present. + var from common.Address + if len(tx.PrivateKey) > 0 { + key, err := crypto.ToECDSA(tx.PrivateKey) + if err != nil { + return nil, fmt.Errorf("invalid private key: %v", err) + } + from = crypto.PubkeyToAddress(key.PublicKey) + } + + // Parse recipient if present. 
+ var to *common.Address + if tx.To != "" { + to = new(common.Address) + if err := to.UnmarshalText([]byte(tx.To)); err != nil { + return nil, fmt.Errorf("invalid to address: %v", err) + } + } + + // Get values specific to this post state. + if ps.Indexes.Data > len(tx.Data) { + return nil, fmt.Errorf("tx data index %d out of bounds", ps.Indexes.Data) + } + if ps.Indexes.Value > len(tx.Value) { + return nil, fmt.Errorf("tx value index %d out of bounds", ps.Indexes.Value) + } + if ps.Indexes.Gas > len(tx.GasLimit) { + return nil, fmt.Errorf("tx gas limit index %d out of bounds", ps.Indexes.Gas) + } + dataHex := tx.Data[ps.Indexes.Data] + valueHex := tx.Value[ps.Indexes.Value] + gasLimit := tx.GasLimit[ps.Indexes.Gas] + + value := new(uint256.Int) + if valueHex != "0x" { + va, ok := math.ParseBig256(valueHex) + if !ok { + return nil, fmt.Errorf("invalid tx value %q", valueHex) + } + v, overflow := uint256.FromBig(va) + if overflow { + return nil, fmt.Errorf("invalid tx value (overflowed) %q", valueHex) + } + value = v + } + data, err := hex.DecodeString(strings.TrimPrefix(dataHex, "0x")) + if err != nil { + return nil, fmt.Errorf("invalid tx data %q", dataHex) + } + var accessList types.AccessList + if tx.AccessLists != nil && tx.AccessLists[ps.Indexes.Data] != nil { + accessList = *tx.AccessLists[ps.Indexes.Data] + } + + var feeCap, tipCap big.Int + + // If baseFee provided, set gasPrice to effectiveGasPrice. 
+ gasPrice := tx.GasPrice + if baseFee != nil { + if tx.MaxFeePerGas == nil { + tx.MaxFeePerGas = gasPrice + } + if tx.MaxFeePerGas == nil { + tx.MaxFeePerGas = math.NewHexOrDecimal256(0) + } + if tx.MaxPriorityFeePerGas == nil { + tx.MaxPriorityFeePerGas = tx.MaxFeePerGas + } + + feeCap = big.Int(*tx.MaxPriorityFeePerGas) + tipCap = big.Int(*tx.MaxFeePerGas) + + gp := math.BigMin(new(big.Int).Add(&feeCap, baseFee), &tipCap) + gasPrice = math.NewHexOrDecimal256(gp.Int64()) + } + if gasPrice == nil { + return nil, fmt.Errorf("no gas price provided") + } + + gpi := big.Int(*gasPrice) + gasPriceInt := uint256.NewInt(gpi.Uint64()) + + // TODO the conversion to int64 then uint64 then new int isn't working! + msg := types.NewMessage( + from, + to, + uint64(tx.Nonce), + value, + uint64(gasLimit), + gasPriceInt, + uint256.NewInt(feeCap.Uint64()), + uint256.NewInt(tipCap.Uint64()), + data, + accessList, + false) + + return msg, nil +} diff --git a/tests/testdata b/tests/testdata index 95a8490f1f2..c76c5d274e2 160000 --- a/tests/testdata +++ b/tests/testdata @@ -1 +1 @@ -Subproject commit 95a8490f1f202dc9f58e910571d5d02082e0e641 +Subproject commit c76c5d274e271e039b0b11e9372f0c1143dd6c30 diff --git a/tests/transaction_test_util.go b/tests/transaction_test_util.go index fe28fb88c77..98ff970b090 100644 --- a/tests/transaction_test_util.go +++ b/tests/transaction_test_util.go @@ -131,15 +131,15 @@ func (tt *TransactionTest) Run(chainID *big.Int) error { return fmt.Errorf("got error, expected none: %w", err) } if sender == nil { - return fmt.Errorf("sender was nil, should be %x", common.Address(testcase.fork.Sender)) + return fmt.Errorf("sender was nil, should be %x", testcase.fork.Sender) } - if *sender != common.Address(testcase.fork.Sender) { + if *sender != testcase.fork.Sender { return fmt.Errorf("sender mismatch: got %x, want %x", sender, testcase.fork.Sender) } if txhash == nil { - return fmt.Errorf("txhash was nil, should be %x", common.Hash(testcase.fork.Hash)) + return 
fmt.Errorf("txhash was nil, should be %x", testcase.fork.Hash) } - if *txhash != common.Hash(testcase.fork.Hash) { + if *txhash != testcase.fork.Hash { return fmt.Errorf("hash mismatch: got %x, want %x", *txhash, testcase.fork.Hash) } if new(big.Int).SetUint64(intrinsicGas).Cmp((*big.Int)(testcase.fork.IntrinsicGas)) != 0 { diff --git a/tools.go b/tools.go index 7a7ba16ab0c..eab76e96267 100644 --- a/tools.go +++ b/tools.go @@ -19,6 +19,8 @@ package tools import ( _ "github.com/fjl/gencodec" _ "github.com/kevinburke/go-bindata" + _ "github.com/torquem-ch/mdbx-go" + _ "github.com/torquem-ch/mdbx-go/mdbxdist" _ "github.com/ugorji/go/codec/codecgen" _ "google.golang.org/grpc/cmd/protoc-gen-go-grpc" ) diff --git a/turbo/adapter/ethapi/internal.go b/turbo/adapter/ethapi/internal.go index 12d0ce2e4ae..0c90fcc7b23 100644 --- a/turbo/adapter/ethapi/internal.go +++ b/turbo/adapter/ethapi/internal.go @@ -26,12 +26,12 @@ type ExecutionResult struct { *ethapi.ExecutionResult } -//nolint +// nolint func RPCMarshalHeader(head *types.Header) map[string]interface{} { return ethapi.RPCMarshalHeader(head) } -//nolint +// nolint func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool, additional map[string]interface{}) (map[string]interface{}, error) { fields, err := ethapi.RPCMarshalBlock(b, inclTx, fullTx) if err != nil { @@ -45,7 +45,7 @@ func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool, additional map[st return fields, err } -//nolint +// nolint func RPCMarshalBlockEx(b *types.Block, inclTx bool, fullTx bool, borTx types.Transaction, borTxHash common.Hash, additional map[string]interface{}) (map[string]interface{}, error) { fields, err := ethapi.RPCMarshalBlockEx(b, inclTx, fullTx, borTx, borTxHash) if err != nil { @@ -59,7 +59,7 @@ func RPCMarshalBlockEx(b *types.Block, inclTx bool, fullTx bool, borTx types.Tra return fields, err } -//nolint +// nolint type RPCTransaction struct { *ethapi.RPCTransaction } diff --git a/turbo/app/import.go b/turbo/app/import.go 
index 83de96e1470..50a82ebd15b 100644 --- a/turbo/app/import.go +++ b/turbo/app/import.go @@ -60,7 +60,7 @@ func importChain(ctx *cli.Context) error { stack := makeConfigNode(nodeCfg) defer stack.Close() - ethereum, err := turboNode.RegisterEthService(stack, ethCfg, logger) + ethereum, err := eth.New(stack, ethCfg, logger) if err != nil { return err } @@ -212,7 +212,7 @@ func InsertChain(ethereum *eth.Ethereum, chain *core.ChainPack) error { sentryControlServer.Hd.MarkAllVerified() - _, err := stages.StageLoopStep(ethereum.SentryCtx(), ethereum.ChainDB(), ethereum.StagedSync(), highestSeenHeader, ethereum.Notifications(), initialCycle, sentryControlServer.UpdateHead, nil) + _, err := stages.StageLoopStep(ethereum.SentryCtx(), ethereum.ChainConfig(), ethereum.ChainDB(), ethereum.StagedSync(), highestSeenHeader, ethereum.Notifications(), initialCycle, sentryControlServer.UpdateHead, nil) if err != nil { return err } diff --git a/turbo/app/snapshots.go b/turbo/app/snapshots.go index 1a068f06cc6..0cad5d6339f 100644 --- a/turbo/app/snapshots.go +++ b/turbo/app/snapshots.go @@ -10,7 +10,9 @@ import ( "os" "path/filepath" "runtime" + "time" + "github.com/c2h5oh/datasize" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" @@ -19,10 +21,12 @@ import ( "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/erigon/cmd/hack/tool" + libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" "github.com/ledgerwatch/erigon/internal/debug" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" @@ -86,6 +90,18 @@ var snapshotCommand = cli.Command{ Before: 
func(ctx *cli.Context) error { return debug.Setup(ctx) }, Flags: append([]cli.Flag{utils.DataDirFlag}, debug.Flags...), }, + { + Name: "ram", + Action: doRam, + Before: func(ctx *cli.Context) error { return debug.Setup(ctx) }, + Flags: append([]cli.Flag{utils.DataDirFlag}, debug.Flags...), + }, + { + Name: "decompress_speed", + Action: doDecompressSpeed, + Before: func(ctx *cli.Context) error { return debug.Setup(ctx) }, + Flags: append([]cli.Flag{utils.DataDirFlag}, debug.Flags...), + }, }, } @@ -108,7 +124,7 @@ var ( SnapshotSegmentSizeFlag = cli.Uint64Flag{ Name: "segment.size", Usage: "Amount of blocks in each segment", - Value: snap.DEFAULT_SEGMENT_SIZE, + Value: snap.Erigon2SegmentSize, } SnapshotRebuildFlag = cli.BoolFlag{ Name: "rebuild", @@ -116,6 +132,72 @@ var ( } ) +func preloadFileAsync(name string) { + go func() { + ff, _ := os.Open(name) + _, _ = io.CopyBuffer(io.Discard, bufio.NewReaderSize(ff, 64*1024*1024), make([]byte, 64*1024*1024)) + }() +} + +func doDecompressSpeed(cliCtx *cli.Context) error { + args := cliCtx.Args() + if len(args) != 1 { + return fmt.Errorf("expecting .seg file path") + } + f := args[0] + + compress.SetDecompressionTableCondensity(9) + + preloadFileAsync(f) + + decompressor, err := compress.NewDecompressor(f) + if err != nil { + return err + } + defer decompressor.Close() + func() { + defer decompressor.EnableReadAhead().DisableReadAhead() + + t := time.Now() + g := decompressor.MakeGetter() + buf := make([]byte, 0, 16*etl.BufIOSize) + for g.HasNext() { + buf, _ = g.Next(buf[:0]) + } + log.Info("decompress speed", "took", time.Since(t)) + }() + func() { + defer decompressor.EnableReadAhead().DisableReadAhead() + + t := time.Now() + g := decompressor.MakeGetter() + for g.HasNext() { + _ = g.Skip() + } + log.Info("decompress skip speed", "took", time.Since(t)) + }() + return nil +} +func doRam(cliCtx *cli.Context) error { + args := cliCtx.Args() + if len(args) != 1 { + return fmt.Errorf("expecting .seg file path") + } + f := 
args[0] + var m runtime.MemStats + runtime.ReadMemStats(&m) + runtime.ReadMemStats(&m) + before := m.Alloc + log.Info("RAM before open", "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys)) + decompressor, err := compress.NewDecompressor(f) + if err != nil { + return err + } + defer decompressor.Close() + runtime.ReadMemStats(&m) + log.Info("RAM after open", "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys), "diff", common.ByteCount(m.Alloc-before)) + return nil +} func doIndicesCommand(cliCtx *cli.Context) error { ctx, cancel := common.RootContext() defer cancel() @@ -127,12 +209,28 @@ func doIndicesCommand(cliCtx *cli.Context) error { chainDB := mdbx.NewMDBX(log.New()).Path(dirs.Chaindata).Readonly().MustOpen() defer chainDB.Close() + dir.MustExist(dirs.SnapHistory) + + workers := cmp.Max(1, runtime.GOMAXPROCS(-1)-1) if rebuild { - cfg := ethconfig.NewSnapCfg(true, true, false) - workers := cmp.InRange(1, 4, runtime.GOMAXPROCS(-1)-1) - if err := rebuildIndices(ctx, chainDB, cfg, dirs, from, workers); err != nil { - log.Error("Error", "err", err) - } + panic("not implemented") + } + cfg := ethconfig.NewSnapCfg(true, true, false) + if err := rebuildIndices("Indexing", ctx, chainDB, cfg, dirs, from, workers); err != nil { + log.Error("Error", "err", err) + } + agg, err := libstate.NewAggregator22(dirs.SnapHistory, ethconfig.HistoryV3AggregationStep) + if err != nil { + return err + } + err = agg.ReopenFiles() + if err != nil { + return err + } + agg.SetWorkers(estimate.CompressSnapshot.Workers()) + err = agg.BuildMissedIndices() + if err != nil { + return err } return nil } @@ -145,35 +243,44 @@ func doUncompress(cliCtx *cli.Context) error { return fmt.Errorf("expecting .seg file path") } f := args[0] + + preloadFileAsync(f) + decompressor, err := compress.NewDecompressor(f) if err != nil { return err } defer decompressor.Close() - wr := bufio.NewWriterSize(os.Stdout, 16*etl.BufIOSize) + wr := bufio.NewWriterSize(os.Stdout, 
int(128*datasize.MB)) defer wr.Flush() + logEvery := time.NewTicker(30 * time.Second) + defer logEvery.Stop() + + var i uint var numBuf [binary.MaxVarintLen64]byte - if err := decompressor.WithReadAhead(func() error { - g := decompressor.MakeGetter() - buf := make([]byte, 0, 16*etl.BufIOSize) - for g.HasNext() { - buf, _ = g.Next(buf[:0]) - n := binary.PutUvarint(numBuf[:], uint64(len(buf))) - if _, err := wr.Write(numBuf[:n]); err != nil { - return err - } - if _, err := wr.Write(buf); err != nil { - return err - } - select { - case <-ctx.Done(): - return ctx.Err() - default: - } + defer decompressor.EnableReadAhead().DisableReadAhead() + + g := decompressor.MakeGetter() + buf := make([]byte, 0, 1*datasize.MB) + for g.HasNext() { + buf, _ = g.Next(buf[:0]) + n := binary.PutUvarint(numBuf[:], uint64(len(buf))) + if _, err := wr.Write(numBuf[:n]); err != nil { + return err + } + if _, err := wr.Write(buf); err != nil { + return err + } + i++ + select { + case <-logEvery.C: + _, fileName := filepath.Split(decompressor.FilePath()) + progress := 100 * float64(i) / float64(decompressor.Count()) + log.Info("[uncompress] ", "progress", fmt.Sprintf("%.2f%%", progress), "file", fileName) + case <-ctx.Done(): + return ctx.Err() + default: } - return nil - }); err != nil { - return err } return nil } @@ -186,16 +293,14 @@ func doCompress(cliCtx *cli.Context) error { } f := args[0] dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name)) - workers := runtime.GOMAXPROCS(-1) - 1 - if workers < 1 { - workers = 1 - } - c, err := compress.NewCompressor(ctx, "", f, dirs.Tmp, compress.MinPatternScore, workers, log.LvlInfo) + workers := cmp.Max(1, runtime.GOMAXPROCS(-1)-1) + c, err := compress.NewCompressor(ctx, "compress", f, dirs.Tmp, compress.MinPatternScore, workers, log.LvlInfo) if err != nil { return err } - r := bufio.NewReaderSize(os.Stdin, 16*etl.BufIOSize) - buf := make([]byte, 0, 32*1024*1024) + defer c.Close() + r := bufio.NewReaderSize(os.Stdin, int(128*datasize.MB)) + 
buf := make([]byte, 0, int(1*datasize.MB)) var l uint64 for l, err = binary.ReadUvarint(r); err == nil; l, err = binary.ReadUvarint(r) { if cap(buf) < int(l) { @@ -254,8 +359,11 @@ func doRetireCommand(cliCtx *cli.Context) error { if err := rawdb.WriteSnapshots(tx, br.Snapshots().Files()); err != nil { return err } - if err := br.PruneAncientBlocks(tx); err != nil { - return err + log.Info("prune blocks from db\n") + for j := 0; j < 10_000; j++ { // prune happens by small steps, so need many runs + if err := br.PruneAncientBlocks(tx); err != nil { + return err + } } return nil }); err != nil { @@ -263,11 +371,6 @@ func doRetireCommand(cliCtx *cli.Context) error { } } - _, err := snapshotsync.EnforceSnapshotsInvariant(db, dirs.Snap, snapshots, nil) - if err != nil { - return err - } - return nil } @@ -283,32 +386,40 @@ func doSnapshotCommand(cliCtx *cli.Context) error { } dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name)) dir.MustExist(dirs.Snap) - dir.MustExist(filepath.Join(dirs.Snap, "db")) // this folder will be checked on existance - to understand that snapshots are ready + dir.MustExist(dirs.SnapHistory) dir.MustExist(dirs.Tmp) db := mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(dirs.Chaindata).MustOpen() defer db.Close() - if err := snapshotBlocks(ctx, db, fromBlock, toBlock, segmentSize, dirs.Snap, dirs.Tmp); err != nil { - log.Error("Error", "err", err) - } - - _, err := snapshotsync.EnforceSnapshotsInvariant(db, dirs.Snap, nil, nil) - if err != nil { - return err + { + if err := snapshotBlocks(ctx, db, fromBlock, toBlock, segmentSize, dirs.Snap, dirs.Tmp); err != nil { + log.Error("Error", "err", err) + } + allSnapshots := snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, true, true), dirs.Snap) + if err := allSnapshots.ReopenFolder(); err != nil { + return err + } + if err := db.Update(ctx, func(tx kv.RwTx) error { + return rawdb.WriteSnapshots(tx, allSnapshots.Files()) + }); err != nil { + return err + } } return nil } -func 
rebuildIndices(ctx context.Context, db kv.RoDB, cfg ethconfig.Snapshot, dirs datadir.Dirs, from uint64, workers int) error { - chainConfig := tool.ChainConfigFromDB(db) +func rebuildIndices(logPrefix string, ctx context.Context, db kv.RoDB, cfg ethconfig.Snapshot, dirs datadir.Dirs, from uint64, workers int) error { + chainConfig := fromdb.ChainConfig(db) chainID, _ := uint256.FromBig(chainConfig.ChainID) allSnapshots := snapshotsync.NewRoSnapshots(cfg, dirs.Snap) if err := allSnapshots.ReopenFolder(); err != nil { return err } - if err := snapshotsync.BuildMissedIndices(ctx, allSnapshots.Dir(), *chainID, dirs.Tmp, workers, log.LvlInfo); err != nil { + allSnapshots.LogStat() + + if err := snapshotsync.BuildMissedIndices(logPrefix, ctx, dirs, *chainID, workers); err != nil { return err } return nil diff --git a/turbo/builder/block_builder.go b/turbo/builder/block_builder.go index 5fc57554171..2a6e440eaca 100644 --- a/turbo/builder/block_builder.go +++ b/turbo/builder/block_builder.go @@ -54,3 +54,10 @@ func (b *BlockBuilder) Stop() *types.Block { return b.block } + +func (b *BlockBuilder) Block() *types.Block { + b.syncCond.L.Lock() + defer b.syncCond.L.Unlock() + + return b.block +} diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 26af061f036..c5ec18adfe9 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -11,6 +11,7 @@ var DefaultFlags = []cli.Flag{ utils.DataDirFlag, utils.EthashDatasetDirFlag, utils.SnapshotFlag, + utils.LightClientFlag, utils.TxPoolDisableFlag, utils.TxPoolLocalsFlag, utils.TxPoolNoLocalsFlag, @@ -49,12 +50,13 @@ var DefaultFlags = []cli.Flag{ utils.HTTPEnabledFlag, utils.HTTPListenAddrFlag, utils.HTTPPortFlag, - utils.EngineAddr, - utils.EnginePort, + utils.AuthRpcAddr, + utils.AuthRpcPort, utils.JWTSecretPath, utils.HttpCompressionFlag, utils.HTTPCORSDomainFlag, utils.HTTPVirtualHostsFlag, + utils.AuthRpcVirtualHostsFlag, utils.HTTPApiFlag, utils.WSEnabledFlag, utils.WsCompressionFlag, @@ 
-66,17 +68,15 @@ var DefaultFlags = []cli.Flag{ utils.RpcAccessListFlag, utils.RpcTraceCompatFlag, utils.RpcGasCapFlag, - utils.StarknetGrpcAddressFlag, - utils.TevmFlag, - utils.MemoryOverlayFlag, utils.TxpoolApiAddrFlag, utils.TraceMaxtracesFlag, HTTPReadTimeoutFlag, HTTPWriteTimeoutFlag, HTTPIdleTimeoutFlag, - EngineReadTimeoutFlag, - EngineWriteTimeoutFlag, - EngineIdleTimeoutFlag, + AuthRpcReadTimeoutFlag, + AuthRpcWriteTimeoutFlag, + AuthRpcIdleTimeoutFlag, + EvmCallTimeoutFlag, utils.SnapKeepBlocksFlag, utils.SnapStopFlag, @@ -113,6 +113,7 @@ var DefaultFlags = []cli.Flag{ utils.MetricsEnabledExpensiveFlag, utils.MetricsHTTPFlag, utils.MetricsPortFlag, + utils.HistoryV3Flag, utils.IdentityFlag, utils.CliqueSnapshotCheckpointIntervalFlag, utils.CliqueSnapshotInmemorySnapshotsFlag, @@ -138,4 +139,6 @@ var DefaultFlags = []cli.Flag{ utils.EthStatsURLFlag, utils.OverrideTerminalTotalDifficulty, utils.OverrideMergeNetsplitBlock, + + utils.ConfigFlag, } diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index 7dd6f18fd20..c13da0ec028 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -64,26 +64,28 @@ var ( r - prune receipts (Receipts, Logs, LogTopicIndex, LogAddressIndex - used by eth_getLogs and similar RPC methods) t - prune transaction by it's hash index c - prune call traces (used by trace_filter method) - Does delete data older than 90K blocks, --prune=h is shortcut for: --prune.h.older=90_000 - If item is NOT in the list - means NO pruning for this data. - Example: --prune=hrtc`, + Does delete data older than 90K blocks, --prune=h is shortcut for: --prune.h.older=90000. + Similarly, --prune=t is shortcut for: --prune.t.older=90000 and --prune=c is shortcut for: --prune.c.older=90000. + However, --prune=r means to prune receipts before the Beacon Chain genesis (Consensus Layer might need receipts after that). + If an item is NOT on the list - means NO pruning for this data. 
+ Example: --prune=htc`, Value: "disabled", } PruneHistoryFlag = cli.Uint64Flag{ Name: "prune.h.older", - Usage: `Prune data after this amount of blocks (if --prune flag has 'h', then default is 90K)`, + Usage: `Prune data older than this number of blocks from the tip of the chain (if --prune flag has 'h', then default is 90K)`, } PruneReceiptFlag = cli.Uint64Flag{ Name: "prune.r.older", - Usage: `Prune data after this amount of blocks (if --prune flag has 'r', then default is 90K)`, + Usage: `Prune data older than this number of blocks from the tip of the chain`, } PruneTxIndexFlag = cli.Uint64Flag{ Name: "prune.t.older", - Usage: `Prune data after this amount of blocks (if --prune flag has 't', then default is 90K)`, + Usage: `Prune data older than this number of blocks from the tip of the chain (if --prune flag has 't', then default is 90K)`, } PruneCallTracesFlag = cli.Uint64Flag{ Name: "prune.c.older", - Usage: `Prune data after this amount of blocks (if --prune flag has 'c', then default is 90K)`, + Usage: `Prune data older than this number of blocks from the tip of the chain (if --prune flag has 'c', then default is 90K)`, } PruneHistoryBeforeFlag = cli.Uint64Flag{ @@ -169,25 +171,32 @@ var ( Value: rpccfg.DefaultHTTPTimeouts.IdleTimeout, } - EngineReadTimeoutFlag = cli.DurationFlag{ - Name: "engine.timeouts.read", + AuthRpcReadTimeoutFlag = cli.DurationFlag{ + Name: "authrpc.timeouts.read", Usage: "Maximum duration for reading the entire request, including the body.", Value: rpccfg.DefaultHTTPTimeouts.ReadTimeout, } - EngineWriteTimeoutFlag = cli.DurationFlag{ - Name: "engine.timeouts.write", + AuthRpcWriteTimeoutFlag = cli.DurationFlag{ + Name: "authrpc.timeouts.write", Usage: "Maximum duration before timing out writes of the response. 
It is reset whenever a new request's header is read.", Value: rpccfg.DefaultHTTPTimeouts.WriteTimeout, } - EngineIdleTimeoutFlag = cli.DurationFlag{ - Name: "engine.timeouts.idle", - Usage: "Maximum amount of time to wait for the next request when keep-alives are enabled. If engine.timeouts.idle is zero, the value of engine.timeouts.read is used.", + AuthRpcIdleTimeoutFlag = cli.DurationFlag{ + Name: "authrpc.timeouts.idle", + Usage: "Maximum amount of time to wait for the next request when keep-alives are enabled. If authrpc.timeouts.idle is zero, the value of authrpc.timeouts.read is used.", Value: rpccfg.DefaultHTTPTimeouts.IdleTimeout, } + + EvmCallTimeoutFlag = cli.DurationFlag{ + Name: "rpc.evmtimeout", + Usage: "Maximum amount of time to wait for the answer from EVM call.", + Value: rpccfg.DefaultEvmCallTimeout, + } ) func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config) { mode, err := prune.FromCli( + cfg.Genesis.Config.ChainID.Uint64(), ctx.GlobalString(PruneFlag.Name), ctx.GlobalUint64(PruneHistoryFlag.Name), ctx.GlobalUint64(PruneReceiptFlag.Name), @@ -276,7 +285,7 @@ func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) { beforeC = *v } - mode, err := prune.FromCli(*v, exactH, exactR, exactT, exactC, beforeH, beforeR, beforeT, beforeC, experiments) + mode, err := prune.FromCli(cfg.Genesis.Config.ChainID.Uint64(), *v, exactH, exactR, exactT, exactC, beforeH, beforeR, beforeT, beforeC, experiments) if err != nil { utils.Fatalf(fmt.Sprintf("error while parsing mode: %v", err)) } @@ -315,6 +324,10 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config) { if jwtSecretPath == "" { jwtSecretPath = cfg.Dirs.DataDir + "/jwt.hex" } + + apis := ctx.GlobalString(utils.HTTPApiFlag.Name) + log.Info("starting HTTP APIs", "APIs", apis) + c := &httpcfg.HttpCfg{ Enabled: ctx.GlobalBool(utils.HTTPEnabledFlag.Name), Dirs: cfg.Dirs, @@ -323,25 +336,27 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config) { 
TLSCACert: cfg.TLSCACert, TLSCertfile: cfg.TLSCertFile, - HttpListenAddress: ctx.GlobalString(utils.HTTPListenAddrFlag.Name), - HttpPort: ctx.GlobalInt(utils.HTTPPortFlag.Name), - EngineHTTPListenAddress: ctx.GlobalString(utils.EngineAddr.Name), - EnginePort: ctx.GlobalInt(utils.EnginePort.Name), - JWTSecretPath: jwtSecretPath, - TraceRequests: ctx.GlobalBool(utils.HTTPTraceFlag.Name), - HttpCORSDomain: strings.Split(ctx.GlobalString(utils.HTTPCORSDomainFlag.Name), ","), - HttpVirtualHost: strings.Split(ctx.GlobalString(utils.HTTPVirtualHostsFlag.Name), ","), - API: strings.Split(ctx.GlobalString(utils.HTTPApiFlag.Name), ","), + HttpListenAddress: ctx.GlobalString(utils.HTTPListenAddrFlag.Name), + HttpPort: ctx.GlobalInt(utils.HTTPPortFlag.Name), + AuthRpcHTTPListenAddress: ctx.GlobalString(utils.AuthRpcAddr.Name), + AuthRpcPort: ctx.GlobalInt(utils.AuthRpcPort.Name), + JWTSecretPath: jwtSecretPath, + TraceRequests: ctx.GlobalBool(utils.HTTPTraceFlag.Name), + HttpCORSDomain: strings.Split(ctx.GlobalString(utils.HTTPCORSDomainFlag.Name), ","), + HttpVirtualHost: strings.Split(ctx.GlobalString(utils.HTTPVirtualHostsFlag.Name), ","), + AuthRpcVirtualHost: strings.Split(ctx.GlobalString(utils.AuthRpcVirtualHostsFlag.Name), ","), + API: strings.Split(apis, ","), HTTPTimeouts: rpccfg.HTTPTimeouts{ ReadTimeout: ctx.GlobalDuration(HTTPReadTimeoutFlag.Name), WriteTimeout: ctx.GlobalDuration(HTTPWriteTimeoutFlag.Name), IdleTimeout: ctx.GlobalDuration(HTTPIdleTimeoutFlag.Name), }, - EngineTimeouts: rpccfg.HTTPTimeouts{ - ReadTimeout: ctx.GlobalDuration(EngineReadTimeoutFlag.Name), - WriteTimeout: ctx.GlobalDuration(EngineWriteTimeoutFlag.Name), + AuthRpcTimeouts: rpccfg.HTTPTimeouts{ + ReadTimeout: ctx.GlobalDuration(AuthRpcReadTimeoutFlag.Name), + WriteTimeout: ctx.GlobalDuration(AuthRpcWriteTimeoutFlag.Name), IdleTimeout: ctx.GlobalDuration(HTTPIdleTimeoutFlag.Name), }, + EvmCallTimeout: ctx.GlobalDuration(EvmCallTimeoutFlag.Name), WebsocketEnabled: 
ctx.GlobalIsSet(utils.WSEnabledFlag.Name), RpcBatchConcurrency: ctx.GlobalUint(utils.RpcBatchConcurrencyFlag.Name), @@ -351,8 +366,6 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config) { Gascap: ctx.GlobalUint64(utils.RpcGasCapFlag.Name), MaxTraces: ctx.GlobalUint64(utils.TraceMaxtracesFlag.Name), TraceCompatibility: ctx.GlobalBool(utils.RpcTraceCompatFlag.Name), - StarknetGRPCAddress: ctx.GlobalString(utils.StarknetGrpcAddressFlag.Name), - TevmEnabled: ctx.GlobalBool(utils.TevmFlag.Name), TxPoolApiAddr: ctx.GlobalString(utils.TxpoolApiAddrFlag.Name), diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index f79e2541c14..59766a1a4fc 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -16,20 +16,17 @@ package engineapi import ( "bytes" "context" - "encoding/binary" "fmt" + "sync" - "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/log/v3" @@ -38,7 +35,7 @@ import ( // the maximum point from the current head, past which side forks are not validated anymore. const maxForkDepth = 32 // 32 slots is the duration of an epoch thus there cannot be side forks in PoS deeper than 32 blocks from head. 
-type validatePayloadFunc func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error +type validatePayloadFunc func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody, *shards.Notifications) error // Fork segment is a side fork segment and repressent a full side fork block. type forkSegment struct { @@ -53,12 +50,16 @@ type ForkValidator struct { sideForksBlock map[common.Hash]forkSegment // current memory batch containing chain head that extend canonical fork. extendingFork *memdb.MemoryMutation + // notifications accumulated for the extending fork + extendingForkNotifications *shards.Notifications // hash of chain head that extend canonical fork. extendingForkHeadHash common.Hash // this is the function we use to perform payload validation. validatePayload validatePayloadFunc // this is the current point where we processed the chain so far. currentHeight uint64 + // we want fork validator to be thread safe so let + lock sync.Mutex } func NewForkValidatorMock(currentHeight uint64) *ForkValidator { @@ -78,10 +79,12 @@ func NewForkValidator(currentHeight uint64, validatePayload validatePayloadFunc) // ExtendingForkHeadHash return the fork head hash of the fork that extends the canonical chain. 
func (fv *ForkValidator) ExtendingForkHeadHash() common.Hash { + fv.lock.Lock() + defer fv.lock.Unlock() return fv.extendingForkHeadHash } -func (fv *ForkValidator) rewindAccumulator(to uint64, accumulator *shards.Accumulator, c shards.StateChangeConsumer) error { +func (fv *ForkValidator) notifyTxPool(to uint64, accumulator *shards.Accumulator, c shards.StateChangeConsumer) error { hash, err := rawdb.ReadCanonicalHash(fv.extendingFork, to) if err != nil { return fmt.Errorf("read canonical hash of unwind point: %w", err) @@ -98,68 +101,6 @@ func (fv *ForkValidator) rewindAccumulator(to uint64, accumulator *shards.Accumu // Start the changes accumulator.Reset(0) accumulator.StartChange(to, hash, txs, true) - accChangesCursor, err := fv.extendingFork.CursorDupSort(kv.AccountChangeSet) - if err != nil { - return err - } - defer accChangesCursor.Close() - - storageChangesCursor, err := fv.extendingFork.CursorDupSort(kv.StorageChangeSet) - if err != nil { - return err - } - defer storageChangesCursor.Close() - - startingKey := dbutils.EncodeBlockNumber(to) - // Unwind notifications on accounts - for k, v, err := accChangesCursor.Seek(startingKey); k != nil; k, v, err = accChangesCursor.Next() { - if err != nil { - return err - } - _, dbKey, dbValue, err := changeset.FromDBFormat(k, v) - if err != nil { - return err - } - if len(dbValue) > 0 { - var acc accounts.Account - if err := acc.DecodeForStorage(dbValue); err != nil { - return err - } - // Fetch the code hash - var address common.Address - copy(address[:], dbKey) - if acc.Incarnation > 0 && acc.IsEmptyCodeHash() { - if codeHash, err2 := fv.extendingFork.GetOne(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], acc.Incarnation)); err2 == nil { - copy(acc.CodeHash[:], codeHash) - } - } - - newV := make([]byte, acc.EncodingLengthForStorage()) - acc.EncodeForStorage(newV) - accumulator.ChangeAccount(address, acc.Incarnation, newV) - } else { - var address common.Address - copy(address[:], dbKey) - 
accumulator.DeleteAccount(address) - } - } - // Unwind notifications on storage - for k, v, err := storageChangesCursor.Seek(startingKey); k != nil; k, v, err = accChangesCursor.Next() { - if err != nil { - return err - } - _, dbKey, dbValue, err := changeset.FromDBFormat(k, v) - if err != nil { - return err - } - var address common.Address - var incarnation uint64 - var location common.Hash - copy(address[:], dbKey[:length.Addr]) - incarnation = binary.BigEndian.Uint64(dbKey[length.Addr:]) - copy(location[:], dbKey[length.Addr+length.Incarnation:]) - accumulator.ChangeStorage(address, incarnation, location, common.CopyBytes(dbValue)) - } accumulator.SendAndReset(context.Background(), c, header.BaseFee.Uint64(), header.GasLimit) log.Info("Transaction pool notified of discard side fork.") return nil @@ -167,25 +108,35 @@ func (fv *ForkValidator) rewindAccumulator(to uint64, accumulator *shards.Accumu // NotifyCurrentHeight is to be called at the end of the stage cycle and repressent the last processed block. func (fv *ForkValidator) NotifyCurrentHeight(currentHeight uint64) { + fv.lock.Lock() + defer fv.lock.Unlock() + if fv.currentHeight == currentHeight { + return + } fv.currentHeight = currentHeight // If the head changed,e previous assumptions on head are incorrect now. if fv.extendingFork != nil { fv.extendingFork.Rollback() } fv.extendingFork = nil + fv.extendingForkNotifications = nil fv.extendingForkHeadHash = common.Hash{} } // FlushExtendingFork flush the current extending fork if fcu chooses its head hash as the its forkchoice. -func (fv *ForkValidator) FlushExtendingFork(tx kv.RwTx) error { +func (fv *ForkValidator) FlushExtendingFork(tx kv.RwTx, accumulator *shards.Accumulator) error { + fv.lock.Lock() + defer fv.lock.Unlock() // Flush changes to db. 
if err := fv.extendingFork.Flush(tx); err != nil { return err } + fv.extendingForkNotifications.Accumulator.CopyAndReset(accumulator) // Clean extending fork data fv.extendingFork.Rollback() fv.extendingForkHeadHash = common.Hash{} fv.extendingFork = nil + fv.extendingForkNotifications = nil return nil } @@ -194,28 +145,35 @@ func (fv *ForkValidator) FlushExtendingFork(tx kv.RwTx) error { // if the payload is a fork then we unwind to the point where the fork meet the canonical chain and we check if it is valid or not from there. // if for any reasons none of the action above can be performed due to lack of information, we accept the payload and avoid validation. func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, extendCanonical bool) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { + fv.lock.Lock() + defer fv.lock.Unlock() if fv.validatePayload == nil { status = remote.EngineStatus_ACCEPTED return } defer fv.clean() + // If the block is stored within the side fork it means it was already validated. + if _, ok := fv.sideForksBlock[header.Hash()]; ok { + status = remote.EngineStatus_VALID + latestValidHash = header.Hash() + return + } + if extendCanonical { // If the new block extends the canonical chain we update extendingFork. if fv.extendingFork == nil { fv.extendingFork = memdb.NewMemoryBatch(tx) + fv.extendingForkNotifications = &shards.Notifications{ + Events: shards.NewEvents(), + Accumulator: shards.NewAccumulator(), + } } else { fv.extendingFork.UpdateTxn(tx) } // Update fork head hash. fv.extendingForkHeadHash = header.Hash() - return fv.validateAndStorePayload(fv.extendingFork, header, body, 0, nil, nil) - } - // If the block is stored within the side fork it means it was already validated. 
- if _, ok := fv.sideForksBlock[header.Hash()]; ok { - status = remote.EngineStatus_VALID - latestValidHash = header.Hash() - return + return fv.validateAndStorePayload(fv.extendingFork, header, body, 0, nil, nil, fv.extendingForkNotifications) } // if the block is not in range of maxForkDepth from head then we do not validate it. @@ -228,7 +186,6 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body currentHash := header.ParentHash foundCanonical, criticalError = rawdb.IsCanonicalHash(tx, currentHash) if criticalError != nil { - fmt.Println("critical") return } @@ -243,8 +200,18 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body status = remote.EngineStatus_ACCEPTED return } - headersChain = append(headersChain, sb.header) - bodiesChain = append(bodiesChain, sb.body) + headersChain = append([]*types.Header{sb.header}, headersChain...) + bodiesChain = append([]*types.RawBody{sb.body}, bodiesChain...) + has, err := tx.Has(kv.BlockBody, dbutils.BlockBodyKey(sb.header.Number.Uint64(), sb.header.Hash())) + if err != nil { + criticalError = err + return + } + // MakesBodyCanonical do not support PoS. + if has { + status = remote.EngineStatus_ACCEPTED + return + } currentHash = sb.header.ParentHash foundCanonical, criticalError = rawdb.IsCanonicalHash(tx, currentHash) if criticalError != nil { @@ -256,41 +223,56 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body if unwindPoint == fv.currentHeight { unwindPoint = 0 } - // if it is not canonical we validate it in memory and discard it aferwards. 
batch := memdb.NewMemoryBatch(tx) - defer batch.Close() - return fv.validateAndStorePayload(batch, header, body, unwindPoint, headersChain, bodiesChain) + defer batch.Rollback() + notifications := &shards.Notifications{ + Events: shards.NewEvents(), + Accumulator: shards.NewAccumulator(), + } + return fv.validateAndStorePayload(batch, header, body, unwindPoint, headersChain, bodiesChain, notifications) } // Clear wipes out current extending fork data, this method is called after fcu is called, // because fcu decides what the head is and after the call is done all the non-chosed forks are // to be considered obsolete. -func (fv *ForkValidator) Clear() { +func (fv *ForkValidator) clear() { if fv.extendingFork != nil { fv.extendingFork.Rollback() } fv.extendingForkHeadHash = common.Hash{} fv.extendingFork = nil + //fv.sideForksBlock = map[common.Hash]forkSegment{} +} + +// TryAddingPoWBlock adds a PoW block to the fork validator if possible +func (fv *ForkValidator) TryAddingPoWBlock(block *types.Block) { + defer fv.clean() + fv.lock.Lock() + defer fv.lock.Unlock() + fv.sideForksBlock[block.Hash()] = forkSegment{block.Header(), block.RawBody()} } // Clear wipes out current extending fork data and notify txpool. func (fv *ForkValidator) ClearWithUnwind(tx kv.RwTx, accumulator *shards.Accumulator, c shards.StateChangeConsumer) { + fv.lock.Lock() + defer fv.lock.Unlock() sb, ok := fv.sideForksBlock[fv.extendingForkHeadHash] // If we did not flush the fork state, then we need to notify the txpool through unwind. if fv.extendingFork != nil && accumulator != nil && fv.extendingForkHeadHash != (common.Hash{}) && ok { fv.extendingFork.UpdateTxn(tx) // this will call unwind of extending fork to notify txpool of reverting transactions. 
- if err := fv.rewindAccumulator(sb.header.Number.Uint64()-1, accumulator, c); err != nil { + if err := fv.notifyTxPool(sb.header.Number.Uint64()-1, accumulator, c); err != nil { log.Warn("could not notify txpool of invalid side fork", "err", err) } fv.extendingFork.Rollback() } - fv.Clear() + fv.clear() } // validateAndStorePayload validate and store a payload fork chain if such chain results valid. -func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { - validationError = fv.validatePayload(tx, header, body, unwindPoint, headersChain, bodiesChain) +func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, + notifications *shards.Notifications) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { + validationError = fv.validatePayload(tx, header, body, unwindPoint, headersChain, bodiesChain, notifications) latestValidHash = header.Hash() if validationError != nil { latestValidHash = header.ParentHash @@ -309,6 +291,10 @@ func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Heade if criticalError != nil { return } + if bodyWithTxs == nil { + criticalError = fmt.Errorf("ForkValidator failed to recover block body: %d, %x", header.Number.Uint64(), header.Hash()) + return + } var encodedTxs [][]byte buf := bytes.NewBuffer(nil) for _, tx := range bodyWithTxs.Transactions { diff --git a/turbo/node/node.go b/turbo/node/node.go index 3467176a878..dfb29bbd358 100644 --- a/turbo/node/node.go +++ b/turbo/node/node.go @@ -2,9 +2,6 @@ package node import ( - "fmt" - "time" - "github.com/ledgerwatch/erigon-lib/kv" 
"github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/eth" @@ -93,7 +90,6 @@ func NewNodConfigUrfave(ctx *cli.Context) *nodecfg.Config { return nodeConfig } func NewEthConfigUrfave(ctx *cli.Context, nodeConfig *nodecfg.Config) *ethconfig.Config { - defer func(t time.Time) { fmt.Printf("node.go:97: %s\n", time.Since(t)) }(time.Now()) ethConfig := ðconfig.Defaults utils.SetEthConfig(ctx, nodeConfig, ethConfig) erigoncli.ApplyFlagsForEthConfig(ctx, ethConfig) @@ -111,19 +107,18 @@ func New( logger log.Logger, ) (*ErigonNode, error) { //prepareBuckets(optionalParams.CustomBuckets) - node := makeConfigNode(nodeConfig) - ethereum, err := RegisterEthService(node, ethConfig, logger) + node, err := node.New(nodeConfig) + if err != nil { + utils.Fatalf("Failed to create Erigon node: %v", err) + } + + ethereum, err := eth.New(node, ethConfig, logger) if err != nil { return nil, err } return &ErigonNode{stack: node, backend: ethereum}, nil } -// RegisterEthService adds an Ethereum client to the stack. 
-func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, logger log.Logger) (*eth.Ethereum, error) { - return eth.New(stack, cfg, logger) -} - func NewNodeConfig() *nodecfg.Config { nodeConfig := nodecfg.DefaultConfig // see simiar changes in `cmd/geth/config.go#defaultNodeConfig` @@ -136,16 +131,3 @@ func NewNodeConfig() *nodecfg.Config { nodeConfig.Name = "erigon" return &nodeConfig } - -func MakeConfigNodeDefault() *node.Node { - return makeConfigNode(NewNodeConfig()) -} - -func makeConfigNode(config *nodecfg.Config) *node.Node { - stack, err := node.New(config) - if err != nil { - utils.Fatalf("Failed to create Erigon node: %v", err) - } - - return stack -} diff --git a/turbo/rpchelper/filters.go b/turbo/rpchelper/filters.go index 60a312d1961..c9222b506bc 100644 --- a/turbo/rpchelper/filters.go +++ b/turbo/rpchelper/filters.go @@ -17,12 +17,13 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" txpool2 "github.com/ledgerwatch/erigon-lib/txpool" + "github.com/ledgerwatch/log/v3" + "google.golang.org/grpc" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/filters" "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/log/v3" - "google.golang.org/grpc" ) type ( @@ -396,53 +397,73 @@ func (ff *Filters) SubscribeLogs(out chan *types.Log, crit filters.FilterCriteri } f.topicsOriginal = crit.Topics ff.logsSubs.addLogsFilters(f) + // if any filter in the aggregate needs all addresses or all topics then the global log subscription needs to + // allow all addresses or topics through lfr := &remote.LogsFilterRequest{ - AllAddresses: ff.logsSubs.aggLogsFilter.allAddrs == 1, - AllTopics: ff.logsSubs.aggLogsFilter.allTopics == 1, + AllAddresses: ff.logsSubs.aggLogsFilter.allAddrs >= 1, + AllTopics: ff.logsSubs.aggLogsFilter.allTopics >= 1, } - for addr := range ff.logsSubs.aggLogsFilter.addrs { + + addresses, topics := 
ff.logsSubs.getAggMaps() + + for addr := range addresses { lfr.Addresses = append(lfr.Addresses, gointerfaces.ConvertAddressToH160(addr)) } - for topic := range ff.logsSubs.aggLogsFilter.topics { + for topic := range topics { lfr.Topics = append(lfr.Topics, gointerfaces.ConvertHashToH256(topic)) } - ff.mu.Lock() - defer ff.mu.Unlock() - loaded := ff.logsRequestor.Load() + + loaded := ff.loadLogsRequester() if loaded != nil { if err := loaded.(func(*remote.LogsFilterRequest) error)(lfr); err != nil { log.Warn("Could not update remote logs filter", "err", err) ff.logsSubs.removeLogsFilter(id) } } + return id } +func (ff *Filters) loadLogsRequester() any { + ff.mu.Lock() + defer ff.mu.Unlock() + return ff.logsRequestor.Load() +} + func (ff *Filters) UnsubscribeLogs(id LogsSubID) bool { isDeleted := ff.logsSubs.removeLogsFilter(id) + // if any filters in the aggregate need all addresses or all topics then the request to the central + // log subscription needs to honour this lfr := &remote.LogsFilterRequest{ - AllAddresses: ff.logsSubs.aggLogsFilter.allAddrs == 1, - AllTopics: ff.logsSubs.aggLogsFilter.allTopics == 1, + AllAddresses: ff.logsSubs.aggLogsFilter.allAddrs >= 1, + AllTopics: ff.logsSubs.aggLogsFilter.allTopics >= 1, } - for addr := range ff.logsSubs.aggLogsFilter.addrs { + + addresses, topics := ff.logsSubs.getAggMaps() + + for addr := range addresses { lfr.Addresses = append(lfr.Addresses, gointerfaces.ConvertAddressToH160(addr)) } - for topic := range ff.logsSubs.aggLogsFilter.topics { + for topic := range topics { lfr.Topics = append(lfr.Topics, gointerfaces.ConvertHashToH256(topic)) } - ff.mu.Lock() - defer ff.mu.Unlock() - loaded := ff.logsRequestor.Load() + loaded := ff.loadLogsRequester() if loaded != nil { if err := loaded.(func(*remote.LogsFilterRequest) error)(lfr); err != nil { log.Warn("Could not update remote logs filter", "err", err) return isDeleted || ff.logsSubs.removeLogsFilter(id) } } + + ff.deleteLogStore(id) + + return isDeleted +} + 
+func (ff *Filters) deleteLogStore(id LogsSubID) { ff.storeMu.Lock() defer ff.storeMu.Unlock() delete(ff.logsStores, id) - return isDeleted } func (ff *Filters) OnNewEvent(event *remote.SubscribeReply) { @@ -522,21 +543,6 @@ func (ff *Filters) OnNewTx(reply *txpool.OnAddReply) { } func (ff *Filters) OnNewLogs(reply *remote.SubscribeLogsReply) { - lg := &types.Log{ - Address: gointerfaces.ConvertH160toAddress(reply.Address), - Data: reply.Data, - BlockNumber: reply.BlockNumber, - TxHash: gointerfaces.ConvertH256ToHash(reply.TransactionHash), - TxIndex: uint(reply.TransactionIndex), - BlockHash: gointerfaces.ConvertH256ToHash(reply.BlockHash), - Index: uint(reply.LogIndex), - Removed: reply.Removed, - } - t := make([]common.Hash, 0) - for _, v := range reply.Topics { - t = append(t, gointerfaces.ConvertH256ToHash(v)) - } - lg.Topics = t ff.logsSubs.distributeLog(reply) } diff --git a/turbo/rpchelper/filters_test.go b/turbo/rpchelper/filters_test.go new file mode 100644 index 00000000000..82032c01551 --- /dev/null +++ b/turbo/rpchelper/filters_test.go @@ -0,0 +1,338 @@ +package rpchelper + +import ( + "context" + "testing" + + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + + types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/filters" +) + +func createLog() *remote.SubscribeLogsReply { + return &remote.SubscribeLogsReply{ + Address: gointerfaces.ConvertAddressToH160([20]byte{}), + BlockHash: gointerfaces.ConvertHashToH256([32]byte{}), + BlockNumber: 0, + Data: []byte{}, + LogIndex: 0, + Topics: []*types2.H256{gointerfaces.ConvertHashToH256([32]byte{99, 99})}, + TransactionHash: gointerfaces.ConvertHashToH256([32]byte{}), + TransactionIndex: 0, + Removed: false, + } +} + +var ( + address1 = common.HexToAddress("0xdac17f958d2ee523a2206206994597c13d831ec7") + address1H160 = 
gointerfaces.ConvertAddressToH160(address1) + topic1 = common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") + topic1H256 = gointerfaces.ConvertHashToH256(topic1) +) + +func TestFilters_SingleSubscription_OnlyTopicsSubscribedAreBroadcast(t *testing.T) { + f := New(context.TODO(), nil, nil, nil, func() {}) + + subbedTopic := common.BytesToHash([]byte{10, 20}) + + criteria := filters.FilterCriteria{ + Addresses: nil, + Topics: [][]common.Hash{{subbedTopic}}, + } + + outChan := make(chan *types.Log, 1) + + f.SubscribeLogs(outChan, criteria) + + // now create a log for some other topic and distribute it + log := createLog() + + f.OnNewLogs(log) + + if len(outChan) != 0 { + t.Error("expected the subscription channel to be empty") + } + + // now a log that the subscription cares about + log.Topics = []*types2.H256{gointerfaces.ConvertHashToH256(subbedTopic)} + + f.OnNewLogs(log) + + if len(outChan) != 1 { + t.Error("expected a message in the channel for the subscribed topic") + } +} + +func TestFilters_SingleSubscription_EmptyTopicsInCriteria_OnlyTopicsSubscribedAreBroadcast(t *testing.T) { + f := New(context.TODO(), nil, nil, nil, func() {}) + + var nilTopic common.Hash + subbedTopic := common.BytesToHash([]byte{10, 20}) + + criteria := filters.FilterCriteria{ + Addresses: nil, + Topics: [][]common.Hash{{nilTopic, subbedTopic, nilTopic}}, + } + + outChan := make(chan *types.Log, 1) + + f.SubscribeLogs(outChan, criteria) + + // now create a log for some other topic and distribute it + log := createLog() + + f.OnNewLogs(log) + + if len(outChan) != 0 { + t.Error("expected the subscription channel to be empty") + } + + // now a log that the subscription cares about + log.Topics = []*types2.H256{gointerfaces.ConvertHashToH256(subbedTopic)} + + f.OnNewLogs(log) + + if len(outChan) != 1 { + t.Error("expected a message in the channel for the subscribed topic") + } +} + +func TestFilters_TwoSubscriptionsWithDifferentCriteria(t *testing.T) { + f 
:= New(context.TODO(), nil, nil, nil, func() {}) + + criteria1 := filters.FilterCriteria{ + Addresses: nil, + Topics: [][]common.Hash{}, + } + criteria2 := filters.FilterCriteria{ + Addresses: nil, + Topics: [][]common.Hash{{topic1}}, + } + + chan1 := make(chan *types.Log, 256) + chan2 := make(chan *types.Log, 256) + + f.SubscribeLogs(chan1, criteria1) + f.SubscribeLogs(chan2, criteria2) + + // now create a log for some other topic and distribute it + log := createLog() + + f.OnNewLogs(log) + + if len(chan1) != 1 { + t.Error("expected channel 1 to receive the log message, no filters") + } + if len(chan2) != 0 { + t.Error("expected channel 2 to be empty, it has a topic filter") + } + + // now a log that the subscription cares about + log.Topics = []*types2.H256{gointerfaces.ConvertHashToH256(topic1)} + + f.OnNewLogs(log) + + if len(chan1) != 2 { + t.Error("expected the second log to be in the channel with no filters") + } + if len(chan2) != 1 { + t.Error("expected the channel with filters to receive the message as the filter matches") + } +} + +func TestFilters_ThreeSubscriptionsWithDifferentCriteria(t *testing.T) { + f := New(context.TODO(), nil, nil, nil, func() {}) + + criteria1 := filters.FilterCriteria{ + Addresses: nil, + Topics: [][]common.Hash{}, + } + criteria2 := filters.FilterCriteria{ + Addresses: nil, + Topics: [][]common.Hash{{topic1}}, + } + criteria3 := filters.FilterCriteria{ + Addresses: []common.Address{common.HexToAddress(address1.String())}, + Topics: [][]common.Hash{}, + } + + chan1 := make(chan *types.Log, 256) + chan2 := make(chan *types.Log, 256) + chan3 := make(chan *types.Log, 256) + + f.SubscribeLogs(chan1, criteria1) + f.SubscribeLogs(chan2, criteria2) + f.SubscribeLogs(chan3, criteria3) + + // now create a log for some other topic and distribute it + log := createLog() + + f.OnNewLogs(log) + + if len(chan1) != 1 { + t.Error("expected channel 1 to receive the log message, no filters") + } + if len(chan2) != 0 { + t.Error("expected 
channel 2 to be empty, it has a topic filter") + } + if len(chan3) != 0 { + t.Error("expected channel 3 to be empty as the address doesn't match") + } + + // now a log that the subscription cares about + var a common.Address + a.SetBytes(address1.Bytes()) + log.Address = gointerfaces.ConvertAddressToH160(a) + + f.OnNewLogs(log) + + if len(chan1) != 2 { + t.Error("expected the second log to be in the channel with no filters") + } + if len(chan2) != 0 { + t.Error("expected the second channel to still be empty as no log has the correct topic yet") + } + if len(chan3) != 1 { + t.Error("expected the third channel to have 1 entry as the previous log address matched") + } + + log = createLog() + log.Topics = []*types2.H256{topic1H256} + f.OnNewLogs(log) + + if len(chan1) != 3 { + t.Error("expected the third log to be in the channel with no filters") + } + if len(chan2) != 1 { + t.Error("expected the third channel to contain a log as the topic matched") + } + if len(chan3) != 1 { + t.Error("expected the third channel to still have 1 as the address didn't match in the third log") + } + +} + +func TestFilters_SubscribeLogsGeneratesCorrectLogFilterRequest(t *testing.T) { + var lastFilterRequest *remote.LogsFilterRequest + loadRequester := func(r *remote.LogsFilterRequest) error { + lastFilterRequest = r + return nil + } + + f := New(context.TODO(), nil, nil, nil, func() {}) + f.logsRequestor.Store(loadRequester) + + // first request has no filters + chan1 := make(chan *types.Log, 1) + criteria1 := filters.FilterCriteria{ + Addresses: []common.Address{}, + Topics: [][]common.Hash{}, + } + id1 := f.SubscribeLogs(chan1, criteria1) + + // request should have all addresses and topics enabled + if lastFilterRequest.AllAddresses == false { + t.Error("1: expected all addresses to be true") + } + if lastFilterRequest.AllTopics == false { + t.Error("1: expected all topics to be true") + } + + // second request filters on an address + chan2 := make(chan *types.Log, 1) + criteria2 := 
filters.FilterCriteria{ + Addresses: []common.Address{address1}, + Topics: [][]common.Hash{}, + } + id2 := f.SubscribeLogs(chan2, criteria2) + + // request should have all addresses and all topics still and the new address + if lastFilterRequest.AllAddresses == false { + t.Error("2: expected all addresses to be true") + } + if lastFilterRequest.AllTopics == false { + t.Error("2: expected all topics to be true") + } + if len(lastFilterRequest.Addresses) != 1 && lastFilterRequest.Addresses[0] != address1H160 { + t.Error("2: expected the address to match the last request") + } + + // third request filters on topic + chan3 := make(chan *types.Log, 1) + criteria3 := filters.FilterCriteria{ + Addresses: []common.Address{}, + Topics: [][]common.Hash{{topic1}}, + } + id3 := f.SubscribeLogs(chan3, criteria3) + + // request should have all addresses and all topics as well as the previous address and new topic + if lastFilterRequest.AllAddresses == false { + t.Error("3: expected all addresses to be true") + } + if lastFilterRequest.AllTopics == false { + t.Error("3: expected all topics to be true") + } + if len(lastFilterRequest.Addresses) != 1 && lastFilterRequest.Addresses[0] != address1H160 { + t.Error("3: expected the address to match the previous request") + } + if len(lastFilterRequest.Topics) != 1 && lastFilterRequest.Topics[0] != topic1H256 { + t.Error("3: expected the topics to match the last request") + } + + // now start unsubscribing to check the state of things + + // unsubscribing the first filter should leave us with all topics and all addresses 2 because request 2 and 3 + // have empty addresses and topics between the two of them. 
Effectively the state should be the same as the + // subscription in step 3 + f.UnsubscribeLogs(id1) + if lastFilterRequest.AllAddresses == false { + t.Error("4: expected all addresses to be true") + } + if lastFilterRequest.AllTopics == false { + t.Error("4: expected all topics to be true") + } + if len(lastFilterRequest.Addresses) != 1 && lastFilterRequest.Addresses[0] != address1H160 { + t.Error("4: expected an address to be present") + } + if len(lastFilterRequest.Topics) != 1 && lastFilterRequest.Topics[0] != topic1H256 { + t.Error("4: expected a topic to be present") + } + + // unsubscribing the second filter should remove the all topics filter as the only subscription remaining + // specifies a topic. All addresses should be present still. The state should represent the single + // subscription in step 3 + f.UnsubscribeLogs(id2) + if lastFilterRequest.AllAddresses == false { + t.Error("5: expected all addresses to be true") + } + if lastFilterRequest.AllTopics == true { + t.Error("5: expected all topics to be false") + } + if len(lastFilterRequest.Addresses) != 0 { + t.Error("5: expected addresses to be empty") + } + if len(lastFilterRequest.Topics) != 1 && lastFilterRequest.Topics[0] != topic1H256 { + t.Error("5: expected a topic to be present") + } + + // unsubscribing the last filter should leave us with false for the all addresses and all topics + // and nothing in the address or topics lists + f.UnsubscribeLogs(id3) + if lastFilterRequest.AllAddresses == true { + t.Error("5: expected all addresses to be false") + } + if lastFilterRequest.AllTopics == true { + t.Error("5: expected all topics to be false") + } + if len(lastFilterRequest.Addresses) != 0 { + t.Error("5: expected addresses to be empty") + } + if len(lastFilterRequest.Topics) != 0 { + t.Error("5: expected topics to be empty") + } +} diff --git a/turbo/rpchelper/helper.go b/turbo/rpchelper/helper.go index 6acc313f92e..0431708a203 100644 --- a/turbo/rpchelper/helper.go +++ 
b/turbo/rpchelper/helper.go @@ -6,6 +6,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" + state2 "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" @@ -13,6 +14,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/adapter" + "github.com/ledgerwatch/log/v3" ) // unable to decode supplied params, or an invalid number of parameters @@ -63,10 +65,13 @@ func _GetBlockNumber(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, case rpc.PendingBlockNumber: pendingBlock := filters.LastPendingBlock() if pendingBlock == nil { + log.Warn("no pending block found returning latest executed block") blockNumber = plainStateBlockNumber } else { return pendingBlock.NumberU64(), pendingBlock.Hash(), false, nil } + case rpc.LatestExecutedBlockNumber: + blockNumber = plainStateBlockNumber default: blockNumber = uint64(number.Int64()) } @@ -97,7 +102,7 @@ func GetAccount(tx kv.Tx, blockNumber uint64, address common.Address) (*accounts return reader.ReadAccountData(address) } -func CreateStateReader(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, filters *Filters, stateCache kvcache.Cache) (state.StateReader, error) { +func CreateStateReader(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, filters *Filters, stateCache kvcache.Cache, historyV3 bool, agg *state2.Aggregator22) (state.StateReader, error) { blockNumber, _, latest, err := _GetBlockNumber(true, blockNrOrHash, tx, filters) if err != nil { return nil, err @@ -110,7 +115,20 @@ func CreateStateReader(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNum } stateReader = state.NewCachedReader2(cacheView, tx) } else { - stateReader = state.NewPlainState(tx, blockNumber+1) + if historyV3 { + aggCtx := agg.MakeContext() + 
aggCtx.SetTx(tx) + r := state.NewHistoryReader22(aggCtx) + r.SetTx(tx) + minTxNum, err := rawdb.TxNums.Min(tx, blockNumber+1) + if err != nil { + return nil, err + } + r.SetTxNum(minTxNum) + stateReader = r + } else { + stateReader = state.NewPlainState(tx, blockNumber+1) + } } return stateReader, nil } diff --git a/turbo/rpchelper/interface.go b/turbo/rpchelper/interface.go index 1fde0d8e515..56d2ec288b7 100644 --- a/turbo/rpchelper/interface.go +++ b/turbo/rpchelper/interface.go @@ -29,4 +29,5 @@ type ApiBackend interface { EngineGetPayloadV1(ctx context.Context, payloadId uint64) (*types2.ExecutionPayload, error) NodeInfo(ctx context.Context, limit uint32) ([]p2p.NodeInfo, error) Peers(ctx context.Context) ([]*p2p.PeerInfo, error) + PendingBlock(ctx context.Context) (*types.Block, error) } diff --git a/turbo/rpchelper/logsfilter.go b/turbo/rpchelper/logsfilter.go index 621bbaa72bb..933425643b5 100644 --- a/turbo/rpchelper/logsfilter.go +++ b/turbo/rpchelper/logsfilter.go @@ -5,6 +5,7 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon/common" types2 "github.com/ledgerwatch/erigon/core/types" ) @@ -12,7 +13,7 @@ import ( type LogsFilterAggregator struct { aggLogsFilter LogsFilter // Aggregation of all current log filters logsFilters map[LogsSubID]*LogsFilter // Filter for each subscriber, keyed by filterID - logsFilterLock sync.Mutex + logsFilterLock sync.RWMutex nextFilterId LogsSubID } @@ -81,6 +82,8 @@ func (a *LogsFilterAggregator) subtractLogFilters(f *LogsFilter) { } func (a *LogsFilterAggregator) addLogsFilters(f *LogsFilter) { + a.logsFilterLock.Lock() + defer a.logsFilterLock.Unlock() a.aggLogsFilter.allAddrs += f.allAddrs for addr, count := range f.addrs { a.aggLogsFilter.addrs[addr] += count @@ -91,6 +94,23 @@ func (a *LogsFilterAggregator) addLogsFilters(f *LogsFilter) { } } +func (a *LogsFilterAggregator) getAggMaps() (map[common.Address]int, 
map[common.Hash]int) { + a.logsFilterLock.RLock() + defer a.logsFilterLock.RUnlock() + + addresses := make(map[common.Address]int) + for k, v := range a.aggLogsFilter.addrs { + addresses[k] = v + } + + topics := make(map[common.Hash]int) + for k, v := range a.aggLogsFilter.topics { + topics[k] = v + } + + return addresses, topics +} + func (a *LogsFilterAggregator) distributeLog(eventLog *remote.SubscribeLogsReply) error { a.logsFilterLock.Lock() defer a.logsFilterLock.Unlock() diff --git a/turbo/rpchelper/rpc_block.go b/turbo/rpchelper/rpc_block.go index 4d54b41e3e8..20cf9a4b3fe 100644 --- a/turbo/rpchelper/rpc_block.go +++ b/turbo/rpchelper/rpc_block.go @@ -54,3 +54,11 @@ func GetSafeBlockNumber(tx kv.Tx) (uint64, error) { } return 0, UnknownBlockError } + +func GetLatestExecutedBlockNumber(tx kv.Tx) (uint64, error) { + blockNum, err := stages.GetStageProgress(tx, stages.Execution) + if err != nil { + return 0, err + } + return blockNum, err +} diff --git a/turbo/services/interfaces.go b/turbo/services/interfaces.go index 8ed3056d902..89c21d116b2 100644 --- a/turbo/services/interfaces.go +++ b/turbo/services/interfaces.go @@ -30,7 +30,7 @@ type CanonicalReader interface { type BodyReader interface { BodyWithTransactions(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) BodyRlp(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (bodyRlp rlp.RawValue, err error) - Body(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) + Body(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, txAmount uint32, err error) } type TxnReader interface { diff --git a/ethdb/privateapi/events.go b/turbo/shards/events.go similarity index 96% rename from ethdb/privateapi/events.go rename to turbo/shards/events.go index 8ea286d1ec6..7b50d4b0917 100644 --- a/ethdb/privateapi/events.go +++ b/turbo/shards/events.go @@ -1,4 +1,4 @@ 
-package privateapi +package shards import ( "sync" @@ -137,3 +137,9 @@ func (e *Events) OnLogs(logs []*remote.SubscribeLogsReply) { common.PrioritizedSend(ch, logs) } } + +type Notifications struct { + Events *Events + Accumulator *Accumulator + StateChangesConsumer StateChangeConsumer +} diff --git a/turbo/shards/state_change_accumulator.go b/turbo/shards/state_change_accumulator.go index f2743020658..e3ffbc38bbf 100644 --- a/turbo/shards/state_change_accumulator.go +++ b/turbo/shards/state_change_accumulator.go @@ -7,21 +7,19 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/params" ) // Accumulator collects state changes in a form that can then be delivered to the RPC daemon type Accumulator struct { viewID uint64 // mdbx's txID - chainConfig *params.ChainConfig changes []*remote.StateChange latestChange *remote.StateChange accountChangeIndex map[common.Address]int // For the latest changes, allows finding account change by account's address storageChangeIndex map[common.Address]map[common.Hash]int } -func NewAccumulator(chainConfig *params.ChainConfig) *Accumulator { - return &Accumulator{chainConfig: chainConfig} +func NewAccumulator() *Accumulator { + return &Accumulator{} } type StateChangeConsumer interface { @@ -35,7 +33,6 @@ func (a *Accumulator) Reset(viewID uint64) { a.storageChangeIndex = nil a.viewID = viewID } -func (a *Accumulator) ChainConfig() *params.ChainConfig { return a.chainConfig } func (a *Accumulator) SendAndReset(ctx context.Context, c StateChangeConsumer, pendingBaseFee uint64, blockGasLimit uint64) { if a == nil || c == nil || len(a.changes) == 0 { return @@ -161,3 +158,14 @@ func (a *Accumulator) ChangeStorage(address common.Address, incarnation uint64, storageChange.Location = gointerfaces.ConvertHashToH256(location) storageChange.Data = data } + +func (a *Accumulator) CopyAndReset(target 
*Accumulator) { + target.changes = a.changes + a.changes = nil + target.latestChange = a.latestChange + a.latestChange = nil + target.accountChangeIndex = a.accountChangeIndex + a.accountChangeIndex = nil + target.storageChangeIndex = a.storageChangeIndex + a.storageChangeIndex = nil +} diff --git a/turbo/snapshotsync/block_reader.go b/turbo/snapshotsync/block_reader.go index babb6a0037e..71ab1cecd10 100644 --- a/turbo/snapshotsync/block_reader.go +++ b/turbo/snapshotsync/block_reader.go @@ -29,14 +29,16 @@ func (back *BlockReader) CanonicalHash(ctx context.Context, tx kv.Getter, blockH return rawdb.ReadCanonicalHash(tx, blockHeight) } +func (back *BlockReader) Snapshots() *RoSnapshots { return nil } + func (back *BlockReader) Header(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (*types.Header, error) { h := rawdb.ReadHeader(tx, hash, blockHeight) return h, nil } -func (back *BlockReader) Body(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) { - body, _, _ = rawdb.ReadBody(tx, hash, blockHeight) - return body, nil +func (back *BlockReader) Body(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, txAmount uint32, err error) { + body, _, txAmount = rawdb.ReadBody(tx, hash, blockHeight) + return body, txAmount, nil } func (back *BlockReader) BodyWithTransactions(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) { @@ -44,7 +46,7 @@ func (back *BlockReader) BodyWithTransactions(ctx context.Context, tx kv.Getter, } func (back *BlockReader) BodyRlp(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (bodyRlp rlp.RawValue, err error) { - body, err := back.Body(ctx, tx, hash, blockHeight) + body, _, err := back.Body(ctx, tx, hash, blockHeight) if err != nil { return nil, err } @@ -129,6 +131,8 @@ func (back *RemoteBlockReader) HeaderByNumber(ctx context.Context, tx kv.Getter, return 
block.Header(), nil } +func (back *RemoteBlockReader) Snapshots() *RoSnapshots { return nil } + func (back *RemoteBlockReader) HeaderByHash(ctx context.Context, tx kv.Getter, hash common.Hash) (*types.Header, error) { blockNum := rawdb.ReadHeaderNumber(tx, hash) if blockNum == nil { @@ -195,15 +199,15 @@ func (back *RemoteBlockReader) Header(ctx context.Context, tx kv.Getter, hash co } return block.Header(), nil } -func (back *RemoteBlockReader) Body(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) { +func (back *RemoteBlockReader) Body(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, txAmount uint32, err error) { block, _, err := back.BlockWithSenders(ctx, tx, hash, blockHeight) if err != nil { - return nil, err + return nil, 0, err } if block == nil { - return nil, nil + return nil, 0, nil } - return block.Body(), nil + return block.Body(), uint32(len(block.Body().Transactions)), nil } func (back *RemoteBlockReader) BodyWithTransactions(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) { block, _, err := back.BlockWithSenders(ctx, tx, hash, blockHeight) @@ -237,6 +241,8 @@ func NewBlockReaderWithSnapshots(snapshots *RoSnapshots) *BlockReaderWithSnapsho return &BlockReaderWithSnapshots{sn: snapshots} } +func (back *BlockReaderWithSnapshots) Snapshots() *RoSnapshots { return back.sn } + func (back *BlockReaderWithSnapshots) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (h *types.Header, err error) { ok, err := back.sn.ViewHeaders(blockHeight, func(segment *HeaderSegment) error { h, _, err = back.headerFromSnapshot(blockHeight, segment, nil) @@ -276,6 +282,9 @@ func (back *BlockReaderWithSnapshots) HeaderByHash(ctx context.Context, tx kv.Ge if err != nil { return err } + if h != nil { + break + } } return nil }); err != nil { @@ -398,22 +407,22 @@ func (back *BlockReaderWithSnapshots) 
BodyRlp(ctx context.Context, tx kv.Getter, return bodyRlp, nil } -func (back *BlockReaderWithSnapshots) Body(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) { +func (back *BlockReaderWithSnapshots) Body(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, txAmount uint32, err error) { ok, err := back.sn.ViewBodies(blockHeight, func(seg *BodySegment) error { - body, _, _, _, err = back.bodyFromSnapshot(blockHeight, seg, nil) + body, _, txAmount, _, err = back.bodyFromSnapshot(blockHeight, seg, nil) if err != nil { return err } return nil }) if err != nil { - return nil, err + return nil, 0, err } if ok { - return body, nil + return body, txAmount, nil } - body, _, _ = rawdb.ReadBody(tx, hash, blockHeight) - return body, nil + body, _, txAmount = rawdb.ReadBody(tx, hash, blockHeight) + return body, txAmount, nil } func (back *BlockReaderWithSnapshots) BlockWithSenders(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (block *types.Block, senders []common.Address, err error) { @@ -496,7 +505,7 @@ func (back *BlockReaderWithSnapshots) headerFromSnapshot(blockHeight uint64, sn gg := sn.seg.MakeGetter() gg.Reset(headerOffset) if !gg.HasNext() { - return nil, nil, nil + return nil, buf, nil } buf, _ = gg.Next(buf[:0]) if len(buf) == 0 { @@ -576,11 +585,11 @@ func (back *BlockReaderWithSnapshots) bodyForStorageFromSnapshot(blockHeight uin gg := sn.seg.MakeGetter() gg.Reset(bodyOffset) if !gg.HasNext() { - return nil, nil, nil + return nil, buf, nil } buf, _ = gg.Next(buf[:0]) if len(buf) == 0 { - return nil, nil, nil + return nil, buf, nil } b := &types.BodyForStorage{} reader := bytes.NewReader(buf) @@ -610,13 +619,13 @@ func (back *BlockReaderWithSnapshots) txsFromSnapshot(baseTxnID uint64, txsAmoun txs = make([]types.Transaction, txsAmount) senders = make([]common.Address, txsAmount) - reader := bytes.NewReader(buf) if txsAmount == 0 { return txs, 
senders, nil } txnOffset := txsSeg.IdxTxnHash.OrdinalLookup(baseTxnID - txsSeg.IdxTxnHash.BaseDataID()) gg := txsSeg.Seg.MakeGetter() gg.Reset(txnOffset) + reader := bytes.NewReader(buf) stream := rlp.NewStream(reader, 0) for i := uint32(0); i < txsAmount; i++ { if !gg.HasNext() { @@ -675,13 +684,15 @@ func (back *BlockReaderWithSnapshots) txnByHash(txnHash common.Hash, segments [] continue } buf, _ = gg.Next(buf[:0]) - sender, txnRlp := buf[1:1+20], buf[1+20:] + senderByte, txnRlp := buf[1:1+20], buf[1+20:] + sender := *(*common.Address)(senderByte) txn, err = types.DecodeTransaction(rlp.NewStream(bytes.NewReader(txnRlp), uint64(len(txnRlp)))) if err != nil { return } - txn.SetSender(*(*common.Address)(sender)) // see: https://tip.golang.org/ref/spec#Conversions_from_slice_to_array_pointer + + txn.SetSender(sender) // see: https://tip.golang.org/ref/spec#Conversions_from_slice_to_array_pointer reader2 := recsplit.NewIndexReader(sn.IdxTxnHash2BlockNum) blockNum = reader2.Lookup(txnHash[:]) @@ -694,6 +705,8 @@ func (back *BlockReaderWithSnapshots) txnByHash(txnHash common.Hash, segments [] return } +// TxnByIdxInBlock - doesn't include system-transactions in the begin/end of block +// return nil if 0 < i < body.TxAmoun func (back *BlockReaderWithSnapshots) TxnByIdxInBlock(ctx context.Context, tx kv.Getter, blockNum uint64, i int) (txn types.Transaction, err error) { var b *types.BodyForStorage ok, err := back.sn.ViewBodies(blockNum, func(segment *BodySegment) error { @@ -704,10 +717,18 @@ func (back *BlockReaderWithSnapshots) TxnByIdxInBlock(ctx context.Context, tx kv if b == nil { return nil } - return nil }) + if err != nil { + return nil, err + } + if ok { + // if block has no transactions, or requested txNum out of non-system transactions length + if b.TxAmount == 2 || i == -1 || i >= int(b.TxAmount-2) { + return nil, nil + } + ok, err = back.sn.Txs.ViewSegment(blockNum, func(segment *TxnSegment) error { // +1 because block has system-txn in the beginning of 
block txn, err = back.txnByID(b.BaseTxId+1+uint64(i), segment, nil) diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 4200e40aef5..d594ed4b6d4 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -9,6 +9,7 @@ import ( "os" "path" "path/filepath" + "reflect" "runtime" "strings" "sync" @@ -16,6 +17,7 @@ import ( "github.com/holiman/uint256" common2 "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/compress" @@ -25,7 +27,7 @@ import ( "github.com/ledgerwatch/erigon-lib/recsplit" types2 "github.com/ledgerwatch/erigon-lib/types" "github.com/ledgerwatch/erigon/cmd/downloader/downloadergrpc" - "github.com/ledgerwatch/erigon/cmd/hack/tool" + "github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" @@ -33,6 +35,7 @@ import ( "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" @@ -172,17 +175,21 @@ func (sn *BodySegment) reopenIdx(dir string) (err error) { return nil } -func (sn *BodySegment) Iterate(f func(blockNum, baseTxNum, txAmout uint64)) error { +func (sn *BodySegment) Iterate(f func(blockNum, baseTxNum, txAmount uint64) error) error { + defer sn.seg.EnableReadAhead().DisableReadAhead() + var buf []byte g := sn.seg.MakeGetter() - blockNum := sn.idxBodyNumber.BaseDataID() + blockNum := sn.ranges.from var b types.BodyForStorage for g.HasNext() { buf, _ = g.Next(buf[:0]) if err := rlp.DecodeBytes(buf, &b); err != 
nil { return err } - f(blockNum, b.BaseTxId, uint64(b.TxAmount)) + if err := f(blockNum, b.BaseTxId, uint64(b.TxAmount)); err != nil { + return err + } blockNum++ } return nil @@ -331,10 +338,10 @@ type RoSnapshots struct { } // NewRoSnapshots - opens all snapshots. But to simplify everything: -// - it opens snapshots only on App start and immutable after -// - all snapshots of given blocks range must exist - to make this blocks range available -// - gaps are not allowed -// - segment have [from:to) semantic +// - it opens snapshots only on App start and immutable after +// - all snapshots of given blocks range must exist - to make this blocks range available +// - gaps are not allowed +// - segment have [from:to) semantic func NewRoSnapshots(cfg ethconfig.Snapshot, snapDir string) *RoSnapshots { return &RoSnapshots{dir: snapDir, cfg: cfg, Headers: &headerSegments{}, Bodies: &bodySegments{}, Txs: &txnSegments{}} } @@ -349,7 +356,7 @@ func (s *RoSnapshots) BlocksAvailable() uint64 { return cmp.Min(s.segmentsMax.Lo func (s *RoSnapshots) LogStat() { var m runtime.MemStats common2.ReadMemStats(&m) - log.Info("[Snapshots] Stat", + log.Info("[Snapshots] Blocks Stat", "blocks", fmt.Sprintf("%dk", (s.BlocksAvailable()+1)/1000), "indices", fmt.Sprintf("%dk", (s.IndicesMax()+1)/1000), "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys)) @@ -362,6 +369,79 @@ func (s *RoSnapshots) EnsureExpectedBlocksAreAvailable(cfg *snapcfg.Cfg) error { return nil } +// DisableReadAhead - usage: `defer d.EnableReadAhead().DisableReadAhead()`. Please don't use this funcs without `defer` to avoid leak. 
+func (s *RoSnapshots) DisableReadAhead() { + s.Headers.lock.RLock() + defer s.Headers.lock.RUnlock() + s.Bodies.lock.RLock() + defer s.Bodies.lock.RUnlock() + s.Txs.lock.RLock() + defer s.Txs.lock.RUnlock() + for _, sn := range s.Headers.segments { + sn.seg.DisableReadAhead() + } + for _, sn := range s.Bodies.segments { + sn.seg.DisableReadAhead() + } + for _, sn := range s.Txs.segments { + sn.Seg.DisableReadAhead() + } +} +func (s *RoSnapshots) EnableReadAhead() *RoSnapshots { + s.Headers.lock.RLock() + defer s.Headers.lock.RUnlock() + s.Bodies.lock.RLock() + defer s.Bodies.lock.RUnlock() + s.Txs.lock.RLock() + defer s.Txs.lock.RUnlock() + for _, sn := range s.Headers.segments { + sn.seg.EnableReadAhead() + } + for _, sn := range s.Bodies.segments { + sn.seg.EnableReadAhead() + } + for _, sn := range s.Txs.segments { + sn.Seg.EnableReadAhead() + } + return s +} +func (s *RoSnapshots) EnableMadvWillNeed() *RoSnapshots { + s.Headers.lock.RLock() + defer s.Headers.lock.RUnlock() + s.Bodies.lock.RLock() + defer s.Bodies.lock.RUnlock() + s.Txs.lock.RLock() + defer s.Txs.lock.RUnlock() + for _, sn := range s.Headers.segments { + sn.seg.EnableWillNeed() + } + for _, sn := range s.Bodies.segments { + sn.seg.EnableWillNeed() + } + for _, sn := range s.Txs.segments { + sn.Seg.EnableWillNeed() + } + return s +} +func (s *RoSnapshots) EnableMadvNormal() *RoSnapshots { + s.Headers.lock.RLock() + defer s.Headers.lock.RUnlock() + s.Bodies.lock.RLock() + defer s.Bodies.lock.RUnlock() + s.Txs.lock.RLock() + defer s.Txs.lock.RUnlock() + for _, sn := range s.Headers.segments { + sn.seg.EnableMadvNormal() + } + for _, sn := range s.Bodies.segments { + sn.seg.EnableMadvNormal() + } + for _, sn := range s.Txs.segments { + sn.Seg.EnableMadvNormal() + } + return s +} + func (s *RoSnapshots) idxAvailability() uint64 { var headers, bodies, txs uint64 for _, seg := range s.Headers.segments { @@ -407,6 +487,9 @@ func (s *RoSnapshots) Files() (list []string) { defer s.Txs.lock.RUnlock() max 
:= s.BlocksAvailable() for _, seg := range s.Bodies.segments { + if seg.seg == nil { + continue + } if seg.ranges.from > max { continue } @@ -414,6 +497,9 @@ func (s *RoSnapshots) Files() (list []string) { list = append(list, fName) } for _, seg := range s.Headers.segments { + if seg.seg == nil { + continue + } if seg.ranges.from > max { continue } @@ -421,12 +507,16 @@ func (s *RoSnapshots) Files() (list []string) { list = append(list, fName) } for _, seg := range s.Txs.segments { + if seg.Seg == nil { + continue + } if seg.ranges.from > max { continue } _, fName := filepath.Split(seg.Seg.FilePath()) list = append(list, fName) } + slices.Sort(list) return list } @@ -453,6 +543,9 @@ Loop: switch f.T { case snap.Headers: for _, sn := range s.Headers.segments { + if sn.seg == nil { // it's ok if some segment was not able to open + continue + } _, name := filepath.Split(sn.seg.FilePath()) if fName == name { if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { @@ -487,6 +580,9 @@ Loop: } case snap.Bodies: for _, sn := range s.Bodies.segments { + if sn.seg == nil { + continue + } _, name := filepath.Split(sn.seg.FilePath()) if fName == name { if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { @@ -518,6 +614,9 @@ Loop: } case snap.Transactions: for _, sn := range s.Txs.segments { + if sn.Seg == nil { + continue + } _, name := filepath.Split(sn.Seg.FilePath()) if fName == name { if err := sn.reopenIdxIfNeed(s.dir, optimistic); err != nil { @@ -576,12 +675,14 @@ func (s *RoSnapshots) Ranges() (ranges []Range) { return ranges } +func (s *RoSnapshots) OptimisticalyReopenFolder() { _ = s.ReopenFolder() } +func (s *RoSnapshots) OptimisticalyReopenWithDB(db kv.RoDB) { _ = s.ReopenWithDB(db) } func (s *RoSnapshots) ReopenFolder() error { files, _, err := Segments(s.dir) if err != nil { return err } - var list []string + list := make([]string, 0, len(files)) for _, f := range files { _, fName := filepath.Split(f.Path) list = append(list, fName) @@ -646,7 
+747,7 @@ Loop3: s.Txs.segments[i] = nil } var i int - for i = 0; i < len(s.Headers.segments) && s.Headers.segments[i] != nil; i++ { + for i = 0; i < len(s.Headers.segments) && s.Headers.segments[i] != nil && s.Headers.segments[i].seg != nil; i++ { } tail := s.Headers.segments[i:] s.Headers.segments = s.Headers.segments[:i] @@ -657,7 +758,7 @@ Loop3: } } - for i = 0; i < len(s.Bodies.segments) && s.Bodies.segments[i] != nil; i++ { + for i = 0; i < len(s.Bodies.segments) && s.Bodies.segments[i] != nil && s.Bodies.segments[i].seg != nil; i++ { } tailB := s.Bodies.segments[i:] s.Bodies.segments = s.Bodies.segments[:i] @@ -668,7 +769,7 @@ Loop3: } } - for i = 0; i < len(s.Txs.segments) && s.Txs.segments[i] != nil; i++ { + for i = 0; i < len(s.Txs.segments) && s.Txs.segments[i] != nil && s.Txs.segments[i].Seg != nil; i++ { } tailC := s.Txs.segments[i:] s.Txs.segments = s.Txs.segments[:i] @@ -687,7 +788,6 @@ func (s *RoSnapshots) PrintDebug() { defer s.Bodies.lock.RUnlock() s.Txs.lock.RLock() defer s.Txs.lock.RUnlock() - fmt.Printf("sn: %d, %d\n", s.segmentsMax.Load(), s.idxMax.Load()) fmt.Println(" == Snapshots, Header") for _, sn := range s.Headers.segments { fmt.Printf("%d, %t\n", sn.ranges.from, sn.idxHeaderHash == nil) @@ -720,26 +820,29 @@ func (s *RoSnapshots) ViewTxs(blockNum uint64, f func(sn *TxnSegment) error) (fo return s.Txs.ViewSegment(blockNum, f) } -func buildIdx(ctx context.Context, sn snap.FileInfo, chainID uint256.Int, tmpDir string, lvl log.Lvl) error { +func buildIdx(ctx context.Context, sn snap.FileInfo, chainID uint256.Int, tmpDir string, p *background.Progress, lvl log.Lvl) error { + _, fName := filepath.Split(sn.Path) + log.Debug("[snapshots] build idx", "file", fName) switch sn.T { case snap.Headers: - if err := HeadersIdx(ctx, sn.Path, sn.From, tmpDir, lvl); err != nil { + if err := HeadersIdx(ctx, sn.Path, sn.From, tmpDir, p, lvl); err != nil { return err } case snap.Bodies: - if err := BodiesIdx(ctx, sn.Path, sn.From, tmpDir, lvl); err != nil 
{ + if err := BodiesIdx(ctx, sn.Path, sn.From, tmpDir, p, lvl); err != nil { return err } case snap.Transactions: dir, _ := filepath.Split(sn.Path) - if err := TransactionsIdx(ctx, chainID, sn.From, sn.To, dir, tmpDir, lvl); err != nil { + if err := TransactionsIdx(ctx, chainID, sn.From, sn.To, dir, tmpDir, p, lvl); err != nil { return err } } return nil } -func BuildMissedIndices(ctx context.Context, dir string, chainID uint256.Int, tmpDir string, workers int, lvl log.Lvl) error { +func BuildMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs, chainID uint256.Int, workers int) error { + dir, tmpDir := dirs.Snap, dirs.Tmp //log.Log(lvl, "[snapshots] Build indices", "from", min) logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() @@ -749,54 +852,59 @@ func BuildMissedIndices(ctx context.Context, dir string, chainID uint256.Int, tm } errs := make(chan error, 1024) wg := &sync.WaitGroup{} + ps := background.NewProgressSet() sem := semaphore.NewWeighted(int64(workers)) - for _, t := range snap.AllSnapshotTypes { - for _, sn := range segments { - if sn.T != t { - continue - } - if hasIdxFile(&sn) { - continue - } - wg.Add(1) - if err := sem.Acquire(ctx, 1); err != nil { - return err - } - go func(sn snap.FileInfo) { - defer sem.Release(1) - defer wg.Done() - - log.Log(log.LvlInfo, "[snapshots] BuildMissedIndices", "from", sn.From, "to", sn.To) - if err := buildIdx(ctx, sn, chainID, tmpDir, lvl); err != nil { - errs <- err + startIndexingTime := time.Now() + go func() { + for _, t := range snap.AllSnapshotTypes { + for index := range segments { + segment := segments[index] + if segment.T != t { + continue } - - select { - case <-ctx.Done(): - errs <- ctx.Err() + if hasIdxFile(&segment) { + continue + } + if err := sem.Acquire(ctx, 1); err != nil { + errs <- err return - case <-logEvery.C: - var m runtime.MemStats - if lvl >= log.LvlInfo { - common2.ReadMemStats(&m) - } - log.Log(lvl, "[snapshots] Indexing", "type", t.String(), 
"blockNum", sn.To, "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys)) - default: } - }(sn) + wg.Add(1) + go func(sn snap.FileInfo) { + defer sem.Release(1) + defer wg.Done() + + p := &background.Progress{} + ps.Add(p) + defer ps.Delete(p) + if err := buildIdx(ctx, sn, chainID, tmpDir, p, log.LvlInfo); err != nil { + errs <- err + } + }(segment) + } } - } - go func() { wg.Wait() close(errs) }() - for err := range errs { - if err != nil { - return err + + for { + select { + case err, ok := <-errs: + if !ok { + log.Info(fmt.Sprintf("[%s] finished indexing", logPrefix), "time", time.Since(startIndexingTime).String()) + return nil + } + if err != nil { + return err + } + case <-ctx.Done(): + return ctx.Err() + case <-logEvery.C: + var m runtime.MemStats + common2.ReadMemStats(&m) + log.Info(fmt.Sprintf("[%s] Indexing", logPrefix), "progress", ps.String(), "total-indexing-time", time.Since(startIndexingTime).Round(time.Second).String(), "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys)) } } - - return nil } func noGaps(in []snap.FileInfo) (out []snap.FileInfo, missingSnapshots []Range) { @@ -857,45 +965,6 @@ func noOverlaps(in []snap.FileInfo) (res []snap.FileInfo) { return res } -func SegmentsList(dir string) (res []string, err error) { - files, _, err := Segments(dir) - if err != nil { - return nil, err - } - for _, f := range files { - _, fName := filepath.Split(f.Path) - res = append(res, fName) - } - return res, nil -} - -// EnforceSnapshotsInvariant if DB has record - then file exists, if file exists - DB has record. 
-// it also does notify about changes after db commit -func EnforceSnapshotsInvariant(db kv.RwDB, dir string, allSnapshots *RoSnapshots, notifier DBEventNotifier) (snList []string, err error) { - snListInFolder, err := SegmentsList(dir) - if err != nil { - return nil, err - } - if err = db.Update(context.Background(), func(tx kv.RwTx) error { - snList, err = rawdb.EnforceSnapshotsInvariant(tx, snListInFolder) - if err != nil { - return err - } - return err - }); err != nil { - return snList, err - } - if allSnapshots != nil { - if err := allSnapshots.ReopenList(snList, false); err != nil { - return snList, err - } - } - if notifier != nil { - notifier.OnNewSnapshot() - } - return snList, nil -} - func Segments(dir string) (res []snap.FileInfo, missingSnapshots []Range, err error) { list, err := snap.Segments(dir) if err != nil { @@ -943,13 +1012,11 @@ func Segments(dir string) (res []snap.FileInfo, missingSnapshots []Range, err er func chooseSegmentEnd(from, to, blocksPerFile uint64) uint64 { next := (from/blocksPerFile + 1) * blocksPerFile to = cmp.Min(next, to) - return to - (to % snap.MIN_SEGMENT_SIZE) // round down to the nearest 1k + return to - (to % snap.Erigon2MinSegmentSize) // round down to the nearest 1k } type BlockRetire struct { working atomic.Bool - wg *sync.WaitGroup - result *BlockRetireResult workers int tmpDir string @@ -958,29 +1025,27 @@ type BlockRetire struct { downloader proto_downloader.DownloaderClient notifier DBEventNotifier -} -type BlockRetireResult struct { - BlockFrom, BlockTo uint64 - Err error + BackgroundResult *BackgroundResult } func NewBlockRetire(workers int, tmpDir string, snapshots *RoSnapshots, db kv.RoDB, downloader proto_downloader.DownloaderClient, notifier DBEventNotifier) *BlockRetire { - return &BlockRetire{workers: workers, tmpDir: tmpDir, snapshots: snapshots, wg: &sync.WaitGroup{}, db: db, downloader: downloader, notifier: notifier} + return &BlockRetire{workers: workers, tmpDir: tmpDir, snapshots: snapshots, db: 
db, downloader: downloader, notifier: notifier, BackgroundResult: &BackgroundResult{}} } func (br *BlockRetire) Snapshots() *RoSnapshots { return br.snapshots } func (br *BlockRetire) Working() bool { return br.working.Load() } -func (br *BlockRetire) Wait() { br.wg.Wait() } -func (br *BlockRetire) Result() *BlockRetireResult { - r := br.result - br.result = nil - return r -} + func CanRetire(curBlockNum uint64, snapshots *RoSnapshots) (blockFrom, blockTo uint64, can bool) { + if curBlockNum <= params.FullImmutabilityThreshold { + return + } blockFrom = snapshots.BlocksAvailable() + 1 return canRetire(blockFrom, curBlockNum-params.FullImmutabilityThreshold) } func canRetire(from, to uint64) (blockFrom, blockTo uint64, can bool) { + if to <= from { + return + } blockFrom = (from / 1_000) * 1_000 roundedTo1K := (to / 1_000) * 1_000 var maxJump uint64 = 1_000 @@ -1016,13 +1081,13 @@ func CanDeleteTo(curBlockNum uint64, snapshots *RoSnapshots) (blockTo uint64) { return cmp.Min(hardLimit, snapshots.BlocksAvailable()+1) } func (br *BlockRetire) RetireBlocks(ctx context.Context, blockFrom, blockTo uint64, lvl log.Lvl) error { - chainConfig := tool.ChainConfigFromDB(br.db) + chainConfig := fromdb.ChainConfig(br.db) chainID, _ := uint256.FromBig(chainConfig.ChainID) return retireBlocks(ctx, blockFrom, blockTo, *chainID, br.tmpDir, br.snapshots, br.db, br.workers, br.downloader, lvl, br.notifier) } func (br *BlockRetire) PruneAncientBlocks(tx kv.RwTx) error { - if !br.snapshots.cfg.KeepBlocks { + if br.snapshots.cfg.KeepBlocks { return nil } currentProgress, err := stages.GetStageProgress(tx, stages.Senders) @@ -1044,16 +1109,14 @@ func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, forwardProg // go-routine is still working return } - if br.result != nil { + if br.BackgroundResult.Has() { // Prevent invocation for the same range twice, result needs to be cleared in the Result() function return } - br.wg.Add(1) + br.working.Store(true) go func() { - 
br.working.Store(true) defer br.working.Store(false) - defer br.wg.Done() blockFrom, blockTo, ok := CanRetire(forwardProgress, br.Snapshots()) if !ok { @@ -1061,10 +1124,10 @@ func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, forwardProg } err := br.RetireBlocks(ctx, blockFrom, blockTo, lvl) - br.result = &BlockRetireResult{ - BlockFrom: blockFrom, - BlockTo: blockTo, - Err: err, + if err != nil { + br.BackgroundResult.Set(fmt.Errorf("retire blocks error: %w, fromBlock=%d, toBlock=%d", err, blockFrom, blockTo)) + } else { + br.BackgroundResult.Set(nil) } }() } @@ -1076,14 +1139,14 @@ type DBEventNotifier interface { func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint256.Int, tmpDir string, snapshots *RoSnapshots, db kv.RoDB, workers int, downloader proto_downloader.DownloaderClient, lvl log.Lvl, notifier DBEventNotifier) error { log.Log(lvl, "[snapshots] Retire Blocks", "range", fmt.Sprintf("%dk-%dk", blockFrom/1000, blockTo/1000)) // in future we will do it in background - if err := DumpBlocks(ctx, blockFrom, blockTo, snap.DEFAULT_SEGMENT_SIZE, tmpDir, snapshots.Dir(), db, workers, lvl); err != nil { + if err := DumpBlocks(ctx, blockFrom, blockTo, snap.Erigon2SegmentSize, tmpDir, snapshots.Dir(), db, workers, lvl); err != nil { return fmt.Errorf("DumpBlocks: %w", err) } if err := snapshots.ReopenFolder(); err != nil { return fmt.Errorf("reopen: %w", err) } snapshots.LogStat() - if notifier != nil { // notify about new snapshots of any size + if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size notifier.OnNewSnapshot() } merger := NewMerger(tmpDir, workers, lvl, chainID, notifier) @@ -1091,7 +1154,7 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 if len(rangesToMerge) == 0 { return nil } - err := merger.Merge(ctx, snapshots, rangesToMerge, snapshots.Dir(), true) + err := merger.Merge(ctx, snapshots, rangesToMerge, snapshots.Dir(), true 
/* doIndex */) if err != nil { return err } @@ -1099,13 +1162,13 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 return fmt.Errorf("reopen: %w", err) } snapshots.LogStat() - if notifier != nil { // notify about new snapshots of any size + if notifier != nil && !reflect.ValueOf(notifier).IsNil() { // notify about new snapshots of any size notifier.OnNewSnapshot() } - var downloadRequest []DownloadRequest - for _, r := range rangesToMerge { - downloadRequest = append(downloadRequest, NewDownloadRequest(&r, "", "")) + downloadRequest := make([]DownloadRequest, 0, len(rangesToMerge)) + for i := range rangesToMerge { + downloadRequest = append(downloadRequest, NewDownloadRequest(&rangesToMerge[i], "", "")) } return RequestSnapshotsDownload(ctx, downloadRequest, downloader) @@ -1115,38 +1178,45 @@ func DumpBlocks(ctx context.Context, blockFrom, blockTo, blocksPerFile uint64, t if blocksPerFile == 0 { return nil } - chainConfig := tool.ChainConfigFromDB(chainDB) - chainID, _ := uint256.FromBig(chainConfig.ChainID) + chainConfig := fromdb.ChainConfig(chainDB) for i := blockFrom; i < blockTo; i = chooseSegmentEnd(i, blockTo, blocksPerFile) { - if err := dumpBlocksRange(ctx, i, chooseSegmentEnd(i, blockTo, blocksPerFile), tmpDir, snapDir, chainDB, *chainID, workers, lvl); err != nil { + if err := dumpBlocksRange(ctx, i, chooseSegmentEnd(i, blockTo, blocksPerFile), tmpDir, snapDir, chainDB, *chainConfig, workers, lvl); err != nil { return err } } return nil } -func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, snapDir string, chainDB kv.RoDB, chainID uint256.Int, workers int, lvl log.Lvl) error { - f, _ := snap.ParseFileName(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Headers)) +func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, snapDir string, chainDB kv.RoDB, chainConfig params.ChainConfig, workers int, lvl log.Lvl) error { + segName := snap.SegmentFileName(blockFrom, blockTo, 
snap.Headers) + f, _ := snap.ParseFileName(snapDir, segName) if err := DumpHeaders(ctx, chainDB, f.Path, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { return fmt.Errorf("DumpHeaders: %w", err) } - if err := buildIdx(ctx, f, chainID, tmpDir, lvl); err != nil { + p := &background.Progress{} + + chainId, _ := uint256.FromBig(chainConfig.ChainID) + if err := buildIdx(ctx, f, *chainId, tmpDir, p, lvl); err != nil { return err } - f, _ = snap.ParseFileName(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Bodies)) + segName = snap.SegmentFileName(blockFrom, blockTo, snap.Bodies) + f, _ = snap.ParseFileName(snapDir, segName) if err := DumpBodies(ctx, chainDB, f.Path, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { return fmt.Errorf("DumpBodies: %w", err) } - if err := buildIdx(ctx, f, chainID, tmpDir, lvl); err != nil { + p = &background.Progress{} + if err := buildIdx(ctx, f, *chainId, tmpDir, p, lvl); err != nil { return err } - f, _ = snap.ParseFileName(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Transactions)) + segName = snap.SegmentFileName(blockFrom, blockTo, snap.Transactions) + f, _ = snap.ParseFileName(snapDir, segName) if _, err := DumpTxs(ctx, chainDB, f.Path, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { return fmt.Errorf("DumpTxs: %w", err) } - if err := buildIdx(ctx, f, chainID, tmpDir, lvl); err != nil { + p = &background.Progress{} + if err := buildIdx(ctx, f, *chainId, tmpDir, p, lvl); err != nil { return err } @@ -1194,7 +1264,7 @@ func DumpTxs(ctx context.Context, db kv.RoDB, segmentFile, tmpDir string, blockF warmupCtx, cancel := context.WithCancel(ctx) defer cancel() - chainConfig := tool.ChainConfigFromDB(db) + chainConfig := fromdb.ChainConfig(db) chainID, _ := uint256.FromBig(chainConfig.ChainID) f, err := compress.NewCompressor(ctx, "Snapshot Txs", segmentFile, tmpDir, compress.MinPatternScore, workers, lvl) @@ -1309,6 +1379,7 @@ func DumpTxs(ctx context.Context, db kv.RoDB, segmentFile, tmpDir string, 
blockF if err != nil { return fmt.Errorf("%w, block: %d", err, blockNum) } + // first tx byte => sender adress => tx rlp if err := f.AddWord(valueBuf); err != nil { return err } @@ -1439,7 +1510,9 @@ func DumpBodies(ctx context.Context, db kv.RoDB, segmentFilePath, tmpDir string, } defer f.Close() - key := make([]byte, 8+32) + blockNumByteLength := 8 + blockHashByteLength := 32 + key := make([]byte, blockNumByteLength+blockHashByteLength) from := dbutils.EncodeBlockNumber(blockFrom) if err := kv.BigChunks(db, kv.HeaderCanonical, from, func(tx kv.Tx, k, v []byte) (bool, error) { blockNum := binary.BigEndian.Uint64(k) @@ -1524,7 +1597,7 @@ func expectedTxsAmount(snapDir string, blockFrom, blockTo uint64) (firstTxID, ex return } -func TransactionsIdx(ctx context.Context, chainID uint256.Int, blockFrom, blockTo uint64, snapDir string, tmpDir string, lvl log.Lvl) (err error) { +func TransactionsIdx(ctx context.Context, chainID uint256.Int, blockFrom, blockTo uint64, snapDir string, tmpDir string, p *background.Progress, lvl log.Lvl) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("TransactionsIdx: at=%d-%d, %v, %s", blockFrom, blockTo, rec, dbg.Stack()) @@ -1542,15 +1615,18 @@ func TransactionsIdx(ctx context.Context, chainID uint256.Int, blockFrom, blockT } defer bodiesSegment.Close() - segmentFilePath := filepath.Join(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Transactions)) + segFileName := snap.SegmentFileName(blockFrom, blockTo, snap.Transactions) + segmentFilePath := filepath.Join(snapDir, segFileName) d, err := compress.NewDecompressor(segmentFilePath) if err != nil { return err } defer d.Close() if uint64(d.Count()) != expectedCount { - panic(fmt.Errorf("expect: %d, got %d\n", expectedCount, d.Count())) + return fmt.Errorf("TransactionsIdx: at=%d-%d, pre index building, expect: %d, got %d", blockFrom, blockTo, expectedCount, d.Count()) } + p.Name.Store(segFileName) + p.Total.Store(uint64(d.Count() * 2)) txnHashIdx, err 
:= recsplit.NewRecSplit(recsplit.RecSplitArgs{ KeyCount: d.Count(), @@ -1578,7 +1654,11 @@ func TransactionsIdx(ctx context.Context, chainID uint256.Int, blockFrom, blockT if err != nil { return err } - txnHashIdx.LogLvl(log.LvlDebug) + idxLogLvl := log.LvlDebug + if d.Count() > 1_000_000 { + idxLogLvl = log.LvlInfo + } + txnHashIdx.LogLvl(idxLogLvl) txnHash2BlockNumIdx.LogLvl(log.LvlDebug) parseCtx := types2.NewTxParseContext(chainID) @@ -1586,91 +1666,92 @@ func TransactionsIdx(ctx context.Context, chainID uint256.Int, blockFrom, blockT slot := types2.TxSlot{} bodyBuf, word := make([]byte, 0, 4096), make([]byte, 0, 4096) - withReadAhead := func(f func(g, bodyGetter *compress.Getter) error) error { - return d.WithReadAhead(func() error { - return bodiesSegment.WithReadAhead(func() error { - return f(d.MakeGetter(), bodiesSegment.MakeGetter()) - }) - }) - } + defer d.EnableReadAhead().DisableReadAhead() + defer bodiesSegment.EnableReadAhead().DisableReadAhead() RETRY: - if err := withReadAhead(func(g, bodyGetter *compress.Getter) error { - var i, offset, nextPos uint64 - blockNum := firstBlockNum - body := &types.BodyForStorage{} - - bodyBuf, _ = bodyGetter.Next(bodyBuf[:0]) - if err := rlp.DecodeBytes(bodyBuf, body); err != nil { - return err - } + g, bodyGetter := d.MakeGetter(), bodiesSegment.MakeGetter() + var i, offset, nextPos uint64 + blockNum := firstBlockNum + body := &types.BodyForStorage{} - for g.HasNext() { - word, nextPos = g.Next(word[:0]) - select { - case <-ctx.Done(): - return ctx.Err() - default: - } + bodyBuf, _ = bodyGetter.Next(bodyBuf[:0]) + if err := rlp.DecodeBytes(bodyBuf, body); err != nil { + return err + } - for body.BaseTxId+uint64(body.TxAmount) <= firstTxID+i { // skip empty blocks - if !bodyGetter.HasNext() { - return fmt.Errorf("not enough bodies") - } - bodyBuf, _ = bodyGetter.Next(bodyBuf[:0]) - if err := rlp.DecodeBytes(bodyBuf, body); err != nil { - return err - } - blockNum++ - } + for g.HasNext() { + p.Processed.Inc() + word, 
nextPos = g.Next(word[:0]) + select { + case <-ctx.Done(): + return ctx.Err() + default: + } - isSystemTx := len(word) == 0 - if isSystemTx { // system-txs hash:pad32(txnID) - binary.BigEndian.PutUint64(slot.IDHash[:], firstTxID+i) - } else { - if _, err := parseCtx.ParseTransaction(word[1+20:], 0, &slot, nil, true /* hasEnvelope */, nil); err != nil { - return fmt.Errorf("ParseTransaction: %w, blockNum: %d, i: %d", err, blockNum, i) - } + for body.BaseTxId+uint64(body.TxAmount) <= firstTxID+i { // skip empty blocks + if !bodyGetter.HasNext() { + return fmt.Errorf("not enough bodies") } - if err := txnHashIdx.AddKey(slot.IDHash[:], offset); err != nil { - return err - } - if err := txnHash2BlockNumIdx.AddKey(slot.IDHash[:], blockNum); err != nil { + bodyBuf, _ = bodyGetter.Next(bodyBuf[:0]) + if err := rlp.DecodeBytes(bodyBuf, body); err != nil { return err } - i++ - offset = nextPos + blockNum++ } - - if i != expectedCount { - panic(fmt.Errorf("expect: %d, got %d\n", expectedCount, i)) + firstTxByteAndlengthOfAddress := 21 + isSystemTx := len(word) == 0 + if isSystemTx { // system-txs hash:pad32(txnID) + binary.BigEndian.PutUint64(slot.IDHash[:], firstTxID+i) + } else { + if _, err = parseCtx.ParseTransaction(word[firstTxByteAndlengthOfAddress:], 0, &slot, nil, true /* hasEnvelope */, nil /* validateHash */); err != nil { + return fmt.Errorf("ParseTransaction: %w, blockNum: %d, i: %d", err, blockNum, i) + } } - if err := txnHashIdx.Build(); err != nil { - return fmt.Errorf("txnHashIdx: %w", err) + if err := txnHashIdx.AddKey(slot.IDHash[:], offset); err != nil { + return err } - if err := txnHash2BlockNumIdx.Build(); err != nil { - return fmt.Errorf("txnHash2BlockNumIdx: %w", err) + if err := txnHash2BlockNumIdx.AddKey(slot.IDHash[:], blockNum); err != nil { + return err } - return nil - }); err != nil { + i++ + offset = nextPos + } + + if i != expectedCount { + return fmt.Errorf("TransactionsIdx: at=%d-%d, post index building, expect: %d, got %d", blockFrom, 
blockTo, expectedCount, i) + } + + if err := txnHashIdx.Build(); err != nil { if errors.Is(err, recsplit.ErrCollision) { log.Warn("Building recsplit. Collision happened. It's ok. Restarting with another salt...", "err", err) txnHashIdx.ResetNextSalt() txnHash2BlockNumIdx.ResetNextSalt() goto RETRY } - return err + return fmt.Errorf("txnHashIdx: %w", err) } + if err := txnHash2BlockNumIdx.Build(); err != nil { + if errors.Is(err, recsplit.ErrCollision) { + log.Warn("Building recsplit. Collision happened. It's ok. Restarting with another salt...", "err", err) + txnHashIdx.ResetNextSalt() + txnHash2BlockNumIdx.ResetNextSalt() + goto RETRY + } + return fmt.Errorf("txnHash2BlockNumIdx: %w", err) + } + + p.Processed.Store(p.Total.Load()) return nil } // HeadersIdx - headerHash -> offset (analog of kv.HeaderNumber) -func HeadersIdx(ctx context.Context, segmentFilePath string, firstBlockNumInSegment uint64, tmpDir string, lvl log.Lvl) (err error) { +func HeadersIdx(ctx context.Context, segmentFilePath string, firstBlockNumInSegment uint64, tmpDir string, p *background.Progress, lvl log.Lvl) (err error) { defer func() { if rec := recover(); rec != nil { _, fName := filepath.Split(segmentFilePath) @@ -1684,9 +1765,14 @@ func HeadersIdx(ctx context.Context, segmentFilePath string, firstBlockNumInSegm } defer d.Close() + _, fname := filepath.Split(segmentFilePath) + p.Name.Store(fname) + p.Total.Store(uint64(d.Count())) + hasher := crypto.NewKeccakState() var h common.Hash if err := Idx(ctx, d, firstBlockNumInSegment, tmpDir, log.LvlDebug, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error { + p.Processed.Inc() headerRlp := word[1:] hasher.Reset() hasher.Write(headerRlp) @@ -1701,7 +1787,7 @@ func HeadersIdx(ctx context.Context, segmentFilePath string, firstBlockNumInSegm return nil } -func BodiesIdx(ctx context.Context, segmentFilePath string, firstBlockNumInSegment uint64, tmpDir string, lvl log.Lvl) (err error) { +func BodiesIdx(ctx context.Context, 
segmentFilePath string, firstBlockNumInSegment uint64, tmpDir string, p *background.Progress, lvl log.Lvl) (err error) { defer func() { if rec := recover(); rec != nil { _, fName := filepath.Split(segmentFilePath) @@ -1717,7 +1803,12 @@ func BodiesIdx(ctx context.Context, segmentFilePath string, firstBlockNumInSegme } defer d.Close() + _, fname := filepath.Split(segmentFilePath) + p.Name.Store(fname) + p.Total.Store(uint64(d.Count())) + if err := Idx(ctx, d, firstBlockNumInSegment, tmpDir, log.LvlDebug, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error { + p.Processed.Inc() n := binary.PutUvarint(num, i) if err := idx.AddKey(num[:n], offset); err != nil { return err @@ -1729,38 +1820,6 @@ func BodiesIdx(ctx context.Context, segmentFilePath string, firstBlockNumInSegme return nil } -type decompressItem struct { - i, offset uint64 - word []byte - err error -} - -func forEachAsync(ctx context.Context, d *compress.Decompressor) chan decompressItem { - ch := make(chan decompressItem, 1024) - go func() { - defer close(ch) - if err := d.WithReadAhead(func() error { - g := d.MakeGetter() - var wc, pos, nextPos uint64 - word := make([]byte, 0, 4096) - for g.HasNext() { - word, nextPos = g.Next(word[:0]) - select { - case <-ctx.Done(): - return nil - case ch <- decompressItem{i: wc, offset: pos, word: common2.Copy(word)}: - } - wc++ - pos = nextPos - } - return nil - }); err != nil { - ch <- decompressItem{err: err} - } - }() - return ch -} - // Idx - iterate over segment and building .idx file func Idx(ctx context.Context, d *compress.Decompressor, firstDataID uint64, tmpDir string, lvl log.Lvl, walker func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error) error { segmentFileName := d.FilePath() @@ -1781,30 +1840,26 @@ func Idx(ctx context.Context, d *compress.Decompressor, firstDataID uint64, tmpD } rs.LogLvl(lvl) + defer d.EnableReadAhead().DisableReadAhead() + RETRY: - if err := d.WithReadAhead(func() error { - g := d.MakeGetter() - var i, 
offset, nextPos uint64 - word := make([]byte, 0, 4096) - for g.HasNext() { - word, nextPos = g.Next(word[:0]) - if err := walker(rs, i, offset, word); err != nil { - return err - } - i++ - offset = nextPos + g := d.MakeGetter() + var i, offset, nextPos uint64 + word := make([]byte, 0, 4096) + for g.HasNext() { + word, nextPos = g.Next(word[:0]) + if err := walker(rs, i, offset, word); err != nil { + return err + } + i++ + offset = nextPos - select { - case <-ctx.Done(): - return ctx.Err() - default: - } + select { + case <-ctx.Done(): + return ctx.Err() + default: } - return nil - }); err != nil { - return err } - if err = rs.Build(); err != nil { if errors.Is(err, recsplit.ErrCollision) { log.Info("Building recsplit. Collision happened. It's ok. Restarting with another salt...", "err", err) @@ -1818,22 +1873,25 @@ RETRY: func ForEachHeader(ctx context.Context, s *RoSnapshots, walker func(header *types.Header) error) error { r := bytes.NewReader(nil) + word := make([]byte, 0, 2*4096) err := s.Headers.View(func(snapshots []*HeaderSegment) error { for _, sn := range snapshots { - ch := forEachAsync(ctx, sn.seg) - for it := range ch { - if it.err != nil { - return nil - } - - header := new(types.Header) - r.Reset(it.word[1:]) - if err := rlp.Decode(r, header); err != nil { - return err - } - if err := walker(header); err != nil { - return err + if err := sn.seg.WithReadAhead(func() error { + g := sn.seg.MakeGetter() + for g.HasNext() { + word, _ = g.Next(word[:0]) + var header types.Header + r.Reset(word[1:]) + if err := rlp.Decode(r, &header); err != nil { + return err + } + if err := walker(&header); err != nil { + return err + } } + return nil + }); err != nil { + return err } } return nil @@ -1866,7 +1924,7 @@ func (r Range) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, r.to func (*Merger) FindMergeRanges(currentRanges []Range) (toMerge []Range) { for i := len(currentRanges) - 1; i > 0; i-- { r := currentRanges[i] - if r.to-r.from >= 
snap.DEFAULT_SEGMENT_SIZE { // is complete .seg + if r.to-r.from >= snap.Erigon2SegmentSize { // is complete .seg continue } @@ -1927,12 +1985,14 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges return err } for _, t := range snap.AllSnapshotTypes { - f, _ := snap.ParseFileName(snapDir, snap.SegmentFileName(r.from, r.to, t)) + segName := snap.SegmentFileName(r.from, r.to, t) + f, _ := snap.ParseFileName(snapDir, segName) if err := m.merge(ctx, toMerge[t], f.Path, logEvery); err != nil { return fmt.Errorf("mergeByAppendSegments: %w", err) } if doIndex { - if err := buildIdx(ctx, f, m.chainID, m.tmpDir, m.lvl); err != nil { + p := &background.Progress{} + if err := buildIdx(ctx, f, m.chainID, m.tmpDir, p, m.lvl); err != nil { return err } } @@ -1997,7 +2057,7 @@ func (m *Merger) merge(ctx context.Context, toMerge []string, targetFile string, d.Close() } if f.Count() != expectedTotal { - return fmt.Errorf("unexpected amount after segments merge. got: %d, expected: %d\n", f.Count(), expectedTotal) + return fmt.Errorf("unexpected amount after segments merge. 
got: %d, expected: %d", f.Count(), expectedTotal) } if err = f.Compress(); err != nil { return err @@ -2058,7 +2118,7 @@ func BuildProtoRequest(downloadRequest []DownloadRequest) *proto_downloader.Down }) } } else { - if r.ranges.to-r.ranges.from != snap.DEFAULT_SEGMENT_SIZE { + if r.ranges.to-r.ranges.from != snap.Erigon2SegmentSize { continue } for _, t := range snap.AllSnapshotTypes { @@ -2070,3 +2130,54 @@ func BuildProtoRequest(downloadRequest []DownloadRequest) *proto_downloader.Down } return req } + +type BodiesIterator struct{} + +func (i BodiesIterator) ForEach(tx kv.Tx, s *RoSnapshots, f func(blockNum uint64, baseTxNum uint64, txAmount uint64) error) error { + var blocksInSnapshtos uint64 + if s != nil && s.cfg.Enabled { + blocksInSnapshtos = s.SegmentsMax() + } + + if s != nil && s.cfg.Enabled { + if err := s.Bodies.View(func(bs []*BodySegment) error { + for _, b := range bs { + if err := b.Iterate(f); err != nil { + return err + } + } + return nil + }); err != nil { + return fmt.Errorf("build txNum => blockNum mapping: %w", err) + } + } + + for i := blocksInSnapshtos + 1; ; i++ { + body, baseTxId, txAmount, err := rawdb.ReadBodyByNumber(tx, i) + if err != nil { + return err + } + if body == nil { + break + } + if err := f(i, baseTxId-1, uint64(txAmount)+2); err != nil { + return err + } + } + return nil +} + +// BackgroundResult - used only indicate that some work is done +// no much reason to pass exact results by this object, just get latest state when need +type BackgroundResult struct { + has bool + err error +} + +func (br *BackgroundResult) Has() bool { return br.has } +func (br *BackgroundResult) Set(err error) { br.has, br.err = true, err } +func (br *BackgroundResult) GetAndReset() (bool, error) { + has, err := br.has, br.err + br.has, br.err = false, nil + return has, err +} diff --git a/turbo/snapshotsync/block_snapshots_test.go b/turbo/snapshotsync/block_snapshots_test.go index 91ae606743c..54ef18f0998 100644 --- 
a/turbo/snapshotsync/block_snapshots_test.go +++ b/turbo/snapshotsync/block_snapshots_test.go @@ -74,7 +74,6 @@ func TestMergeSnapshots(t *testing.T) { s := NewRoSnapshots(cfg, dir) defer s.Close() require.NoError(s.ReopenFolder()) - { merger := NewMerger(dir, 1, log.LvlInfo, uint256.Int{}, nil) ranges := merger.FindMergeRanges(s.Ranges()) @@ -127,7 +126,7 @@ func TestCanRetire(t *testing.T) { } func TestOpenAllSnapshot(t *testing.T) { dir, require := t.TempDir(), require.New(t) - chainSnapshotCfg := snapcfg.KnownCfg(networkname.MainnetChainName, nil) + chainSnapshotCfg := snapcfg.KnownCfg(networkname.MainnetChainName, nil, nil) chainSnapshotCfg.ExpectBlocks = math.MaxUint64 cfg := ethconfig.Snapshot{Enabled: true} createFile := func(from, to uint64, name snap.Type) { createTestSegmentFile(t, from, to, name, dir) } diff --git a/turbo/snapshotsync/const.go b/turbo/snapshotsync/const.go deleted file mode 100644 index 10566537903..00000000000 --- a/turbo/snapshotsync/const.go +++ /dev/null @@ -1,9 +0,0 @@ -package snapshotsync - -import ( - "errors" -) - -var ( - ErrInvalidSnapshot = errors.New("this snapshot for this chainID not supported ") -) diff --git a/turbo/snapshotsync/snap/files.go b/turbo/snapshotsync/snap/files.go index a457d7755de..462edfb3c50 100644 --- a/turbo/snapshotsync/snap/files.go +++ b/turbo/snapshotsync/snap/files.go @@ -92,6 +92,11 @@ func IsCorrectFileName(name string) bool { return len(parts) == 4 && parts[3] != "v1" } +func IsCorrectHistoryFileName(name string) bool { + parts := strings.Split(name, ".") + return len(parts) == 3 +} + func ParseFileName(dir, fileName string) (res FileInfo, err error) { ext := filepath.Ext(fileName) onlyName := fileName[:len(fileName)-len(ext)] @@ -128,9 +133,9 @@ func ParseFileName(dir, fileName string) (res FileInfo, err error) { return FileInfo{From: from * 1_000, To: to * 1_000, Path: filepath.Join(dir, fileName), T: snapshotType, Ext: ext}, nil } -const MERGE_THRESHOLD = 2 // don't trigger merge if have too 
small amount of partial segments -const DEFAULT_SEGMENT_SIZE = 500_000 -const MIN_SEGMENT_SIZE = 1_000 +const Erigon3SeedableSteps = 32 +const Erigon2SegmentSize = 500_000 +const Erigon2MinSegmentSize = 1_000 // FileInfo - parsed file metadata type FileInfo struct { @@ -142,7 +147,7 @@ type FileInfo struct { } func (f FileInfo) TorrentFileExists() bool { return common.FileExist(f.Path + ".torrent") } -func (f FileInfo) Seedable() bool { return f.To-f.From == DEFAULT_SEGMENT_SIZE } +func (f FileInfo) Seedable() bool { return f.To-f.From == Erigon2SegmentSize } func (f FileInfo) NeedTorrentFile() bool { return f.Seedable() && !f.TorrentFileExists() } func IdxFiles(dir string) (res []FileInfo, err error) { return FilesWithExt(dir, ".idx") } @@ -214,7 +219,7 @@ func ParseDir(dir string) (res []FileInfo, err error) { } func RemoveNonPreverifiedFiles(chainName, snapDir string) error { - preverified := snapcfg.KnownCfg(chainName, nil).Preverified + preverified := snapcfg.KnownCfg(chainName, nil, nil).Preverified keep := map[string]struct{}{} for _, p := range preverified { ext := filepath.Ext(p.Name) diff --git a/turbo/snapshotsync/snapcfg/util.go b/turbo/snapshotsync/snapcfg/util.go index e34ca3f5008..1e2e0e33aa9 100644 --- a/turbo/snapshotsync/snapcfg/util.go +++ b/turbo/snapshotsync/snapcfg/util.go @@ -12,17 +12,21 @@ import ( "golang.org/x/exp/slices" ) -var Mainnet = fromToml(snapshothashes.Mainnet) - -var Goerli = fromToml(snapshothashes.Goerli) - -var Bsc = fromToml(snapshothashes.Bsc) - -var Ropsten = fromToml(snapshothashes.Ropsten) - -var Mumbai = fromToml(snapshothashes.Mumbai) - -var BorMainnet = fromToml(snapshothashes.BorMainnet) +var ( + Mainnet = fromToml(snapshothashes.Mainnet) + Goerli = fromToml(snapshothashes.Goerli) + Bsc = fromToml(snapshothashes.Bsc) + Ropsten = fromToml(snapshothashes.Ropsten) + Mumbai = fromToml(snapshothashes.Mumbai) + BorMainnet = fromToml(snapshothashes.BorMainnet) + + MainnetHistory = fromToml(snapshothashes.MainnetHistory) + 
GoerliHistory = fromToml(snapshothashes.GoerliHistory) + BscHistory = fromToml(snapshothashes.BscHistory) + RopstenHistory = fromToml(snapshothashes.RopstenHistory) + MumbaiHistory = fromToml(snapshothashes.MumbaiHistory) + BorMainnetHistory = fromToml(snapshothashes.BorMainnetHistory) +) type PreverifiedItem struct { Name string @@ -48,16 +52,16 @@ func doSort(in preverified) Preverified { } var ( - MainnetChainSnapshotCfg = newCfg(Mainnet) - GoerliChainSnapshotCfg = newCfg(Goerli) - BscChainSnapshotCfg = newCfg(Bsc) - RopstenChainSnapshotCfg = newCfg(Ropsten) - MumbaiChainSnapshotCfg = newCfg(Mumbai) - BorMainnetChainSnapshotCfg = newCfg(BorMainnet) + MainnetChainSnapshotCfg = newCfg(Mainnet, MainnetHistory) + GoerliChainSnapshotCfg = newCfg(Goerli, GoerliHistory) + BscChainSnapshotCfg = newCfg(Bsc, BscHistory) + RopstenChainSnapshotCfg = newCfg(Ropsten, RopstenHistory) + MumbaiChainSnapshotCfg = newCfg(Mumbai, MumbaiHistory) + BorMainnetChainSnapshotCfg = newCfg(BorMainnet, BorMainnetHistory) ) -func newCfg(preverified Preverified) *Cfg { - return &Cfg{ExpectBlocks: maxBlockNum(preverified), Preverified: preverified} +func newCfg(preverified, preverifiedHistory Preverified) *Cfg { + return &Cfg{ExpectBlocks: maxBlockNum(preverified), Preverified: preverified, PreverifiedHistory: preverifiedHistory} } func maxBlockNum(preverified Preverified) uint64 { @@ -91,8 +95,9 @@ func maxBlockNum(preverified Preverified) uint64 { } type Cfg struct { - ExpectBlocks uint64 - Preverified Preverified + ExpectBlocks uint64 + Preverified Preverified + PreverifiedHistory Preverified } var KnownCfgs = map[string]*Cfg{ @@ -105,26 +110,46 @@ var KnownCfgs = map[string]*Cfg{ } // KnownCfg return list of preverified hashes for given network, but apply whiteList filter if it's not empty -func KnownCfg(networkName string, whiteList []string) *Cfg { +func KnownCfg(networkName string, whiteList, whiteListHistory []string) *Cfg { c, ok := KnownCfgs[networkName] if !ok { - return 
newCfg(Preverified{}) + return newCfg(Preverified{}, Preverified{}) } + + var result, result2 Preverified if len(whiteList) == 0 { - return c - } + result = c.Preverified + } else { + wlMap := make(map[string]struct{}, len(whiteList)) + for _, fName := range whiteList { + wlMap[fName] = struct{}{} + } - wlMap := make(map[string]struct{}, len(whiteList)) - for _, fName := range whiteList { - wlMap[fName] = struct{}{} + result = make(Preverified, 0, len(c.Preverified)) + for _, p := range c.Preverified { + if _, ok := wlMap[p.Name]; !ok { + continue + } + result = append(result, p) + } } - result := make(Preverified, 0, len(c.Preverified)) - for _, p := range c.Preverified { - if _, ok := wlMap[p.Name]; !ok { - continue + if len(whiteList) == 0 { + result2 = c.PreverifiedHistory + } else { + wlMap2 := make(map[string]struct{}, len(whiteListHistory)) + for _, fName := range whiteListHistory { + wlMap2[fName] = struct{}{} + } + + result2 = make(Preverified, 0, len(c.PreverifiedHistory)) + for _, p := range c.PreverifiedHistory { + if _, ok := wlMap2[p.Name]; !ok { + continue + } + result2 = append(result2, p) } - result = append(result, p) } - return newCfg(result) + + return newCfg(result, result2) } diff --git a/turbo/snapshotsync/snapshot_mode.go b/turbo/snapshotsync/snapshot_mode.go deleted file mode 100644 index 2cdb44b0f9b..00000000000 --- a/turbo/snapshotsync/snapshot_mode.go +++ /dev/null @@ -1,87 +0,0 @@ -package snapshotsync - -/* -import ( - "fmt" - - "github.com/ledgerwatch/erigon-lib/gointerfaces/snapshotsync" -) - -var DefaultSnapshotMode = SnapshotMode{} - -type SnapshotMode struct { - Headers bool - Bodies bool - State bool - Receipts bool -} - -func (m SnapshotMode) ToString() string { - var mode string - if m.Headers { - mode += "h" - } - if m.Bodies { - mode += "b" - } - if m.State { - mode += "s" - } - if m.Receipts { - mode += "r" - } - return mode -} - -func (m SnapshotMode) ToSnapshotTypes() []snapshotsync.Type { - var types []snapshotsync.Type - 
if m.Headers { - types = append(types, snapshotsync.SnapshotType_headers) - } - if m.Bodies { - types = append(types, snapshotsync.SnapshotType_bodies) - } - if m.State { - types = append(types, snapshotsync.SnapshotType_state) - } - if m.Receipts { - types = append(types, snapshotsync.SnapshotType_receipts) - } - return types -} - -func FromSnapshotTypes(st []snapshotsync.Type) SnapshotMode { - var mode SnapshotMode - for i := range st { - switch st[i] { - case snapshotsync.SnapshotType_headers: - mode.Headers = true - case snapshotsync.SnapshotType_bodies: - mode.Bodies = true - case snapshotsync.SnapshotType_state: - mode.State = true - case snapshotsync.SnapshotType_receipts: - mode.Receipts = true - } - } - return mode -} -func SnapshotModeFromString(flags string) (SnapshotMode, error) { - mode := SnapshotMode{} - for _, flag := range flags { - switch flag { - case 'h': - mode.Headers = true - case 'b': - mode.Bodies = true - case 's': - mode.State = true - case 'r': - mode.Receipts = true - default: - return mode, fmt.Errorf("unexpected flag found: %c", flag) - } - } - return mode, nil -} -*/ diff --git a/turbo/snapshotsync/snapshot_mode_test.go b/turbo/snapshotsync/snapshot_mode_test.go deleted file mode 100644 index 84d24418ebb..00000000000 --- a/turbo/snapshotsync/snapshot_mode_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package snapshotsync - -/* -import ( - "reflect" - "testing" -) - -func TestSnapshotMode(t *testing.T) { - sm := SnapshotMode{} - sm.Receipts = true - if sm.ToString() != "r" { - t.Fatal(sm.ToString()) - } - sm.State = true - if sm.ToString() != "sr" { - t.Fatal(sm.ToString()) - } - sm.Bodies = true - if sm.ToString() != "bsr" { - t.Fatal(sm.ToString()) - } - sm.Headers = true - if sm.ToString() != "hbsr" { - t.Fatal(sm.ToString()) - } -} - -func TestSnapshotModeFromString(t *testing.T) { - sm, err := SnapshotModeFromString("hsbr") - if err != nil { - t.Fatal(err) - } - if reflect.DeepEqual(sm, SnapshotMode{ - Headers: true, - Bodies: true, - 
State: true, - Receipts: true, - }) == false { - t.Fatal(sm) - } -} -*/ diff --git a/turbo/stages/blockchain_test.go b/turbo/stages/blockchain_test.go index 9323db20958..6ac18283fad 100644 --- a/turbo/stages/blockchain_test.go +++ b/turbo/stages/blockchain_test.go @@ -492,8 +492,12 @@ func TestChainTxReorgs(t *testing.T) { if txn, _, _, _, _ := rawdb.ReadTransactionByHash(tx, txn.Hash()); txn == nil { t.Errorf("add %d: expected tx to be found", i) } - if rcpt, _, _, _, _ := rawdb.ReadReceipt(tx, txn.Hash()); rcpt == nil { - t.Errorf("add %d: expected receipt to be found", i) + if m.HistoryV3 { + // m.HistoryV3 doesn't store + } else { + if rcpt, _, _, _, _ := rawdb.ReadReceipt(tx, txn.Hash()); rcpt == nil { + t.Errorf("add %d: expected receipt to be found", i) + } } } // shared tx @@ -502,8 +506,12 @@ func TestChainTxReorgs(t *testing.T) { if txn, _, _, _, _ := rawdb.ReadTransactionByHash(tx, txn.Hash()); txn == nil { t.Errorf("share %d: expected tx to be found", i) } - if rcpt, _, _, _, _ := rawdb.ReadReceipt(tx, txn.Hash()); rcpt == nil { - t.Errorf("share %d: expected receipt to be found", i) + if m.HistoryV3 { + // m.HistoryV3 doesn't store + } else { + if rcpt, _, _, _, _ := rawdb.ReadReceipt(tx, txn.Hash()); rcpt == nil { + t.Errorf("share %d: expected receipt to be found", i) + } } } } @@ -753,7 +761,11 @@ func doModesTest(t *testing.T, pm prune.Mode) error { }) require.NoError(err) require.GreaterOrEqual(receiptsAvailable, pm.Receipts.PruneTo(head)) - require.Greater(found, uint64(0)) + if m.HistoryV3 { + // receipts are not stored in erigon3 + } else { + require.Greater(found, uint64(0)) + } } else { receiptsAvailable, err := rawdb.ReceiptsAvailableFrom(tx) require.NoError(err) @@ -1002,20 +1014,21 @@ func TestDoubleAccountRemoval(t *testing.T) { }) assert.NoError(t, err) - tx, err := db.RwKV().BeginRo(context.Background()) + tx, err := db.RwKV().BeginRw(context.Background()) if err != nil { t.Fatalf("read only db tx to read state: %v", err) } defer 
tx.Rollback() - st := state.New(state.NewPlainState(tx, 1)) + + st := state.New(m.NewHistoricalStateReader(1, tx)) assert.NoError(t, err) assert.False(t, st.Exist(theAddr), "Contract should not exist at block #0") - st = state.New(state.NewPlainState(tx, 2)) + st = state.New(m.NewHistoricalStateReader(2, tx)) assert.NoError(t, err) assert.True(t, st.Exist(theAddr), "Contract should exist at block #1") - st = state.New(state.NewPlainState(tx, 3)) + st = state.New(m.NewHistoricalStateReader(3, tx)) assert.NoError(t, err) assert.True(t, st.Exist(theAddr), "Contract should exist at block #2") } @@ -1136,8 +1149,8 @@ func TestLargeReorgTrieGC(t *testing.T) { // overtake the 'canon' chain until after it's passed canon by about 200 blocks. // // Details at: -// - https://github.com/ethereum/go-ethereum/issues/18977 -// - https://github.com/ethereum/go-ethereum/pull/18988 +// - https://github.com/ethereum/go-ethereum/issues/18977 +// - https://github.com/ethereum/go-ethereum/pull/18988 func TestLowDiffLongChain(t *testing.T) { // Generate a canonical chain to act as the main dataset m := stages.Mock(t) @@ -1400,6 +1413,95 @@ func TestDeleteRecreateSlots(t *testing.T) { require.NoError(t, err) } +func TestCVE2020_26265(t *testing.T) { + var ( + // Generate a canonical chain to act as the main dataset + // A sender who makes transactions, has some funds + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + address = crypto.PubkeyToAddress(key.PublicKey) + funds = big.NewInt(1000000000) + + aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") + aaStorage = make(map[common.Hash]common.Hash) // Initial storage in AA + aaCode = []byte{byte(vm.ADDRESS), byte(vm.SELFDESTRUCT)} // Code for AA (selfdestruct to itself) + + caller = common.HexToAddress("0x000000000000000000000000000000000000bbbb") + callerStorage = make(map[common.Hash]common.Hash) // Initial storage in CALLER + callerCode = []byte{ + byte(vm.PC), // [0] + 
byte(vm.DUP1), // [0,0] + byte(vm.DUP1), // [0,0,0] + byte(vm.DUP1), // [0,0,0,0] + byte(vm.PUSH1), 0x00, // [0,0,0,0,1] (value) + byte(vm.PUSH2), 0xaa, 0xaa, // [0,0,0,0,1, 0xaaaa] + byte(vm.GAS), + byte(vm.CALL), // Cause self-destruct of aa + + byte(vm.PC), // [0] + byte(vm.DUP1), // [0,0] + byte(vm.DUP1), // [0,0,0] + byte(vm.DUP1), // [0,0,0,0] + byte(vm.PUSH1), 0x01, // [0,0,0,0,1] (value) + byte(vm.PUSH2), 0xaa, 0xaa, // [0,0,0,0,1, 0xaaaa] + byte(vm.GAS), + byte(vm.CALL), // Send 1 wei to add + + byte(vm.RETURN), + } // Code for CALLER + ) + gspec := &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{ + address: {Balance: funds}, + // The address 0xAAAAA selfdestructs if called + aa: { + // Code needs to just selfdestruct + Code: aaCode, + Nonce: 1, + Balance: big.NewInt(3), + Storage: aaStorage, + }, + caller: { + // Code needs to just selfdestruct + Code: callerCode, + Nonce: 1, + Balance: big.NewInt(10), + Storage: callerStorage, + }, + }, + } + m := stages.MockWithGenesis(t, gspec, key, false) + + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, b *core.BlockGen) { + b.SetCoinbase(common.Address{1}) + // One transaction to AA, to kill it + tx, _ := types.SignTx(types.NewTransaction(0, caller, + u256.Num0, 100000, u256.Num1, nil), *types.LatestSignerForChainID(nil), key) + b.AddTx(tx) + // One transaction to AA, to recreate it (but without storage + tx, _ = types.SignTx(types.NewTransaction(1, aa, + new(uint256.Int).SetUint64(5), 100000, u256.Num1, nil), *types.LatestSignerForChainID(nil), key) + b.AddTx(tx) + }, false /* intermediateHashes */) + if err != nil { + t.Fatalf("generate blocks: %v", err) + } + // Import the canonical chain + if err := m.InsertChain(chain); err != nil { + t.Fatalf("failed to insert into chain: %v", err) + } + err = m.DB.View(context.Background(), func(tx kv.Tx) error { + statedb := state.New(state.NewPlainState(tx, 2)) + + got := statedb.GetBalance(aa) + if 
!got.Eq(new(uint256.Int).SetUint64(5)) { + t.Errorf("got %x exp %x", got, 5) + } + return nil + }) + require.NoError(t, err) +} + // TestDeleteRecreateAccount tests a state-transition that contains deletion of a // contract with storage, and a recreate of the same contract via a // regular value-transfer @@ -1677,20 +1779,19 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) { // TestInitThenFailCreateContract tests a pretty notorious case that happened // on mainnet over blocks 7338108, 7338110 and 7338115. -// - Block 7338108: address e771789f5cccac282f23bb7add5690e1f6ca467c is initiated -// with 0.001 ether (thus created but no code) -// - Block 7338110: a CREATE2 is attempted. The CREATE2 would deploy code on -// the same address e771789f5cccac282f23bb7add5690e1f6ca467c. However, the -// deployment fails due to OOG during initcode execution -// - Block 7338115: another tx checks the balance of -// e771789f5cccac282f23bb7add5690e1f6ca467c, and the snapshotter returned it as -// zero. +// - Block 7338108: address e771789f5cccac282f23bb7add5690e1f6ca467c is initiated +// with 0.001 ether (thus created but no code) +// - Block 7338110: a CREATE2 is attempted. The CREATE2 would deploy code on +// the same address e771789f5cccac282f23bb7add5690e1f6ca467c. However, the +// deployment fails due to OOG during initcode execution +// - Block 7338115: another tx checks the balance of +// e771789f5cccac282f23bb7add5690e1f6ca467c, and the snapshotter returned it as +// zero. // // The problem being that the snapshotter maintains a destructset, and adds items // to the destructset in case something is created "onto" an existing item. // We need to either roll back the snapDestructs, or not place it into snapDestructs // in the first place. 
-// func TestInitThenFailCreateContract(t *testing.T) { var ( // Generate a canonical chain to act as the main dataset @@ -1884,13 +1985,13 @@ func TestEIP2718Transition(t *testing.T) { // TestEIP1559Transition tests the following: // -// 1. A tranaction whose feeCap is greater than the baseFee is valid. -// 2. Gas accounting for access lists on EIP-1559 transactions is correct. -// 3. Only the transaction's tip will be received by the coinbase. -// 4. The transaction sender pays for both the tip and baseFee. -// 5. The coinbase receives only the partially realized tip when -// feeCap - tip < baseFee. -// 6. Legacy transaction behave as expected (e.g. gasPrice = feeCap = tip). +// 1. A tranaction whose feeCap is greater than the baseFee is valid. +// 2. Gas accounting for access lists on EIP-1559 transactions is correct. +// 3. Only the transaction's tip will be received by the coinbase. +// 4. The transaction sender pays for both the tip and baseFee. +// 5. The coinbase receives only the partially realized tip when +// feeCap - tip < baseFee. +// 6. Legacy transaction behave as expected (e.g. gasPrice = feeCap = tip). 
func TestEIP1559Transition(t *testing.T) { t.Skip("needs fixing") var ( diff --git a/turbo/stages/bodydownload/body_algos.go b/turbo/stages/bodydownload/body_algos.go index edb406bcc51..964c343166c 100644 --- a/turbo/stages/bodydownload/body_algos.go +++ b/turbo/stages/bodydownload/body_algos.go @@ -12,24 +12,18 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/adapter" "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" ) const BlockBufferSize = 128 -// VerifyUnclesFunc validates the given block's uncles and verifies the block -// header's transaction and uncle roots. The headers are assumed to be already -// validated at this point. 
-// It returns 2 errors - first is Validation error (reason to penalize peer and continue processing other -// bodies), second is internal runtime error (like network error or db error) -type VerifyUnclesFunc func(peerID [64]byte, header *types.Header, uncles []*types.Header) error - // UpdateFromDb reads the state of the database and refreshes the state of the body download func (bd *BodyDownload) UpdateFromDb(db kv.Tx) (headHeight uint64, headHash common.Hash, headTd256 *uint256.Int, err error) { var headerProgress, bodyProgress uint64 @@ -50,11 +44,8 @@ func (bd *BodyDownload) UpdateFromDb(db kv.Tx) (headHeight uint64, headHash comm bd.delivered.Clear() bd.deliveredCount = 0 bd.wastedCount = 0 - for i := 0; i < len(bd.deliveriesH); i++ { - bd.deliveriesH[i] = nil - bd.deliveriesB[i] = nil - bd.requests[i] = nil - } + bd.deliveriesH = make(map[uint64]*types.Header) + bd.requests = make(map[uint64]*BodyRequest) bd.peerMap = make(map[[64]byte]int) headHeight = bodyProgress headHash, err = rawdb.ReadCanonicalHash(db, headHeight) @@ -79,14 +70,16 @@ func (bd *BodyDownload) UpdateFromDb(db kv.Tx) (headHeight uint64, headHash comm // RequestMoreBodies - returns nil if nothing to request func (bd *BodyDownload) RequestMoreBodies(tx kv.RwTx, blockReader services.FullBlockReader, blockNum uint64, currentTime uint64, blockPropagator adapter.BlockPropagator) (*BodyRequest, uint64, error) { - if blockNum < bd.requestedLow { - blockNum = bd.requestedLow - } var bodyReq *BodyRequest blockNums := make([]uint64, 0, BlockBufferSize) hashes := make([]common.Hash, 0, BlockBufferSize) + + if blockNum < bd.requestedLow { + blockNum = bd.requestedLow + } + for ; len(blockNums) < BlockBufferSize && bd.requestedLow <= bd.maxProgress; blockNum++ { - // Check if we reached highest allowed request block number, and turn back + // Check if we reached the highest allowed request block number, and turn back if blockNum >= bd.requestedLow+bd.outstandingLimit || blockNum >= bd.maxProgress { 
blockNum = 0 break // Avoid tight loop @@ -95,76 +88,96 @@ func (bd *BodyDownload) RequestMoreBodies(tx kv.RwTx, blockReader services.FullB // Already delivered, no need to request continue } - req := bd.requests[blockNum-bd.requestedLow] + + req := bd.requests[blockNum] if req != nil { if currentTime < req.waitUntil { continue } bd.peerMap[req.peerID]++ - bd.requests[blockNum-bd.requestedLow] = nil + bd.requests[blockNum] = nil } + + // check in the bucket if that has been received either in this run or a previous one. + // if we already have the body we can continue on to populate header info and then skip + // the body request altogether + var err error + key := dbutils.EncodeBlockNumber(blockNum) + var bodyInBucket bool + if !bd.UsingExternalTx { + bodyInBucket, err = tx.Has("BodiesStage", key) + if err != nil { + return nil, blockNum, err + } + } else { + _, bodyInBucket = bd.bodyCache[blockNum] + } + + if bodyInBucket { + bd.delivered.Add(blockNum) + continue + } + var hash common.Hash var header *types.Header - var err error request := true - if bd.deliveriesH[blockNum-bd.requestedLow] != nil { + if bd.deliveriesH[blockNum] != nil { // If this block was requested before, we don't need to fetch the headers from the database the second time - header = bd.deliveriesH[blockNum-bd.requestedLow] + header = bd.deliveriesH[blockNum] if header == nil { - return nil, 0, fmt.Errorf("header not found: %w, blockNum=%d, trace=%s", err, blockNum, dbg.Stack()) + return nil, blockNum, fmt.Errorf("header not found: %w, blockNum=%d, trace=%s", err, blockNum, dbg.Stack()) } hash = header.Hash() + + // check here if we have the block prefetched as this could have come in as part of a data race + // we want to avoid an infinite loop if the header was populated in deliveriesH before the block + // was added to the prefetched cache + if hasPrefetched := bd.checkPrefetchedBlock(hash, tx, blockNum, blockPropagator); hasPrefetched { + request = false + } } else { hash, err = 
rawdb.ReadCanonicalHash(tx, blockNum) if err != nil { - return nil, 0, fmt.Errorf("could not find canonical header: %w, blockNum=%d, trace=%s", err, blockNum, dbg.Stack()) + return nil, blockNum, fmt.Errorf("could not find canonical header: %w, blockNum=%d, trace=%s", err, blockNum, dbg.Stack()) } header, err = blockReader.Header(context.Background(), tx, hash, blockNum) if err != nil { - return nil, 0, fmt.Errorf("header not found: %w, blockNum=%d, trace=%s", err, blockNum, dbg.Stack()) + return nil, blockNum, fmt.Errorf("header not found: %w, blockNum=%d, trace=%s", err, blockNum, dbg.Stack()) } if header == nil { - return nil, 0, fmt.Errorf("header not found: blockNum=%d, hash=%x, trace=%s", blockNum, hash, dbg.Stack()) + return nil, blockNum, fmt.Errorf("header not found: blockNum=%d, hash=%x, trace=%s", blockNum, hash, dbg.Stack()) } - if block := bd.prefetchedBlocks.Pop(hash); block != nil { - // Block is prefetched, no need to request - bd.deliveriesH[blockNum-bd.requestedLow] = block.Header() - bd.deliveriesB[blockNum-bd.requestedLow] = block.RawBody() - - // Calculate the TD of the block (it's not imported yet, so block.Td is not valid) - if parent, err := rawdb.ReadTd(tx, block.ParentHash(), block.NumberU64()-1); err != nil { - log.Error("Failed to ReadTd", "err", err, "number", block.NumberU64()-1, "hash", block.ParentHash()) - } else if parent != nil { - if block.Difficulty().Sign() != 0 { // don't propagate proof-of-stake blocks - td := new(big.Int).Add(block.Difficulty(), parent) - go blockPropagator(context.Background(), block, td) - } - } else { - log.Error("Propagating dangling block", "number", block.Number(), "hash", hash) - } + if hasPrefetched := bd.checkPrefetchedBlock(hash, tx, blockNum, blockPropagator); hasPrefetched { request = false } else { - bd.deliveriesH[blockNum-bd.requestedLow] = header + bd.deliveriesH[blockNum] = header if header.UncleHash != types.EmptyUncleHash || header.TxHash != types.EmptyRootHash { // Perhaps we already have 
this block - block = rawdb.ReadBlock(tx, hash, blockNum) + block := rawdb.ReadBlock(tx, hash, blockNum) if block == nil { var doubleHash DoubleHash copy(doubleHash[:], header.UncleHash.Bytes()) copy(doubleHash[common.HashLength:], header.TxHash.Bytes()) bd.requestedMap[doubleHash] = blockNum } else { - bd.deliveriesB[blockNum-bd.requestedLow] = block.RawBody() + err = bd.addBodyToBucket(tx, blockNum, block.RawBody()) + if err != nil { + log.Error("Failed to add block body to bucket", "err", err, "number", block.NumberU64()-1, "hash", block.ParentHash()) + } request = false } } else { - bd.deliveriesB[blockNum-bd.requestedLow] = &types.RawBody{} + err = bd.addBodyToBucket(tx, blockNum, &types.RawBody{}) + if err != nil { + log.Error("Failed to add block body to bucket", "err", err, "number", blockNum, "hash", hash) + } request = false } } } + if request { blockNums = append(blockNums, blockNum) hashes = append(hashes, hash) @@ -175,22 +188,52 @@ func (bd *BodyDownload) RequestMoreBodies(tx kv.RwTx, blockReader services.FullB } if len(blockNums) > 0 { bodyReq = &BodyRequest{BlockNums: blockNums, Hashes: hashes} - for _, blockNum := range blockNums { - bd.requests[blockNum-bd.requestedLow] = bodyReq + for _, num := range blockNums { + bd.requests[num] = bodyReq } } + return bodyReq, blockNum, nil } +// checks if we have the block prefetched, returns true if found and stored or false if not present +func (bd *BodyDownload) checkPrefetchedBlock(hash common.Hash, tx kv.RwTx, blockNum uint64, blockPropagator adapter.BlockPropagator) bool { + block := bd.prefetchedBlocks.Pop(hash) + + if block == nil { + return false + } + + // Block is prefetched, no need to request + bd.deliveriesH[blockNum] = block.Header() + + // make sure we have the body in the bucket for later use + bd.addBodyToBucket(tx, blockNum, block.RawBody()) + + // Calculate the TD of the block (it's not imported yet, so block.Td is not valid) + if parent, err := rawdb.ReadTd(tx, block.ParentHash(), 
block.NumberU64()-1); err != nil { + log.Error("Failed to ReadTd", "err", err, "number", block.NumberU64()-1, "hash", block.ParentHash()) + } else if parent != nil { + if block.Difficulty().Sign() != 0 { // don't propagate proof-of-stake blocks + td := new(big.Int).Add(block.Difficulty(), parent) + go blockPropagator(context.Background(), block, td) + } + } else { + log.Error("Propagating dangling block", "number", block.Number(), "hash", hash) + } + + return true +} + func (bd *BodyDownload) RequestSent(bodyReq *BodyRequest, timeWithTimeout uint64, peer [64]byte) { for _, blockNum := range bodyReq.BlockNums { if blockNum < bd.requestedLow { continue } - req := bd.requests[blockNum-bd.requestedLow] + req := bd.requests[blockNum] if req != nil { - bd.requests[blockNum-bd.requestedLow].waitUntil = timeWithTimeout - bd.requests[blockNum-bd.requestedLow].peerID = peer + bd.requests[blockNum].waitUntil = timeWithTimeout + bd.requests[blockNum].peerID = peer } } } @@ -222,7 +265,7 @@ func (rt RawTransactions) EncodeIndex(i int, w *bytes.Buffer) { w.Write(rt[i][1:]) //nolint:errcheck return } else if firstByte >= 184 && firstByte < 192 { - // RLP striong >= 56 bytes long, firstByte-183 is the length of encoded size + // RLP string >= 56 bytes long, firstByte-183 is the length of encoded size w.Write(rt[i][1+firstByte-183:]) //nolint:errcheck return } @@ -230,7 +273,13 @@ func (rt RawTransactions) EncodeIndex(i int, w *bytes.Buffer) { w.Write(rt[i]) //nolint:errcheck } -func (bd *BodyDownload) doDeliverBodies() (err error) { +func (bd *BodyDownload) DeliverySize(delivered float64, wasted float64) { + bd.deliveredCount += delivered + bd.wastedCount += wasted +} + +func (bd *BodyDownload) GetDeliveries(tx kv.RwTx) (uint64, uint64, error) { + var delivered, undelivered int Loop: for { var delivery Delivery @@ -254,7 +303,6 @@ Loop: reqMap := make(map[uint64]*BodyRequest) txs, uncles, lenOfP2PMessage, _ := *delivery.txs, *delivery.uncles, delivery.lenOfP2PMessage, 
delivery.peerID - var delivered, undelivered int for i := range txs { uncleHash := types.CalcUncleHash(uncles[i]) @@ -263,14 +311,14 @@ Loop: copy(doubleHash[:], uncleHash.Bytes()) copy(doubleHash[common.HashLength:], txHash.Bytes()) - // Block numbers are added to the bd.delivered bitmap here, only for blocks for which the body has been received, and their double hashes are present in the bd.requesredMap + // Block numbers are added to the bd.delivered bitmap here, only for blocks for which the body has been received, and their double hashes are present in the bd.requestedMap // Also, block numbers can be added to bd.delivered for empty blocks, above blockNum, ok := bd.requestedMap[doubleHash] if !ok { undelivered++ continue } - req := bd.requests[blockNum-bd.requestedLow] + req := bd.requests[blockNum] if req != nil { if _, ok := reqMap[req.BlockNums[0]]; !ok { reqMap[req.BlockNums[0]] = req @@ -278,14 +326,17 @@ Loop: } delete(bd.requestedMap, doubleHash) // Delivered, cleaning up - bd.deliveriesB[blockNum-bd.requestedLow] = &types.RawBody{Transactions: txs[i], Uncles: uncles[i]} + err := bd.addBodyToBucket(tx, blockNum, &types.RawBody{Transactions: txs[i], Uncles: uncles[i]}) + if err != nil { + return 0, 0, err + } bd.delivered.Add(blockNum) delivered++ } // Clean up the requests for _, req := range reqMap { for _, blockNum := range req.BlockNums { - bd.requests[blockNum-bd.requestedLow] = nil + bd.requests[blockNum] = nil } } total := delivered + undelivered @@ -294,65 +345,20 @@ Loop: bd.DeliverySize(float64(lenOfP2PMessage)*float64(delivered)/float64(delivered+undelivered), float64(lenOfP2PMessage)*float64(undelivered)/float64(delivered+undelivered)) } } - return nil -} - -func (bd *BodyDownload) DeliverySize(delivered float64, wasted float64) { - bd.deliveredCount += delivered - bd.wastedCount += wasted -} -// ValidateBody validates the given block's uncles and verifies the block -// header's transaction and uncle roots. 
The headers are assumed to be already -// validated at this point. -// It returns 2 errors - first is Validation error (reason to penalize peer and continue processing other -// bodies), second is internal runtime error (like network error or db error) -func (bd *BodyDownload) VerifyUncles(header *types.Header, uncles []*types.Header, r consensus.ChainReader) (headerdownload.Penalty, error) { - - // Header validity is known at this point, check the uncles and transactions - //header := block.Header() - if err := bd.Engine.VerifyUncles(r, header, uncles); err != nil { - return headerdownload.BadBlockPenalty, err - } - //if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash { - // return headerdownload.BadBlockPenalty, fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash), nil - //} - //if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash { - // return headerdownload.BadBlockPenalty, fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash), nil - //} - return headerdownload.NoPenalty, nil + return bd.requestedLow, uint64(delivered), nil } -func (bd *BodyDownload) GetDeliveries() ([]*types.Header, []*types.RawBody, error) { - err := bd.doDeliverBodies() // TODO: join this 2 funcs and simplify - if err != nil { - return nil, nil, err - } - +// NextProcessingCount returns the count of contiguous block numbers ready to process from the +// requestedLow minimum value. 
+// the requestedLow count is increased by the number returned +func (bd *BodyDownload) NextProcessingCount() uint64 { var i uint64 for i = 0; !bd.delivered.IsEmpty() && bd.requestedLow+i == bd.delivered.Minimum(); i++ { bd.delivered.Remove(bd.requestedLow + i) } - // Move the deliveries back - // bd.requestedLow can only be moved forward if there are consecutive block numbers present in the bd.delivered map - var headers []*types.Header - var rawBodies []*types.RawBody - if i > 0 { - headers = make([]*types.Header, i) - rawBodies = make([]*types.RawBody, i) - copy(headers, bd.deliveriesH[:i]) - copy(rawBodies, bd.deliveriesB[:i]) - copy(bd.deliveriesH, bd.deliveriesH[i:]) - copy(bd.deliveriesB, bd.deliveriesB[i:]) - copy(bd.requests, bd.requests[i:]) - for j := len(bd.deliveriesH) - int(i); j < len(bd.deliveriesH); j++ { - bd.deliveriesH[j] = nil - bd.deliveriesB[j] = nil - bd.requests[j] = nil - } - bd.requestedLow += i - } - return headers, rawBodies, nil + bd.requestedLow += i + return i } func (bd *BodyDownload) DeliveryCounts() (float64, float64) { @@ -394,3 +400,89 @@ func (bd *BodyDownload) AddMinedBlock(block *types.Block) error { bd.AddToPrefetch(block) return nil } + +// GetHeader returns a header by either loading from the deliveriesH slice populated when running RequestMoreBodies +// or if the code is continuing from a previous run and this isn't present, by reading from the DB as the RequestMoreBodies would have. 
+// as the requestedLow count is incremented before a call to this function we need the process count so that we can anticipate this, +// effectively reversing time a little to get the actual position we need in the slice prior to requestedLow being incremented +func (bd *BodyDownload) GetHeader(blockNum uint64, blockReader services.FullBlockReader, tx kv.Tx) (*types.Header, common.Hash, error) { + var header *types.Header + if bd.deliveriesH[blockNum] != nil { + header = bd.deliveriesH[blockNum] + } else { + hash, err := rawdb.ReadCanonicalHash(tx, blockNum) + if err != nil { + return nil, common.Hash{}, err + } + header, err = blockReader.Header(context.Background(), tx, hash, blockNum) + if err != nil { + return nil, common.Hash{}, err + } + if header == nil { + return nil, common.Hash{}, fmt.Errorf("header not found: blockNum=%d, hash=%x, trace=%s", blockNum, hash, dbg.Stack()) + } + } + return header, header.Hash(), nil +} + +func (bd *BodyDownload) addBodyToBucket(tx kv.RwTx, key uint64, body *types.RawBody) error { + if !bd.UsingExternalTx { + // use the kv store to hold onto bodies as we're anticipating a lot of memory usage + writer := bytes.NewBuffer(nil) + err := body.EncodeRLP(writer) + if err != nil { + return err + } + rlpBytes := common.CopyBytes(writer.Bytes()) + writer.Reset() + writer.WriteString(hexutil.Encode(rlpBytes)) + + k := dbutils.EncodeBlockNumber(key) + err = tx.Put("BodiesStage", k, writer.Bytes()) + if err != nil { + return err + } + } else { + // use an in memory cache as we're near the top of the chain + bd.bodyCache[key] = body + } + + bd.bodiesAdded = true + return nil +} + +func (bd *BodyDownload) GetBlockFromCache(tx kv.RwTx, blockNum uint64) (*types.RawBody, error) { + if !bd.UsingExternalTx { + key := dbutils.EncodeBlockNumber(blockNum) + body, err := tx.GetOne("BodiesStage", key) + if err != nil { + return nil, err + } + + var rawBody types.RawBody + fromHex := common.CopyBytes(common.FromHex(string(body))) + bodyReader := 
bytes.NewReader(fromHex) + stream := rlp.NewStream(bodyReader, 0) + err = rawBody.DecodeRLP(stream) + if err != nil { + log.Error("Unexpected body from bucket", "err", err, "block", blockNum) + return nil, fmt.Errorf("%w, nextBlock=%d", err, blockNum) + } + + return &rawBody, nil + } else { + return bd.bodyCache[blockNum], nil + } +} + +func (bd *BodyDownload) ClearBodyCache() { + bd.bodyCache = make(map[uint64]*types.RawBody) +} + +func (bd *BodyDownload) HasAddedBodies() bool { + return bd.bodiesAdded +} + +func (bd *BodyDownload) ResetAddedBodies() { + bd.bodiesAdded = false +} diff --git a/turbo/stages/bodydownload/body_data_struct.go b/turbo/stages/bodydownload/body_data_struct.go index eedbf1c3ea0..9280b314856 100644 --- a/turbo/stages/bodydownload/body_data_struct.go +++ b/turbo/stages/bodydownload/body_data_struct.go @@ -2,6 +2,7 @@ package bodydownload import ( "github.com/RoaringBitmap/roaring/roaring64" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/types" @@ -28,9 +29,8 @@ type BodyDownload struct { Engine consensus.Engine delivered *roaring64.Bitmap prefetchedBlocks *PrefetchedBlocks - deliveriesH []*types.Header - deliveriesB []*types.RawBody - requests []*BodyRequest + deliveriesH map[uint64]*types.Header + requests map[uint64]*BodyRequest maxProgress uint64 requestedLow uint64 // Lower bound of block number for outstanding requests requestHigh uint64 @@ -38,6 +38,9 @@ type BodyDownload struct { outstandingLimit uint64 // Limit of number of outstanding blocks for body requests deliveredCount float64 wastedCount float64 + bodiesAdded bool + bodyCache map[uint64]*types.RawBody + UsingExternalTx bool } // BodyRequest is a sketch of the request for block bodies, meaning that access to the database is required to convert it to the actual BlockBodies request (look up hashes of canonical blocks) @@ -54,9 +57,8 @@ func NewBodyDownload(outstandingLimit int, engine consensus.Engine) 
*BodyDownloa requestedMap: make(map[DoubleHash]uint64), outstandingLimit: uint64(outstandingLimit), delivered: roaring64.New(), - deliveriesH: make([]*types.Header, outstandingLimit+MaxBodiesInRequest), - deliveriesB: make([]*types.RawBody, outstandingLimit+MaxBodiesInRequest), - requests: make([]*BodyRequest, outstandingLimit+MaxBodiesInRequest), + deliveriesH: make(map[uint64]*types.Header), + requests: make(map[uint64]*BodyRequest), peerMap: make(map[[64]byte]int), prefetchedBlocks: NewPrefetchedBlocks(), // DeliveryNotify has capacity 1, and it is also used so that senders never block @@ -69,6 +71,7 @@ func NewBodyDownload(outstandingLimit int, engine consensus.Engine) *BodyDownloa // deliveris, this is a good number for the channel capacity deliveryCh: make(chan Delivery, outstandingLimit+MaxBodiesInRequest), Engine: engine, + bodyCache: make(map[uint64]*types.RawBody), } return bd } diff --git a/turbo/stages/bodydownload/body_test.go b/turbo/stages/bodydownload/body_test.go index 5522855d3da..179b1b9b141 100644 --- a/turbo/stages/bodydownload/body_test.go +++ b/turbo/stages/bodydownload/body_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/consensus/ethash" ) diff --git a/turbo/stages/genesis_test.go b/turbo/stages/genesis_test.go index b3094fc2653..27faff93070 100644 --- a/turbo/stages/genesis_test.go +++ b/turbo/stages/genesis_test.go @@ -71,6 +71,17 @@ func TestDefaultGenesisBlock(t *testing.T) { if block.Hash() != params.FermionGenesisHash { t.Errorf("wrong fermion genesis hash, got %v, want %v", block.Hash(), params.FermionGenesisHash) } + + block, _, err = core.DefaultGnosisGenesisBlock().ToBlock() + if err != nil { + t.Errorf("error: %v", err) + } + if block.Root() != params.GnosisGenesisStateRoot { + t.Errorf("wrong Gnosis Chain genesis state root, got %v, want %v", block.Root(), params.GnosisGenesisStateRoot) + } + if block.Hash() != params.GnosisGenesisHash { + t.Errorf("wrong 
Gnosis Chain genesis hash, got %v, want %v", block.Hash(), params.GnosisGenesisHash) + } } func TestSokolHeaderRLP(t *testing.T) { diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 53cea0779bc..db14a6c0915 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -159,7 +159,7 @@ func (hd *HeaderDownload) removeUpwards(link *Link) { if link == nil { return } - var toRemove []*Link = []*Link{link} + var toRemove = []*Link{link} for len(toRemove) > 0 { removal := toRemove[len(toRemove)-1] toRemove = toRemove[:len(toRemove)-1] @@ -298,7 +298,7 @@ func (hd *HeaderDownload) logAnchorState() { ss = append(ss, sb.String()) } sort.Strings(ss) - log.Info("Queue sizes", "anchors", hd.anchorQueue.Len(), "links", hd.linkQueue.Len(), "persisted", hd.persistedLinkQueue.Len()) + log.Info("[Downloader] Queue sizes", "anchors", hd.anchorQueue.Len(), "links", hd.linkQueue.Len(), "persisted", hd.persistedLinkQueue.Len()) for _, s := range ss { log.Info(s) } @@ -348,7 +348,7 @@ func (hd *HeaderDownload) RecoverFromDb(db kv.RoDB) error { select { case <-logEvery.C: - log.Info("recover headers from db", "left", hd.persistedLinkLimit-hd.persistedLinkQueue.Len()) + log.Info("[Downloader] recover headers from db", "left", hd.persistedLinkLimit-hd.persistedLinkQueue.Len()) default: } } @@ -375,7 +375,7 @@ func (hd *HeaderDownload) ReadProgressFromDb(tx kv.RwTx) (err error) { } func (hd *HeaderDownload) invalidateAnchor(anchor *Anchor, reason string) { - log.Debug("Invalidating anchor", "height", anchor.blockHeight, "hash", anchor.parentHash, "reason", reason) + log.Debug("[Downloader] Invalidating anchor", "height", anchor.blockHeight, "hash", anchor.parentHash, "reason", reason) hd.removeAnchor(anchor) for child := anchor.fLink; child != nil; child, child.next = child.next, nil { hd.removeUpwards(child) @@ -387,7 +387,7 @@ func (hd *HeaderDownload) RequestMoreHeaders(currentTime 
time.Time) (*HeaderRequ defer hd.lock.Unlock() var penalties []PenaltyItem if hd.anchorQueue.Len() == 0 { - log.Trace("Empty anchor queue") + log.Trace("[Downloader] Empty anchor queue") return nil, penalties } for hd.anchorQueue.Len() > 0 { @@ -417,7 +417,7 @@ func (hd *HeaderDownload) RequestMoreHeaders(currentTime time.Time) (*HeaderRequ func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime time.Time) (timeout bool, request *HeaderRequest, penalties []PenaltyItem) { anchor := hd.posAnchor if anchor == nil { - log.Debug("No PoS anchor") + log.Debug("[Downloader] No PoS anchor") return } @@ -428,6 +428,7 @@ func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime time.Time) (timeo timeout = anchor.timeouts >= 10 if timeout { + log.Warn("[Downloader] Timeout", "requestId", hd.requestId, "peerID", common.Bytes2Hex(anchor.peerID[:])) penalties = []PenaltyItem{{Penalty: AbandonedAnchorPenalty, PeerID: anchor.peerID}} return } @@ -485,7 +486,7 @@ func (hd *HeaderDownload) UpdateRetryTime(req *HeaderRequest, currentTime time.T func (hd *HeaderDownload) RequestSkeleton() *HeaderRequest { hd.lock.RLock() defer hd.lock.RUnlock() - log.Debug("Request skeleton", "anchors", len(hd.anchors), "top seen height", hd.topSeenHeightPoW, "highestInDb", hd.highestInDb) + log.Debug("[Downloader] Request skeleton", "anchors", len(hd.anchors), "top seen height", hd.topSeenHeightPoW, "highestInDb", hd.highestInDb) stride := uint64(8 * 192) strideHeight := hd.highestInDb + stride var length uint64 = 192 @@ -518,12 +519,13 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult } if !link.verified { if err := hd.VerifyHeader(link.header); err != nil { + hd.badPoSHeaders[link.hash] = link.header.ParentHash if errors.Is(err, consensus.ErrFutureBlock) { // This may become valid later - log.Warn("Added future link", "hash", link.hash, "height", link.blockHeight, "timestamp", link.header.Time) + log.Warn("[Downloader] Added future link", "hash", 
link.hash, "height", link.blockHeight, "timestamp", link.header.Time) return false, false, 0, nil // prevent removal of the link from the hd.linkQueue } else { - log.Debug("Verification failed for header", "hash", link.hash, "height", link.blockHeight, "err", err) + log.Debug("[Downloader] Verification failed for header", "hash", link.hash, "height", link.blockHeight, "err", err) hd.moveLinkToQueue(link, NoQueue) delete(hd.links, link.hash) hd.removeUpwards(link) @@ -542,6 +544,8 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult if err != nil { return false, false, 0, err } + // Some blocks may be marked as non-valid on PoS chain because they were far into the future. + delete(hd.badPoSHeaders, link.hash) if td != nil { if hd.seenAnnounces.Pop(link.hash) { hd.toAnnounce = append(hd.toAnnounce, Announce{Hash: link.hash, Number: link.blockHeight}) @@ -557,13 +561,10 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult lastD = link.header.Difficulty } } - if link.blockHeight == hd.latestMinedBlockNumber { - return false, true, 0, nil - } if link.blockHeight > hd.highestInDb { if hd.trace { - log.Info("Highest in DB change", "number", link.blockHeight, "hash", link.hash) + log.Info("[Downloader] Highest in DB change", "number", link.blockHeight, "hash", link.hash) } hd.highestInDb = link.blockHeight } @@ -576,6 +577,9 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult hd.moveLinkToQueue(child, InsertQueueID) } } + if link.blockHeight == hd.latestMinedBlockNumber { + return false, true, 0, nil + } } for hd.persistedLinkQueue.Len() > hd.persistedLinkLimit { link := heap.Pop(&hd.persistedLinkQueue).(*Link) @@ -599,7 +603,7 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult // InsertHeaders attempts to insert headers into the database, verifying them first // It returns true in the first return value if the system is "in sync" func (hd 
*HeaderDownload) InsertHeaders(hf FeedHeaderFunc, terminalTotalDifficulty *big.Int, logPrefix string, logChannel <-chan time.Time) (bool, error) { - var more bool = true + var more = true var err error var force bool var blocksToTTD uint64 @@ -623,7 +627,7 @@ func (hd *HeaderDownload) SetHeaderToDownloadPoS(hash common.Hash, height uint64 hd.lock.Lock() defer hd.lock.Unlock() - log.Debug("Set posAnchor", "blockHeight", height+1) + log.Debug("[Downloader] Set posAnchor", "blockHeight", height+1) hd.posAnchor = &Anchor{ parentHash: hash, blockHeight: height + 1, @@ -634,12 +638,12 @@ func (hd *HeaderDownload) ProcessHeadersPOS(csHeaders []ChainSegmentHeader, tx k if len(csHeaders) == 0 { return nil, nil } - log.Debug("Collecting...", "from", csHeaders[0].Number, "to", csHeaders[len(csHeaders)-1].Number, "len", len(csHeaders)) + log.Debug("[Downloader] Collecting...", "from", csHeaders[0].Number, "to", csHeaders[len(csHeaders)-1].Number, "len", len(csHeaders)) hd.lock.Lock() defer hd.lock.Unlock() if hd.posAnchor == nil { // May happen if peers are sending unrequested header packets after we've synced - log.Debug("posAnchor is nil") + log.Debug("[Downloader] posAnchor is nil") return nil, nil } @@ -651,12 +655,13 @@ func (hd *HeaderDownload) ProcessHeadersPOS(csHeaders []ChainSegmentHeader, tx k for _, sh := range csHeaders { header := sh.Header headerHash := sh.Hash + if headerHash != hd.posAnchor.parentHash { if hd.posAnchor.blockHeight != 1 && sh.Number != hd.posAnchor.blockHeight-1 { - log.Info("posAnchor", "blockHeight", hd.posAnchor.blockHeight) + log.Info("[Downloader] posAnchor", "blockHeight", hd.posAnchor.blockHeight) return nil, nil } - log.Warn("Unexpected header", "hash", headerHash, "expected", hd.posAnchor.parentHash) + log.Warn("[Downloader] Unexpected header", "hash", headerHash, "expected", hd.posAnchor.parentHash, "peerID", common.Bytes2Hex(peerId[:])) return []PenaltyItem{{PeerID: peerId, Penalty: BadBlockPenalty}}, nil } @@ -670,7 +675,7 @@ func 
(hd *HeaderDownload) ProcessHeadersPOS(csHeaders []ChainSegmentHeader, tx k return nil, err } if hh != nil { - log.Debug("Synced", "requestId", hd.requestId) + log.Debug("[Downloader] Synced", "requestId", hd.requestId) if headerNumber != hh.Number.Uint64()+1 { hd.badPoSHeaders[headerHash] = header.ParentHash return nil, fmt.Errorf("invalid PoS segment detected: invalid block number. got %d, expected %d", headerNumber, hh.Number.Uint64()+1) @@ -950,11 +955,11 @@ func (hd *HeaderDownload) ProcessHeader(sh ChainSegmentHeader, newBlock bool, pe anchor, foundAnchor := hd.anchors[sh.Hash] if !foundParent && !foundAnchor { if sh.Number < hd.highestInDb { - log.Debug(fmt.Sprintf("new anchor too far in the past: %d, latest header in db: %d", sh.Number, hd.highestInDb)) + log.Debug(fmt.Sprintf("[Downloader] new anchor too far in the past: %d, latest header in db: %d", sh.Number, hd.highestInDb)) return false } if len(hd.anchors) >= hd.anchorLimit { - log.Debug(fmt.Sprintf("too many anchors: %d, limit %d", len(hd.anchors), hd.anchorLimit)) + log.Debug(fmt.Sprintf("[Downloader] too many anchors: %d, limit %d", len(hd.anchors), hd.anchorLimit)) return false } } @@ -982,7 +987,7 @@ func (hd *HeaderDownload) ProcessHeader(sh ChainSegmentHeader, newBlock bool, pe } else { // The link has not known parent, therefore it becomes an anchor, unless it is too far in the past if sh.Number+params.FullImmutabilityThreshold < hd.highestInDb { - log.Debug("Remove upwards", "height", link.blockHeight, "hash", link.blockHeight) + log.Debug("[Downloader] Remove upwards", "height", link.blockHeight, "hash", link.blockHeight) hd.removeUpwards(link) return false } @@ -1011,9 +1016,9 @@ func (hd *HeaderDownload) ProcessHeaders(csHeaders []ChainSegmentHeader, newBloc hd.lock.Lock() defer hd.lock.Unlock() hd.stats.Responses++ - log.Trace("Link queue", "size", hd.linkQueue.Len()) + log.Trace("[Downloader] Link queue", "size", hd.linkQueue.Len()) if hd.linkQueue.Len() > hd.linkLimit { - log.Trace("Too 
many links, cutting down", "count", hd.linkQueue.Len(), "tried to add", len(csHeaders), "limit", hd.linkLimit) + log.Trace("[Downloader] Too many links, cutting down", "count", hd.linkQueue.Len(), "tried to add", len(csHeaders), "limit", hd.linkLimit) hd.pruneLinkQueue() } // Wake up stage loop if it is outside any of the stages @@ -1031,6 +1036,21 @@ func (hd *HeaderDownload) ExtractStats() Stats { return s } +func (hd *HeaderDownload) FirstPoSHeight() *uint64 { + hd.lock.RLock() + defer hd.lock.RUnlock() + return hd.firstSeenHeightPoS +} + +func (hd *HeaderDownload) SetFirstPoSHeight(blockHeight uint64) { + hd.lock.RLock() + defer hd.lock.RUnlock() + if hd.firstSeenHeightPoS == nil { + hd.firstSeenHeightPoS = new(uint64) + *hd.firstSeenHeightPoS = blockHeight + } +} + func (hd *HeaderDownload) TopSeenHeight() uint64 { hd.lock.RLock() defer hd.lock.RUnlock() @@ -1262,7 +1282,6 @@ func (hd *HeaderDownload) StartPoSDownloader( var timeout bool timeout, req, penalties = hd.requestMoreHeadersForPOS(currentTime) if timeout { - log.Warn("Timeout", "requestId", hd.requestId) hd.BeaconRequestList.Remove(hd.requestId) hd.cleanUpPoSDownload() } @@ -1276,7 +1295,7 @@ func (hd *HeaderDownload) StartPoSDownloader( if sentToPeer { // If request was actually sent to a peer, we update retry time to be 5 seconds in the future hd.UpdateRetryTime(req, currentTime, 5*time.Second /* timeout */) - log.Debug("Sent request", "height", req.Number) + log.Debug("[Downloader] Sent request", "height", req.Number) } } if len(penalties) > 0 { @@ -1299,7 +1318,7 @@ func (hd *HeaderDownload) StartPoSDownloader( prevProgress = progress } else if progress <= prevProgress { diff := prevProgress - progress - log.Info("Downloaded PoS Headers", "now", progress, + log.Info("[Downloader] Downloaded PoS Headers", "now", progress, "blk/sec", float64(diff)/float64(logInterval/time.Second)) prevProgress = progress } diff --git a/turbo/stages/headerdownload/header_data_struct.go 
b/turbo/stages/headerdownload/header_data_struct.go index a858f1554fa..217029d2919 100644 --- a/turbo/stages/headerdownload/header_data_struct.go +++ b/turbo/stages/headerdownload/header_data_struct.go @@ -301,6 +301,7 @@ type HeaderDownload struct { // Proof of Stake (PoS) topSeenHeightPoS uint64 + firstSeenHeightPoS *uint64 requestId int posAnchor *Anchor posStatus SyncStatus @@ -308,6 +309,7 @@ type HeaderDownload struct { headersCollector *etl.Collector // ETL collector for headers BeaconRequestList *engineapi.RequestList // Requests from ethbackend to staged sync PayloadStatusCh chan engineapi.PayloadStatus // Responses (validation/execution status) + ShutdownCh chan struct{} // Channel to signal shutdown pendingPayloadHash common.Hash // Header whose status we still should send to PayloadStatusCh pendingPayloadStatus *engineapi.PayloadStatus // Alternatively, there can be an already prepared response to send to PayloadStatusCh unsettledForkChoice *engineapi.ForkChoiceMessage // Forkchoice to process after unwind @@ -343,6 +345,7 @@ func NewHeaderDownload( QuitPoWMining: make(chan struct{}), BeaconRequestList: engineapi.NewRequestList(), PayloadStatusCh: make(chan engineapi.PayloadStatus, 1), + ShutdownCh: make(chan struct{}), headerReader: headerReader, badPoSHeaders: make(map[common.Hash]common.Hash), } diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 26864c03588..5cfb9c16101 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -11,6 +11,7 @@ import ( "github.com/c2h5oh/datasize" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/direct" "github.com/ledgerwatch/erigon-lib/gointerfaces" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" @@ -20,14 +21,19 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" + libstate 
"github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon-lib/txpool" types2 "github.com/ledgerwatch/erigon-lib/types" + "github.com/ledgerwatch/log/v3" + "google.golang.org/protobuf/types/known/emptypb" + "github.com/ledgerwatch/erigon/cmd/sentry/sentry" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" @@ -35,7 +41,6 @@ import ( "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" @@ -45,8 +50,6 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages/bodydownload" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" - "github.com/ledgerwatch/log/v3" - "google.golang.org/protobuf/types/known/emptypb" ) type MockSentry struct { @@ -56,9 +59,9 @@ type MockSentry struct { t *testing.T cancel context.CancelFunc DB kv.RwDB - tmpdir string - snapDir string + Dirs datadir.Dirs Engine consensus.Engine + gspec *core.Genesis ChainConfig *params.ChainConfig Sync *stagedsync.Sync MiningSync *stagedsync.Sync @@ -76,7 +79,7 @@ type MockSentry struct { ReceiveWg sync.WaitGroup Address common.Address - Notifications *stagedsync.Notifications + Notifications *shards.Notifications // TxPool TxPoolFetch *txpool.Fetch @@ -84,6 +87,9 @@ type MockSentry struct { TxPoolGrpcServer *txpool.GrpcServer TxPool *txpool.TxPool txPoolDB kv.RwDB + + HistoryV3 bool + agg *libstate.Aggregator22 } func (ms *MockSentry) Close() 
{ @@ -92,6 +98,9 @@ func (ms *MockSentry) Close() { ms.txPoolDB.Close() } ms.DB.Close() + if ms.HistoryV3 { + ms.agg.Close() + } } // Stream returns stream, waiting if necessary @@ -199,24 +208,25 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey db := memdb.New() ctx, ctxCancel := context.WithCancel(context.Background()) - erigonGrpcServeer := remotedbserver.NewKvServer(ctx, db, nil) + erigonGrpcServeer := remotedbserver.NewKvServer(ctx, db, nil, nil) mock := &MockSentry{ Ctx: ctx, cancel: ctxCancel, DB: db, t: t, Log: log.New(), - tmpdir: tmpdir, - snapDir: dirs.Snap, + Dirs: dirs, Engine: engine, + gspec: gspec, ChainConfig: gspec.Config, Key: key, - Notifications: &stagedsync.Notifications{ - Events: privateapi.NewEvents(), - Accumulator: shards.NewAccumulator(gspec.Config), + Notifications: &shards.Notifications{ + Events: shards.NewEvents(), + Accumulator: shards.NewAccumulator(), StateChangesConsumer: erigonGrpcServeer, }, UpdateHead: func(Ctx context.Context, head uint64, hash common.Hash, td *uint256.Int) { }, - PeerId: gointerfaces.ConvertHashToH512([64]byte{0x12, 0x34, 0x50}), // "12345" + PeerId: gointerfaces.ConvertHashToH512([64]byte{0x12, 0x34, 0x50}), // "12345" + HistoryV3: ethconfig.EnableHistoryV3InTest, } if t != nil { t.Cleanup(mock.Close) @@ -228,12 +238,32 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey propagateNewBlockHashes := func(context.Context, []headerdownload.Announce) {} penalize := func(context.Context, []headerdownload.PenaltyItem) {} cfg := ethconfig.Defaults + cfg.HistoryV3 = mock.HistoryV3 cfg.StateStream = true cfg.BatchSize = 1 * datasize.MB cfg.Sync.BodyDownloadTimeoutSeconds = 10 cfg.DeprecatedTxPool.Disable = !withTxPool cfg.DeprecatedTxPool.StartOnInit = true + _ = db.Update(ctx, func(tx kv.RwTx) error { + _, _ = rawdb.HistoryV3.WriteOnce(tx, cfg.HistoryV3) + return nil + }) + + allSnapshots := snapshotsync.NewRoSnapshots(ethconfig.Defaults.Snapshot, 
dirs.Snap) + + if cfg.HistoryV3 { + dir.MustExist(dirs.SnapHistory) + mock.agg, err = libstate.NewAggregator22(dirs.SnapHistory, ethconfig.HistoryV3AggregationStep) + if err != nil { + panic(err) + } + if err := mock.agg.ReopenFiles(); err != nil { + panic(err) + } + + } + mock.SentryClient = direct.NewSentryClientDirect(eth.ETH66, mock) sentries := []direct.SentryClient{mock.SentryClient} @@ -293,7 +323,10 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey cfg.Sync, blockReader, false, + nil, ) + + mock.sentriesClient.IsMock = true if err != nil { if t != nil { t.Fatal(err) @@ -302,30 +335,37 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey } } - var allSnapshots *snapshotsync.RoSnapshots var snapshotsDownloader proto_downloader.DownloaderClient isBor := mock.ChainConfig.Bor != nil - + var sprint uint64 + if isBor { + sprint = mock.ChainConfig.Bor.Sprint + } + blockRetire := snapshotsync.NewBlockRetire(1, dirs.Tmp, allSnapshots, mock.DB, snapshotsDownloader, mock.Notifications.Events) mock.Sync = stagedsync.New( stagedsync.DefaultStages(mock.Ctx, prune, - stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, false, allSnapshots, snapshotsDownloader, blockReader, mock.tmpdir, mock.Notifications.Events, mock.Notifications, engineapi.NewForkValidatorMock(1)), - stagedsync.StageCumulativeIndexCfg(mock.DB), - stagedsync.StageBlockHashesCfg(mock.DB, mock.tmpdir, mock.ChainConfig), - stagedsync.StageBodiesCfg( + stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, dirs, allSnapshots, blockRetire, snapshotsDownloader, blockReader, mock.Notifications.Events, mock.HistoryV3, mock.agg), + stagedsync.StageHeadersCfg( mock.DB, + mock.sentriesClient.Hd, mock.sentriesClient.Bd, - sendBodyRequest, - penalize, - blockPropagator, - cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, + 
sendHeaderRequest, + propagateNewBlockHashes, + penalize, cfg.BatchSize, + false, allSnapshots, blockReader, - ), + dirs.Tmp, + mock.Notifications, + engineapi.NewForkValidatorMock(1)), + stagedsync.StageCumulativeIndexCfg(mock.DB), + stagedsync.StageBlockHashesCfg(mock.DB, mock.Dirs.Tmp, mock.ChainConfig), + stagedsync.StageBodiesCfg(mock.DB, mock.sentriesClient.Bd, sendBodyRequest, penalize, blockPropagator, cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, cfg.BatchSize, allSnapshots, blockReader, cfg.HistoryV3), stagedsync.StageIssuanceCfg(mock.DB, mock.ChainConfig, blockReader, true), - stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, false, mock.tmpdir, prune, snapshotsync.NewBlockRetire(1, mock.tmpdir, allSnapshots, mock.DB, snapshotsDownloader, mock.Notifications.Events), nil), + stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, false, dirs.Tmp, prune, blockRetire, nil), stagedsync.StageExecuteBlocksCfg( mock.DB, prune, @@ -337,18 +377,21 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey mock.Notifications.Accumulator, cfg.StateStream, /*stateStream=*/ false, - mock.tmpdir, + /*exec22=*/ cfg.HistoryV3, + dirs, blockReader, mock.sentriesClient.Hd, + mock.gspec, + 1, + mock.agg, ), - stagedsync.StageTranspileCfg(mock.DB, cfg.BatchSize, mock.ChainConfig), - stagedsync.StageHashStateCfg(mock.DB, mock.tmpdir), - stagedsync.StageTrieCfg(mock.DB, true, true, false, mock.tmpdir, blockReader, nil), - stagedsync.StageHistoryCfg(mock.DB, prune, mock.tmpdir), - stagedsync.StageLogIndexCfg(mock.DB, prune, mock.tmpdir), - stagedsync.StageCallTracesCfg(mock.DB, prune, 0, mock.tmpdir), - stagedsync.StageTxLookupCfg(mock.DB, prune, mock.tmpdir, allSnapshots, isBor), - stagedsync.StageFinishCfg(mock.DB, mock.tmpdir, mock.Log, nil, nil), + stagedsync.StageHashStateCfg(mock.DB, mock.Dirs, cfg.HistoryV3, mock.agg), + stagedsync.StageTrieCfg(mock.DB, true, true, false, dirs.Tmp, blockReader, nil, cfg.HistoryV3, mock.agg), + 
stagedsync.StageHistoryCfg(mock.DB, prune, dirs.Tmp), + stagedsync.StageLogIndexCfg(mock.DB, prune, dirs.Tmp), + stagedsync.StageCallTracesCfg(mock.DB, prune, 0, dirs.Tmp), + stagedsync.StageTxLookupCfg(mock.DB, prune, dirs.Tmp, allSnapshots, isBor, sprint), + stagedsync.StageFinishCfg(mock.DB, dirs.Tmp, nil), !withPosDownloader), stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, @@ -361,17 +404,22 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey miningConfig.Noverify = false miningConfig.Etherbase = mock.Address miningConfig.SigKey = mock.Key + miningCancel := make(chan struct{}) + go func() { + <-mock.Ctx.Done() + close(miningCancel) + }() miner := stagedsync.NewMiningState(&miningConfig) mock.PendingBlocks = miner.PendingResultCh mock.MinedBlocks = miner.MiningResultCh mock.MiningSync = stagedsync.New( stagedsync.MiningStages(mock.Ctx, - stagedsync.StageMiningCreateBlockCfg(mock.DB, miner, *mock.ChainConfig, mock.Engine, mock.TxPool, nil, nil, mock.tmpdir), - stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, mock.tmpdir, nil), - stagedsync.StageHashStateCfg(mock.DB, mock.tmpdir), - stagedsync.StageTrieCfg(mock.DB, false, true, false, mock.tmpdir, blockReader, nil), - stagedsync.StageMiningFinishCfg(mock.DB, *mock.ChainConfig, mock.Engine, miner, mock.Ctx.Done()), + stagedsync.StageMiningCreateBlockCfg(mock.DB, miner, *mock.ChainConfig, mock.Engine, mock.TxPool, nil, nil, dirs.Tmp), + stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, dirs.Tmp, nil, 0), + stagedsync.StageHashStateCfg(mock.DB, dirs, cfg.HistoryV3, mock.agg), + stagedsync.StageTrieCfg(mock.DB, false, true, false, dirs.Tmp, blockReader, nil, cfg.HistoryV3, mock.agg), + stagedsync.StageMiningFinishCfg(mock.DB, *mock.ChainConfig, mock.Engine, miner, miningCancel), ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder, @@ -511,7 +559,7 @@ func (ms 
*MockSentry) insertPoWBlocks(chain *core.ChainPack) error { if ms.TxPool != nil { ms.ReceiveWg.Add(1) } - if _, err = StageLoopStep(ms.Ctx, ms.DB, ms.Sync, highestSeenHeader, ms.Notifications, initialCycle, ms.UpdateHead, nil); err != nil { + if _, err = StageLoopStep(ms.Ctx, ms.ChainConfig, ms.DB, ms.Sync, highestSeenHeader, ms.Notifications, initialCycle, ms.UpdateHead, nil); err != nil { return err } if ms.TxPool != nil { @@ -532,7 +580,7 @@ func (ms *MockSentry) insertPoSBlocks(chain *core.ChainPack) error { initialCycle := false highestSeenHeader := chain.TopBlock.NumberU64() - headBlockHash, err := StageLoopStep(ms.Ctx, ms.DB, ms.Sync, highestSeenHeader, ms.Notifications, initialCycle, ms.UpdateHead, nil) + headBlockHash, err := StageLoopStep(ms.Ctx, ms.ChainConfig, ms.DB, ms.Sync, highestSeenHeader, ms.Notifications, initialCycle, ms.UpdateHead, nil) if err != nil { return err } @@ -545,7 +593,7 @@ func (ms *MockSentry) insertPoSBlocks(chain *core.ChainPack) error { FinalizedBlockHash: chain.TopBlock.Hash(), } ms.SendForkChoiceRequest(&fc) - headBlockHash, err = StageLoopStep(ms.Ctx, ms.DB, ms.Sync, highestSeenHeader, ms.Notifications, initialCycle, ms.UpdateHead, nil) + headBlockHash, err = StageLoopStep(ms.Ctx, ms.ChainConfig, ms.DB, ms.Sync, highestSeenHeader, ms.Notifications, initialCycle, ms.UpdateHead, nil) if err != nil { return err } @@ -599,3 +647,28 @@ func (ms *MockSentry) ReceivePayloadStatus() engineapi.PayloadStatus { func (ms *MockSentry) HeaderDownload() *headerdownload.HeaderDownload { return ms.sentriesClient.Hd } + +func (ms *MockSentry) NewHistoricalStateReader(blockNum uint64, tx kv.Tx) state.StateReader { + if ms.HistoryV3 { + aggCtx := ms.agg.MakeContext() + aggCtx.SetTx(tx) + r := state.NewHistoryReader22(aggCtx) + r.SetTx(tx) + minTxNum, err := rawdb.TxNums.Min(tx, blockNum) + if err != nil { + panic(err) + } + r.SetTxNum(minTxNum) + return r + } + + return state.NewPlainState(tx, blockNum) +} + +func (ms *MockSentry) 
NewStateReader(tx kv.Tx) state.StateReader { + return state.NewPlainStateReader(tx) +} + +func (ms *MockSentry) HistoryV3Components() *libstate.Aggregator22 { + return ms.agg +} diff --git a/turbo/stages/sentry_mock_test.go b/turbo/stages/sentry_mock_test.go index be73c12ddc6..1adf638614e 100644 --- a/turbo/stages/sentry_mock_test.go +++ b/turbo/stages/sentry_mock_test.go @@ -56,8 +56,8 @@ func TestHeaderStep(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed initialCycle := true - highestSeenHeader := uint64(chain.TopBlock.NumberU64()) - if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + highestSeenHeader := chain.TopBlock.NumberU64() + if _, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } @@ -95,8 +95,8 @@ func TestMineBlockWith1Tx(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle := true - highestSeenHeader := uint64(chain.TopBlock.NumberU64()) - if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + highestSeenHeader := chain.TopBlock.NumberU64() + if _, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } @@ -164,8 +164,8 @@ func TestReorg(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle := true - highestSeenHeader := uint64(chain.TopBlock.NumberU64()) - if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + highestSeenHeader := chain.TopBlock.NumberU64() + if _, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, 
highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } @@ -217,9 +217,9 @@ func TestReorg(t *testing.T) { } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed - highestSeenHeader = uint64(short.TopBlock.NumberU64()) + highestSeenHeader = short.TopBlock.NumberU64() initialCycle = false - if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if _, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } @@ -262,8 +262,8 @@ func TestReorg(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed // This is unwind step - highestSeenHeader = uint64(long1.TopBlock.NumberU64()) - if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + highestSeenHeader = long1.TopBlock.NumberU64() + if _, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } @@ -299,9 +299,9 @@ func TestReorg(t *testing.T) { } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed - highestSeenHeader = uint64(short2.TopBlock.NumberU64()) + highestSeenHeader = short2.TopBlock.NumberU64() initialCycle = false - if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if _, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } @@ -396,9 +396,9 @@ func TestAnchorReplace(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed - highestSeenHeader := 
uint64(long.TopBlock.NumberU64()) + highestSeenHeader := long.TopBlock.NumberU64() initialCycle := true - if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if _, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } @@ -501,9 +501,9 @@ func TestAnchorReplace2(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed - highestSeenHeader := uint64(long.TopBlock.NumberU64()) + highestSeenHeader := long.TopBlock.NumberU64() initialCycle := true - if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + if _, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { t.Fatal(err) } } @@ -520,7 +520,7 @@ func TestForkchoiceToGenesis(t *testing.T) { m.SendForkChoiceRequest(&forkChoiceMessage) initialCycle := false - headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) + headBlockHash, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) @@ -542,7 +542,7 @@ func TestBogusForkchoice(t *testing.T) { m.SendForkChoiceRequest(&forkChoiceMessage) initialCycle := false - headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) + headBlockHash, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) @@ -557,7 +557,7 @@ func TestBogusForkchoice(t *testing.T) { } 
m.SendForkChoiceRequest(&forkChoiceMessage) - headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) + headBlockHash, err = stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) @@ -577,7 +577,7 @@ func TestPoSDownloader(t *testing.T) { m.SendPayloadRequest(chain.TopBlock) initialCycle := false - headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) + headBlockHash, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) @@ -597,15 +597,14 @@ func TestPoSDownloader(t *testing.T) { m.ReceiveWg.Wait() // First cycle: save the downloaded header - headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) + headBlockHash, err = stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) // Second cycle: process the previous beacon request - headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) + headBlockHash, err = stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) - assert.Equal(t, chain.TopBlock.Hash(), headBlockHash) // Point forkChoice to the head forkChoiceMessage := engineapi.ForkChoiceMessage{ @@ -614,9 +613,10 @@ func TestPoSDownloader(t *testing.T) { FinalizedBlockHash: chain.TopBlock.Hash(), } m.SendForkChoiceRequest(&forkChoiceMessage) - 
headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) + headBlockHash, err = stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) + assert.Equal(t, chain.TopBlock.Hash(), headBlockHash) payloadStatus = m.ReceivePayloadStatus() assert.Equal(t, remote.EngineStatus_VALID, payloadStatus.Status) @@ -645,7 +645,7 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { m.SendPayloadRequest(payloadMessage) initialCycle := false - headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) + headBlockHash, err := stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) @@ -664,7 +664,7 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { } m.ReceiveWg.Wait() - headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) + headBlockHash, err = stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) @@ -675,7 +675,7 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { FinalizedBlockHash: invalidTip.Hash(), } m.SendForkChoiceRequest(&forkChoiceMessage) - _, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) + _, err = stages.StageLoopStep(m.Ctx, m.ChainConfig, m.DB, m.Sync, 0, m.Notifications, initialCycle, m.UpdateHead, nil) require.NoError(t, err) bad, lastValidHash := m.HeaderDownload().IsBadHeaderPoS(invalidTip.Hash()) diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index cf62e8c5e18..17b24edcc48 100644 --- 
a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -14,6 +14,7 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/cmd/sentry/sentry" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus/misc" @@ -23,9 +24,12 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/p2p" + "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/log/v3" @@ -62,10 +66,11 @@ func SendPayloadStatus(hd *headerdownload.HeaderDownload, headBlockHash common.H // StageLoop runs the continuous loop of staged sync func StageLoop( ctx context.Context, + chainConfig *params.ChainConfig, db kv.RwDB, sync *stagedsync.Sync, hd *headerdownload.HeaderDownload, - notifications *stagedsync.Notifications, + notifications *shards.Notifications, updateHead func(ctx context.Context, head uint64, hash common.Hash, td *uint256.Int), waitForDone chan struct{}, loopMinTime time.Duration, @@ -76,9 +81,16 @@ func StageLoop( for { start := time.Now() + select { + case <-hd.ShutdownCh: + return + default: + // continue + } + // Estimate the current top height seen from the peer height := hd.TopSeenHeight() - headBlockHash, err := StageLoopStep(ctx, db, sync, height, notifications, initialCycle, updateHead, nil) + headBlockHash, err := StageLoopStep(ctx, chainConfig, db, sync, height, notifications, initialCycle, updateHead, nil) SendPayloadStatus(hd, headBlockHash, 
err) @@ -113,10 +125,11 @@ func StageLoop( func StageLoopStep( ctx context.Context, + chainConfig *params.ChainConfig, db kv.RwDB, sync *stagedsync.Sync, highestSeenHeader uint64, - notifications *stagedsync.Notifications, + notifications *shards.Notifications, initialCycle bool, updateHead func(ctx context.Context, head uint64, hash common.Hash, td *uint256.Int), snapshotMigratorFinal func(tx kv.Tx) error, @@ -157,11 +170,14 @@ func StageLoopStep( notifications.Accumulator.Reset(tx.ViewID()) } - err = sync.Run(db, tx, initialCycle) + err = sync.Run(db, tx, initialCycle, false /* quiet */) if err != nil { return headBlockHash, err } + logCtx := sync.PrintTimings() + var tableSizes []interface{} if canRunCycleInOneTransaction { + tableSizes = stagedsync.PrintTables(db, tx) // Need to do this before commit to access tx commitStart := time.Now() errTx := tx.Commit() if errTx != nil { @@ -189,6 +205,12 @@ func StageLoopStep( return headBlockHash, err } headBlockHash = rawdb.ReadHeadBlockHash(rotx) + if head != finishProgressBefore && len(logCtx) > 0 { // No printing of timings or table sizes if there were no progress + log.Info("Timings (slower than 50ms)", logCtx...) + if len(tableSizes) > 0 { + log.Info("Tables", tableSizes...) 
+ } + } if canRunCycleInOneTransaction && snapshotMigratorFinal != nil { err = snapshotMigratorFinal(rotx) @@ -208,10 +230,11 @@ func StageLoopStep( if notifications.Accumulator != nil { header := rawdb.ReadCurrentHeader(rotx) if header != nil { - pendingBaseFee := misc.CalcBaseFee(notifications.Accumulator.ChainConfig(), header) + pendingBaseFee := misc.CalcBaseFee(chainConfig, header) if header.Number.Uint64() == 0 { notifications.Accumulator.StartChange(0, header.Hash(), nil, false) } + notifications.Accumulator.SendAndReset(ctx, notifications.StateChangesConsumer, pendingBaseFee.Uint64(), header.GasLimit) if err = stagedsync.NotifyNewHeaders(ctx, finishProgressBefore, head, sync.PrevUnwindPoint(), notifications.Events, rotx); err != nil { @@ -240,15 +263,14 @@ func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync) (err e miningBatch := memdb.NewMemoryBatch(tx) defer miningBatch.Rollback() - if err = mining.Run(nil, miningBatch, false); err != nil { + if err = mining.Run(nil, miningBatch, false /* firstCycle */, false /* quiet */); err != nil { return err } tx.Rollback() return nil } -func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, headerReader services.FullBlockReader, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody) (err error) { - +func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, quiet bool) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack()) @@ -263,16 +285,29 @@ func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h return err } } - // Once we unwond we can start constructing the chain (assumption: len(headersChain) == len(bodiesChain)) + // Once we unwound we can start constructing the chain 
(assumption: len(headersChain) == len(bodiesChain)) for i := range headersChain { currentHeader := headersChain[i] currentBody := bodiesChain[i] currentHeight := headersChain[i].Number.Uint64() currentHash := headersChain[i].Hash() // Prepare memory state for block execution - if err = rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody); err != nil { + _, _, err := rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody) + if err != nil { return err } + /* + ok, lastTxnNum, err := rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody) + if err != nil { + return err + } + if ok { + + if txNums != nil { + txNums.Append(currentHeight, lastTxnNum) + } + } + */ rawdb.WriteHeader(batch, currentHeader) if err = rawdb.WriteHeaderNumber(batch, currentHash, currentHeight); err != nil { return err @@ -310,49 +345,65 @@ func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h if err = stages.SaveStageProgress(batch, stages.Bodies, height); err != nil { return err } - if err = rawdb.WriteRawBodyIfNotExists(batch, hash, height, body); err != nil { + _, _, err := rawdb.WriteRawBodyIfNotExists(batch, hash, height, body) + if err != nil { return err } + /* + ok, lastTxnNum, err := rawdb.WriteRawBodyIfNotExists(batch, hash, height, body) + if err != nil { + return err + } + if ok { + if txNums != nil { + txNums.Append(height, lastTxnNum) + } + } + */ } else { if err = stages.SaveStageProgress(batch, stages.Bodies, height-1); err != nil { return err } } // Run state sync - if err = stateSync.Run(nil, batch, false); err != nil { + if err = stateSync.Run(nil, batch, false /* firstCycle */, quiet); err != nil { return err } return nil } -func NewStagedSync( - ctx context.Context, - logger log.Logger, +func NewStagedSync(ctx context.Context, db kv.RwDB, p2pCfg p2p.Config, - cfg ethconfig.Config, + cfg *ethconfig.Config, controlServer *sentry.MultiClient, - tmpdir string, - notifications 
*stagedsync.Notifications, + notifications *shards.Notifications, snapDownloader proto_downloader.DownloaderClient, snapshots *snapshotsync.RoSnapshots, - headCh chan *types.Block, + agg *state.Aggregator22, forkValidator *engineapi.ForkValidator, ) (*stagedsync.Sync, error) { + dirs := cfg.Dirs var blockReader services.FullBlockReader if cfg.Snapshot.Enabled { blockReader = snapshotsync.NewBlockReaderWithSnapshots(snapshots) } else { blockReader = snapshotsync.NewBlockReader() } - blockRetire := snapshotsync.NewBlockRetire(1, tmpdir, snapshots, db, snapDownloader, notifications.Events) + blockRetire := snapshotsync.NewBlockRetire(1, dirs.Tmp, snapshots, db, snapDownloader, notifications.Events) // During Import we don't want other services like header requests, body requests etc. to be running. // Hence we run it in the test mode. runInTestMode := cfg.ImportMode isBor := controlServer.ChainConfig.Bor != nil + var sprint uint64 + if isBor { + sprint = controlServer.ChainConfig.Bor.Sprint + } + return stagedsync.New( stagedsync.DefaultStages(ctx, cfg.Prune, + stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, dirs, snapshots, blockRetire, snapDownloader, blockReader, notifications.Events, cfg.HistoryV3, agg), stagedsync.StageHeadersCfg( db, controlServer.Hd, @@ -363,30 +414,16 @@ func NewStagedSync( controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, - cfg.MemoryOverlay, snapshots, - snapDownloader, blockReader, - tmpdir, - notifications.Events, + dirs.Tmp, notifications, forkValidator), stagedsync.StageCumulativeIndexCfg(db), - stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), - stagedsync.StageBodiesCfg( - db, - controlServer.Bd, - controlServer.SendBodyRequest, - controlServer.Penalize, - controlServer.BroadcastNewBlock, - cfg.Sync.BodyDownloadTimeoutSeconds, - *controlServer.ChainConfig, - cfg.BatchSize, - snapshots, - blockReader, - ), + stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig), + 
stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, cfg.BatchSize, snapshots, blockReader, cfg.HistoryV3), stagedsync.StageIssuanceCfg(db, controlServer.ChainConfig, blockReader, cfg.EnabledIssuance), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, tmpdir, cfg.Prune, blockRetire, controlServer.Hd), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockRetire, controlServer.Hd), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune, @@ -394,28 +431,31 @@ func NewStagedSync( nil, controlServer.ChainConfig, controlServer.Engine, - &vm.Config{EnableTEMV: cfg.Prune.Experiments.TEVM}, + &vm.Config{}, notifications.Accumulator, cfg.StateStream, /*stateStream=*/ false, - tmpdir, + cfg.HistoryV3, + dirs, blockReader, controlServer.Hd, + cfg.Genesis, + cfg.Sync.ExecWorkerCount, + agg, ), - stagedsync.StageTranspileCfg(db, cfg.BatchSize, controlServer.ChainConfig), - stagedsync.StageHashStateCfg(db, tmpdir), - stagedsync.StageTrieCfg(db, true, true, false, tmpdir, blockReader, controlServer.Hd), - stagedsync.StageHistoryCfg(db, cfg.Prune, tmpdir), - stagedsync.StageLogIndexCfg(db, cfg.Prune, tmpdir), - stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, tmpdir), - stagedsync.StageTxLookupCfg(db, cfg.Prune, tmpdir, snapshots, isBor), - stagedsync.StageFinishCfg(db, tmpdir, logger, headCh, forkValidator), runInTestMode), + stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg), + stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg), + stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp), + stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp), + stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp), + stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, snapshots, isBor, sprint), + stagedsync.StageFinishCfg(db, dirs.Tmp, 
forkValidator), runInTestMode), stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, ), nil } -func NewInMemoryExecution(ctx context.Context, logger log.Logger, db kv.RwDB, cfg ethconfig.Config, controlServer *sentry.MultiClient, tmpdir string, notifications *stagedsync.Notifications, snapshots *snapshotsync.RoSnapshots) (*stagedsync.Sync, error) { +func NewInMemoryExecution(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config, controlServer *sentry.MultiClient, dirs datadir.Dirs, notifications *shards.Notifications, snapshots *snapshotsync.RoSnapshots, agg *state.Aggregator22) (*stagedsync.Sync, error) { var blockReader services.FullBlockReader if cfg.Snapshot.Enabled { blockReader = snapshotsync.NewBlockReaderWithSnapshots(snapshots) @@ -435,25 +475,13 @@ func NewInMemoryExecution(ctx context.Context, logger log.Logger, db kv.RwDB, cf controlServer.Penalize, cfg.BatchSize, false, - cfg.MemoryOverlay, snapshots, - nil, blockReader, - tmpdir, - notifications.Events, - nil, nil), stagedsync.StageBodiesCfg( - db, - controlServer.Bd, - controlServer.SendBodyRequest, - controlServer.Penalize, - controlServer.BroadcastNewBlock, - cfg.Sync.BodyDownloadTimeoutSeconds, - *controlServer.ChainConfig, - cfg.BatchSize, - snapshots, - blockReader, - ), stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, true, tmpdir, cfg.Prune, nil, controlServer.Hd), + dirs.Tmp, + nil, nil, + ), + stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, cfg.BatchSize, snapshots, blockReader, cfg.HistoryV3), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, true, dirs.Tmp, cfg.Prune, nil, controlServer.Hd), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune, @@ -461,16 +489,20 @@ func 
NewInMemoryExecution(ctx context.Context, logger log.Logger, db kv.RwDB, cf nil, controlServer.ChainConfig, controlServer.Engine, - &vm.Config{EnableTEMV: cfg.Prune.Experiments.TEVM}, + &vm.Config{}, notifications.Accumulator, cfg.StateStream, true, - tmpdir, + cfg.HistoryV3, + cfg.Dirs, blockReader, controlServer.Hd, + cfg.Genesis, + cfg.Sync.ExecWorkerCount, + agg, ), - stagedsync.StageHashStateCfg(db, tmpdir), - stagedsync.StageTrieCfg(db, true, true, true, tmpdir, blockReader, controlServer.Hd)), + stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg), + stagedsync.StageTrieCfg(db, true, true, true, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg)), stagedsync.StateUnwindOrder, nil, ), nil diff --git a/turbo/transactions/call.go b/turbo/transactions/call.go index c034ccb6d12..1c7571bd912 100644 --- a/turbo/transactions/call.go +++ b/turbo/transactions/call.go @@ -3,13 +3,12 @@ package transactions import ( "context" "fmt" - "math/big" "time" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -17,13 +16,10 @@ import ( "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" ) -const callTimeout = 5 * time.Minute - func DoCall( ctx context.Context, args ethapi.CallArgs, @@ -31,10 +27,8 @@ func DoCall( block *types.Block, overrides *ethapi.StateOverrides, gasCap uint64, chainConfig *params.ChainConfig, - filters *rpchelper.Filters, - stateCache kvcache.Cache, - contractHasTEVM func(hash common.Hash) (bool, error), - headerReader services.HeaderReader, + stateReader 
state.StateReader, + headerReader services.HeaderReader, callTimeout time.Duration, ) (*core.ExecutionResult, error) { // todo: Pending state is only known by the miner /* @@ -43,10 +37,6 @@ func DoCall( return state, block.Header(), nil } */ - stateReader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, filters, stateCache) - if err != nil { - return nil, err - } state := state.New(stateReader) header := block.Header() @@ -85,7 +75,7 @@ func DoCall( if err != nil { return nil, err } - blockCtx, txCtx := GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, tx, contractHasTEVM, headerReader) + blockCtx, txCtx := GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, tx, headerReader) evm := vm.NewEVM(blockCtx, txCtx, state, chainConfig, vm.Config{NoBaseFee: true}) @@ -109,7 +99,7 @@ func DoCall( return result, nil } -func GetEvmContext(msg core.Message, header *types.Header, requireCanonical bool, tx kv.Tx, contractHasTEVM func(address common.Hash) (bool, error), headerReader services.HeaderReader) (vm.BlockContext, vm.TxContext) { +func GetEvmContext(msg core.Message, header *types.Header, requireCanonical bool, tx kv.Tx, headerReader services.HeaderReader) (vm.BlockContext, vm.TxContext) { var baseFee uint256.Int if header.Eip1559 { overflow := baseFee.SetFromBig(header.BaseFee) @@ -117,18 +107,7 @@ func GetEvmContext(msg core.Message, header *types.Header, requireCanonical bool panic(fmt.Errorf("header.BaseFee higher than 2^256-1")) } } - return vm.BlockContext{ - CanTransfer: core.CanTransfer, - Transfer: core.Transfer, - GetHash: getHashGetter(requireCanonical, tx, headerReader), - ContractHasTEVM: contractHasTEVM, - Coinbase: header.Coinbase, - BlockNumber: header.Number.Uint64(), - Time: header.Time, - Difficulty: new(big.Int).Set(header.Difficulty), - GasLimit: header.GasLimit, - BaseFee: &baseFee, - }, + return core.NewEVMBlockContext(header, getHashGetter(requireCanonical, tx, headerReader), ethash.NewFaker() /* TODO Discover correcrt 
engine type */, nil /* author */), vm.TxContext{ Origin: msg.From(), GasPrice: msg.GasPrice().ToBig(), diff --git a/turbo/transactions/tracing.go b/turbo/transactions/tracing.go index d0d569c93fc..08414d91de3 100644 --- a/turbo/transactions/tracing.go +++ b/turbo/transactions/tracing.go @@ -31,7 +31,7 @@ type BlockGetter interface { } // ComputeTxEnv returns the execution environment of a certain transaction. -func ComputeTxEnv(ctx context.Context, block *types.Block, cfg *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, contractHasTEVM func(common.Hash) (bool, error), engine consensus.Engine, dbtx kv.Tx, blockHash common.Hash, txIndex uint64) (core.Message, vm.BlockContext, vm.TxContext, *state.IntraBlockState, *state.PlainState, error) { +func ComputeTxEnv(ctx context.Context, block *types.Block, cfg *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, dbtx kv.Tx, blockHash common.Hash, txIndex uint64) (core.Message, vm.BlockContext, vm.TxContext, *state.IntraBlockState, *state.PlainState, error) { // Create the parent state database reader := state.NewPlainState(dbtx, block.NumberU64()) statedb := state.New(reader) @@ -43,7 +43,7 @@ func ComputeTxEnv(ctx context.Context, block *types.Block, cfg *params.ChainConf signer := types.MakeSigner(cfg, block.NumberU64()) header := block.Header() - BlockContext := core.NewEVMBlockContext(header, core.GetHashFn(header, getHeader), engine, nil, contractHasTEVM) + BlockContext := core.NewEVMBlockContext(header, core.GetHashFn(header, getHeader), engine, nil) vmenv := vm.NewEVM(BlockContext, vm.TxContext{}, statedb, cfg, vm.Config{}) rules := vmenv.ChainRules() for idx, tx := range block.Transactions() { @@ -89,6 +89,7 @@ func TraceTx( config *tracers.TraceConfig, chainConfig *params.ChainConfig, stream *jsoniter.Stream, + callTimeout time.Duration, ) error { // Assemble the structured logger or the JavaScript tracer var ( @@ -132,7 
+133,7 @@ func TraceTx( } // Run the transaction with tracing enabled. vmenv := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vm.Config{Debug: true, Tracer: tracer}) - var refunds bool = true + var refunds = true if config != nil && config.NoRefunds != nil && *config.NoRefunds { refunds = false } @@ -250,7 +251,7 @@ func (l *JsonStreamLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, ga value uint256.Int ) env.IntraBlockState().GetState(contract.Address(), &address, &value) - l.storage[contract.Address()][address] = common.Hash(value.Bytes32()) + l.storage[contract.Address()][address] = value.Bytes32() outputStorage = true } // capture SSTORE opcodes and record the written entry in the local storage. diff --git a/turbo/trie/hack.go b/turbo/trie/hack.go index c30b36e1314..1cf045dbbb5 100644 --- a/turbo/trie/hack.go +++ b/turbo/trie/hack.go @@ -52,7 +52,7 @@ func FullNode4() { } func ShortNode1() { - s := NewShortNode([]byte("1"), valueNode([]byte("2"))) + s := NewShortNode([]byte("1"), valueNode("2")) b, err := rlp.EncodeToBytes(s) if err != nil { panic(err) @@ -61,7 +61,7 @@ func ShortNode1() { } func ShortNode2() { - s := NewShortNode([]byte("1"), valueNode([]byte("123456789012345678901234567890123456789012345678901234567890"))) + s := NewShortNode([]byte("1"), valueNode("123456789012345678901234567890123456789012345678901234567890")) b, err := rlp.EncodeToBytes(s) if err != nil { panic(err) @@ -99,17 +99,17 @@ func Hash2() { } func Hash3() { - s := NewShortNode([]byte("12"), valueNode([]byte("1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012"))) + s := NewShortNode([]byte("12"), valueNode("1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012")) hashRoot(s, "Hash3") } func Hash4() { - s := NewShortNode([]byte("12345678901234567890123456789012"), valueNode([]byte("12345678901234567890"))) + s := 
NewShortNode([]byte("12345678901234567890123456789012"), valueNode("12345678901234567890")) hashRoot(s, "Hash4") } func Hash5() { - s := NewShortNode([]byte("1234567890123456789012345678901"), valueNode([]byte("1"))) + s := NewShortNode([]byte("1234567890123456789012345678901"), valueNode("1")) hashRoot(s, "Hash5") } diff --git a/turbo/trie/hasher_test.go b/turbo/trie/hasher_test.go index bebed6fb01b..70a4dedd844 100644 --- a/turbo/trie/hasher_test.go +++ b/turbo/trie/hasher_test.go @@ -12,7 +12,7 @@ func TestValue(t *testing.T) { h := newHasher(false) var hn common.Hash - h.hash(valueNode([]byte("BLAH")), false, hn[:]) + h.hash(valueNode("BLAH"), false, hn[:]) expected := "0x0" actual := fmt.Sprintf("0x%x", hn[:]) if actual != expected { diff --git a/turbo/trie/stream.go b/turbo/trie/stream.go index e2e773821df..957843f3e54 100644 --- a/turbo/trie/stream.go +++ b/turbo/trie/stream.go @@ -676,7 +676,7 @@ func StreamHash(it *StreamMergeIterator, storagePrefixLen int, hb *HashBuilder, itemType = newItemType switch itemType { case AccountStreamItem: - var a *accounts.Account = aVal + var a = aVal accData.Balance.Set(&a.Balance) accData.Nonce = a.Nonce accData.Incarnation = a.Incarnation diff --git a/turbo/trie/structural_branch_test.go b/turbo/trie/structural_branch_test.go index 7512479eca5..146e9e14a0d 100644 --- a/turbo/trie/structural_branch_test.go +++ b/turbo/trie/structural_branch_test.go @@ -77,7 +77,7 @@ func TestIHCursor(t *testing.T) { }, cursor, nil) k, _, _, _ := ih.AtPrefix([]byte{}) require.Equal(common.FromHex("0001"), k) - require.True(ih.SkipState) + require.False(ih.SkipState) require.Equal([]byte{}, ih.FirstNotCoveredPrefix()) k, _, _, _ = ih.Next() require.Equal(common.FromHex("0100"), k) diff --git a/turbo/trie/structural_test.go b/turbo/trie/structural_test.go index 20a0692fb12..1516d58ea31 100644 --- a/turbo/trie/structural_test.go +++ b/turbo/trie/structural_test.go @@ -50,9 +50,9 @@ func TestV2HashBuilding(t *testing.T) { valueShort := 
[]byte("VAL") for i, key := range keys { if i%2 == 0 { - tr.Update([]byte(key), valueNode(valueLong)) + tr.Update([]byte(key), valueLong) } else { - tr.Update([]byte(key), valueNode(valueShort)) + tr.Update([]byte(key), valueShort) } } trieHash := tr.Hash() @@ -110,7 +110,7 @@ func TestV2Resolution(t *testing.T) { tr := New(common.Hash{}) value := []byte("VALUE123985903485903489043859043859043859048590485904385903485940385439058934058439058439058439058940385904358904385438809348908345") for _, key := range keys { - tr.Update([]byte(key), valueNode(value)) + tr.Update([]byte(key), value) } trieHash := tr.Hash() @@ -201,7 +201,7 @@ func TestEmbeddedStorage(t *testing.T) { tr := New(common.Hash{}) valueShort := []byte("VAL") for _, key := range keys { - tr.Update([]byte(key)[common.HashLength:], valueNode(valueShort)) + tr.Update([]byte(key)[common.HashLength:], valueShort) } trieHash := tr.Hash() diff --git a/turbo/trie/trie.go b/turbo/trie/trie.go index 33e067e87b6..ffda873551d 100644 --- a/turbo/trie/trie.go +++ b/turbo/trie/trie.go @@ -44,8 +44,8 @@ var ( // Use New to create a trie that sits on top of a database. // // Trie is not safe for concurrent use. -//Deprecated -//use package turbo/trie +// Deprecated +// use package turbo/trie type Trie struct { root node @@ -58,7 +58,7 @@ type Trie struct { // trie is initially empty and does not require a database. Otherwise, // New will panic if db is nil and returns a MissingNodeError if root does // not exist in the database. Accessing the trie loads nodes from db on demand. 
-//Deprecated +// Deprecated // use package turbo/trie func New(root common.Hash) *Trie { trie := &Trie{ @@ -381,7 +381,7 @@ func findSubTriesToLoad(nd node, nibblePath []byte, hook []byte, rl RetainDecide dbPrefix = append(dbPrefix, b<<4) } else { dbPrefix[len(dbPrefix)-1] &= 0xf0 - dbPrefix[len(dbPrefix)-1] |= (b & 0xf) + dbPrefix[len(dbPrefix)-1] |= b & 0xf } bits += 4 } @@ -400,7 +400,7 @@ func findSubTriesToLoad(nd node, nibblePath []byte, hook []byte, rl RetainDecide } else { newDbPrefix = dbPrefix newDbPrefix[len(newDbPrefix)-1] &= 0xf0 - newDbPrefix[len(newDbPrefix)-1] |= (i1 & 0xf) + newDbPrefix[len(newDbPrefix)-1] |= i1 & 0xf } newPrefixes, newFixedBits, newHooks = findSubTriesToLoad(n.child1, newNibblePath, newHook, rl, newDbPrefix, bits+4, newPrefixes, newFixedBits, newHooks) } @@ -413,7 +413,7 @@ func findSubTriesToLoad(nd node, nibblePath []byte, hook []byte, rl RetainDecide } else { newDbPrefix = dbPrefix newDbPrefix[len(newDbPrefix)-1] &= 0xf0 - newDbPrefix[len(newDbPrefix)-1] |= (i2 & 0xf) + newDbPrefix[len(newDbPrefix)-1] |= i2 & 0xf } newPrefixes, newFixedBits, newHooks = findSubTriesToLoad(n.child2, newNibblePath, newHook, rl, newDbPrefix, bits+4, newPrefixes, newFixedBits, newHooks) } @@ -435,7 +435,7 @@ func findSubTriesToLoad(nd node, nibblePath []byte, hook []byte, rl RetainDecide } else { newDbPrefix = dbPrefix newDbPrefix[len(newDbPrefix)-1] &= 0xf0 - newDbPrefix[len(newDbPrefix)-1] |= (byte(i) & 0xf) + newDbPrefix[len(newDbPrefix)-1] |= byte(i) & 0xf } newPrefixes, newFixedBits, newHooks = findSubTriesToLoad(child, newNibblePath, newHook, rl, newDbPrefix, bits+4, newPrefixes, newFixedBits, newHooks) } diff --git a/turbo/trie/trie_root.go b/turbo/trie/trie_root.go index 2e3ee867af5..5018371bc39 100644 --- a/turbo/trie/trie_root.go +++ b/turbo/trie/trie_root.go @@ -174,26 +174,27 @@ func (l *FlatDBTrieLoader) SetStreamReceiver(receiver StreamReceiver) { } // CalcTrieRoot algo: -// for iterateIHOfAccounts { -// if canSkipState -// goto 
SkipAccounts // -// for iterateAccounts from prevIH to currentIH { -// use(account) -// for iterateIHOfStorage within accountWithIncarnation{ -// if canSkipState -// goto SkipStorage +// for iterateIHOfAccounts { +// if canSkipState +// goto SkipAccounts // -// for iterateStorage from prevIHOfStorage to currentIHOfStorage { -// use(storage) +// for iterateAccounts from prevIH to currentIH { +// use(account) +// for iterateIHOfStorage within accountWithIncarnation{ +// if canSkipState +// goto SkipStorage +// +// for iterateStorage from prevIHOfStorage to currentIHOfStorage { +// use(storage) +// } +// SkipStorage: +// use(ihStorage) // } -// SkipStorage: -// use(ihStorage) // } +// SkipAccounts: +// use(AccTrie) // } -// SkipAccounts: -// use(AccTrie) -// } func (l *FlatDBTrieLoader) CalcTrieRoot(tx kv.Tx, prefix []byte, quit <-chan struct{}) (common.Hash, error) { accC, err := tx.Cursor(kv.HashedAccounts) @@ -309,7 +310,6 @@ func (l *FlatDBTrieLoader) CalcTrieRoot(tx kv.Tx, prefix []byte, quit <-chan str if err := l.receiver.Receive(CutoffStreamItem, nil, nil, nil, nil, nil, false, len(prefix)); err != nil { return EmptyRoot, err } - return l.receiver.Root(), nil } @@ -635,10 +635,6 @@ func (r *RootHashAggregator) genStructAccount() error { if r.hc == nil { return nil } - - //if bytes.HasPrefix(keyHex, common.FromHex("060e")) { - // fmt.Printf("collect: %x,%b,%b, del:%t\n", keyHex, hasHash, hasTree, hashes == nil) - //} return r.hc(keyHex, hasState, hasTree, hasHash, hashes, rootHash) }, data, r.groups, r.hasTree, r.hasHash, false, @@ -740,7 +736,7 @@ func (c *AccTrieCursor) FirstNotCoveredPrefix() []byte { } func (c *AccTrieCursor) AtPrefix(prefix []byte) (k, v []byte, hasTree bool, err error) { - c.SkipState = true + c.SkipState = false // There can be accounts with keys less than the first key in AccTrie _, c.nextCreated = c.canUse([]byte{}) c.prev = append(c.prev[:0], c.cur...) 
c.prefix = prefix @@ -1318,18 +1314,24 @@ func (c *StorageTrieCursor) _deleteCurrent() error { } /* - Dense Sequence - if between 2 AccTrie records not possible insert any state record - then they form "dense sequence" - If 2 AccTrie records form Dense Sequence - then no reason to iterate over state - just use AccTrie one after another - Example1: - 1234 - 1235 - Example2: - 12ff - 13 - Example3: - 12ff - 13000000 - If 2 AccTrie records form "sequence" then it can be consumed without moving StateCursor +Dense Sequence - if between 2 AccTrie records not possible insert any state record - then they form "dense sequence" +If 2 AccTrie records form Dense Sequence - then no reason to iterate over state - just use AccTrie one after another +Example1: + + 1234 + 1235 + +Example2: + + 12ff + 13 + +Example3: + + 12ff + 13000000 + +If 2 AccTrie records form "sequence" then it can be consumed without moving StateCursor */ func isDenseSequence(prev []byte, next []byte) bool { isSequence := false diff --git a/turbo/trie/trie_test.go b/turbo/trie/trie_test.go index f4691cb330a..9cfbf06aedf 100644 --- a/turbo/trie/trie_test.go +++ b/turbo/trie/trie_test.go @@ -73,7 +73,7 @@ func TestLargeValue(t *testing.T) { // TestRandomCases tests som cases that were found via random fuzzing func TestRandomCases(t *testing.T) { - var rt []randTestStep = []randTestStep{ + var rt = []randTestStep{ {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 0 {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 1 {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000002")}, // step 2 diff --git a/turbo/trie/trie_transform.go b/turbo/trie/trie_transform.go index 9aa6d861907..6fba471cca3 100644 --- a/turbo/trie/trie_transform.go +++ b/turbo/trie/trie_transform.go @@ -24,7 +24,7 @@ func transformSubTrie(nd node, hex []byte, newTrie *Trie, transformFunc keyTrans code = 
make([]byte, len(n.code)) copy(code, n.code) } - _, newTrie.root = newTrie.insert(newTrie.root, transformFunc(hex), &accountNode{accountCopy, nil, true, codeNode(code), n.codeSize}) + _, newTrie.root = newTrie.insert(newTrie.root, transformFunc(hex), &accountNode{accountCopy, nil, true, code, n.codeSize}) aHex := hex if aHex[len(aHex)-1] == 16 { aHex = aHex[:len(aHex)-1] diff --git a/turbo/trie/vtree/verkle_utils.go b/turbo/trie/vtree/verkle_utils.go new file mode 100644 index 00000000000..0eec4c7acda --- /dev/null +++ b/turbo/trie/vtree/verkle_utils.go @@ -0,0 +1,203 @@ +package vtree + +import ( + "github.com/crate-crypto/go-ipa/bandersnatch/fr" + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" +) + +const ( + VersionLeafKey = 0 + BalanceLeafKey = 1 + NonceLeafKey = 2 + CodeKeccakLeafKey = 3 + CodeSizeLeafKey = 4 +) + +var ( + zero = uint256.NewInt(0) + HeaderStorageOffset = uint256.NewInt(64) + CodeOffset = uint256.NewInt(128) + MainStorageOffset = new(uint256.Int).Lsh(uint256.NewInt(256), 31) + VerkleNodeWidth = uint256.NewInt(256) + codeStorageDelta = uint256.NewInt(0).Sub(CodeOffset, HeaderStorageOffset) + + getTreePolyIndex0Point *verkle.Point +) + +func init() { + getTreePolyIndex0Point = new(verkle.Point) + err := getTreePolyIndex0Point.SetBytes([]byte{34, 25, 109, 242, 193, 5, 144, 224, 76, 52, 189, 92, 197, 126, 9, 145, 27, 152, 199, 130, 165, 3, 210, 27, 193, 131, 142, 28, 110, 26, 16, 191}) + if err != nil { + panic(err) + } +} + +// GetTreeKey performs both the work of the spec's get_tree_key function, and that +// of pedersen_hash: it builds the polynomial in pedersen_hash without having to +// create a mostly zero-filled buffer and "type cast" it to a 128-long 16-byte +// array. Since at most the first 5 coefficients of the polynomial will be non-zero, +// these 5 coefficients are created directly. 
+func GetTreeKey(address []byte, treeIndex *uint256.Int, subIndex byte) []byte { + if len(address) < 32 { + var aligned [32]byte + address = append(aligned[:32-len(address)], address...) + } + var poly [5]fr.Element + + poly[0].SetZero() + + // 32-byte address, interpreted as two little endian + // 16-byte numbers. + verkle.FromLEBytes(&poly[1], address[:16]) + verkle.FromLEBytes(&poly[2], address[16:]) + + // little-endian, 32-byte aligned treeIndex + var index [32]byte + for i, b := range treeIndex.Bytes() { + index[len(treeIndex.Bytes())-1-i] = b + } + verkle.FromLEBytes(&poly[3], index[:16]) + verkle.FromLEBytes(&poly[4], index[16:]) + + cfg, _ := verkle.GetConfig() + ret := cfg.CommitToPoly(poly[:], 0) + + // add a constant point + ret.Add(ret, getTreePolyIndex0Point) + + return PointToHash(ret, subIndex) + +} + +func GetTreeKeyAccountLeaf(address []byte, leaf byte) []byte { + return GetTreeKey(address, zero, leaf) +} + +func GetTreeKeyVersion(address []byte) []byte { + return GetTreeKey(address, zero, VersionLeafKey) +} + +func GetTreeKeyBalance(address []byte) []byte { + return GetTreeKey(address, zero, BalanceLeafKey) +} + +func GetTreeKeyNonce(address []byte) []byte { + return GetTreeKey(address, zero, NonceLeafKey) +} + +func GetTreeKeyCodeKeccak(address []byte) []byte { + return GetTreeKey(address, zero, CodeKeccakLeafKey) +} + +func GetTreeKeyCodeSize(address []byte) []byte { + return GetTreeKey(address, zero, CodeSizeLeafKey) +} + +func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte { + chunkOffset := new(uint256.Int).Add(CodeOffset, chunk) + treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth) + subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth).Bytes() + var subIndex byte + if len(subIndexMod) != 0 { + subIndex = subIndexMod[0] + } + return GetTreeKey(address, treeIndex, subIndex) +} + +func GetTreeKeyStorageSlot(address []byte, storageKey *uint256.Int) []byte { + pos := storageKey.Clone() + if 
storageKey.Cmp(codeStorageDelta) < 0 { + pos.Add(HeaderStorageOffset, storageKey) + } else { + pos.Add(MainStorageOffset, storageKey) + } + treeIndex := new(uint256.Int).Div(pos, VerkleNodeWidth) + + // calculate the sub_index, i.e. the index in the stem tree. + // Because the modulus is 256, it's the last byte of treeIndex + subIndexMod := new(uint256.Int).Mod(pos, VerkleNodeWidth).Bytes() + var subIndex byte + if len(subIndexMod) != 0 { + // uint256 is broken into 4 little-endian quads, + // each with native endianness. Extract the least + // significant byte. + subIndex = subIndexMod[0] & 0xFF + } + return GetTreeKey(address, treeIndex, subIndex) +} + +func PointToHash(evaluated *verkle.Point, suffix byte) []byte { + // The output of Byte() is big engian for banderwagon. This + // introduces an imbalance in the tree, because hashes are + // elements of a 253-bit field. This means more than half the + // tree would be empty. To avoid this problem, use a little + // endian commitment and chop the MSB. + retb := evaluated.Bytes() + for i := 0; i < 16; i++ { + retb[31-i], retb[i] = retb[i], retb[31-i] + } + retb[31] = suffix + return retb[:] +} + +const ( + PUSH1 = byte(0x60) + PUSH3 = byte(0x62) + PUSH4 = byte(0x63) + PUSH7 = byte(0x66) + PUSH21 = byte(0x74) + PUSH30 = byte(0x7d) + PUSH32 = byte(0x7f) +) + +// ChunkifyCode generates the chunked version of an array representing EVM bytecode +func ChunkifyCode(code []byte) []byte { + var ( + chunkOffset = 0 // offset in the chunk + chunkCount = len(code) / 31 + codeOffset = 0 // offset in the code + ) + if len(code)%31 != 0 { + chunkCount++ + } + chunks := make([]byte, chunkCount*32) + for i := 0; i < chunkCount; i++ { + // number of bytes to copy, 31 unless + // the end of the code has been reached. + end := 31 * (i + 1) + if len(code) < end { + end = len(code) + } + + // Copy the code itself + copy(chunks[i*32+1:], code[31*i:end]) + + // chunk offset = taken from the + // last chunk. 
+ if chunkOffset > 31 { + // skip offset calculation if push + // data covers the whole chunk + chunks[i*32] = 31 + chunkOffset = 1 + continue + } + chunks[32*i] = byte(chunkOffset) + chunkOffset = 0 + + // Check each instruction and update the offset + // it should be 0 unless a PUSHn overflows. + for ; codeOffset < end; codeOffset++ { + if code[codeOffset] >= PUSH1 && code[codeOffset] <= PUSH32 { + codeOffset += int(code[codeOffset] - PUSH1 + 1) + if codeOffset+1 >= 31*(i+1) { + codeOffset++ + chunkOffset = codeOffset - 31*(i+1) + break + } + } + } + } + + return chunks +} diff --git a/turbo/trie/vtree/verkle_utils_test.go b/turbo/trie/vtree/verkle_utils_test.go new file mode 100644 index 00000000000..6b029f4b1d1 --- /dev/null +++ b/turbo/trie/vtree/verkle_utils_test.go @@ -0,0 +1,46 @@ +package vtree + +import ( + "crypto/sha256" + "math/big" + "math/rand" + "testing" +) + +func BenchmarkPedersenHash(b *testing.B) { + var addr, v [32]byte + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + rand.Read(v[:]) + rand.Read(addr[:]) + GetTreeKeyCodeSize(addr[:]) + } +} + +func sha256GetTreeKeyCodeSize(addr []byte) []byte { + digest := sha256.New() + digest.Write(addr) + treeIndexBytes := new(big.Int).Bytes() + var payload [32]byte + copy(payload[:len(treeIndexBytes)], treeIndexBytes) + digest.Write(payload[:]) + h := digest.Sum(nil) + h[31] = CodeKeccakLeafKey + return h +} + +func BenchmarkSha256Hash(b *testing.B) { + var addr, v [32]byte + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + rand.Read(v[:]) + rand.Read(addr[:]) + sha256GetTreeKeyCodeSize(addr[:]) + } +} diff --git a/turbo/trie/witness_builder.go b/turbo/trie/witness_builder.go index a9a23308692..77d050eff48 100644 --- a/turbo/trie/witness_builder.go +++ b/turbo/trie/witness_builder.go @@ -263,7 +263,7 @@ func (b *WitnessBuilder) makeBlockWitness( if err := b.makeBlockWitness(child, expandKeyHex(hex, byte(i)), limiter, false); err != nil { return err } - mask |= 
(uint32(1) << uint(i)) + mask |= uint32(1) << uint(i) } } return b.addBranchOp(mask) diff --git a/wmake.ps1 b/wmake.ps1 index b66e7842ea9..6e754e1219d 100644 --- a/wmake.ps1 +++ b/wmake.ps1 @@ -20,9 +20,8 @@ Param( [Alias("target")] [ValidateSet( "clean", - "cons", "db-tools", - "devnettest", + "devnet", "downloader", "erigon", "evm", @@ -70,8 +69,7 @@ if ($BuildTargets.Count -gt 1) { if ($BuildTargets[0] -eq "all") { $BuildTargets = @( - "cons", - "devnettest", + "devnet", "downloader", "erigon", "evm",