diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 58c1a4a62..2015604e6 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -9,6 +9,7 @@ cmd/puppeth @karalabe consensus @karalabe core/ @karalabe @holiman @rjl493456442 eth/ @karalabe @holiman @rjl493456442 +eth/catalyst/ @gballet graphql/ @gballet les/ @zsfelfoldi @rjl493456442 light/ @zsfelfoldi @rjl493456442 diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index f87996cdc..a08542df2 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -30,11 +30,11 @@ Please make sure your contributions adhere to our coding guidelines: Before you submit a feature request, please check and make sure that it isn't possible through some other means. The JavaScript-enabled console is a powerful feature in the right hands. Please check our -[Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info +[Geth documentation page](https://geth.ethereum.org/docs/) for more info and help. ## Configuration, dependencies, and tests -Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide) +Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide) for more details on configuring your environment, managing project dependencies and testing procedures. diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE/bug.md similarity index 50% rename from .github/ISSUE_TEMPLATE.md rename to .github/ISSUE_TEMPLATE/bug.md index 59285e456..2aa2c48a6 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -1,8 +1,10 @@ -Hi there, - -Please note that this is an issue tracker reserved for bug reports and feature requests. - -For general questions please use [discord](https://discord.gg/nthXNEv) or the Ethereum stack exchange at https://ethereum.stackexchange.com. +--- +name: Report a bug +about: Something with go-ethereum is not working as expected +title: '' +labels: 'type:bug' +assignees: '' +--- #### System information @@ -24,3 +26,5 @@ Commit hash : (if `develop`) ```` [backtrace] ```` + +When submitting logs: please submit them as text and not screenshots. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md new file mode 100644 index 000000000..aacd885f9 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.md @@ -0,0 +1,17 @@ +--- +name: Request a feature +about: Report a missing feature - e.g. as a step before submitting a PR +title: '' +labels: 'type:feature' +assignees: '' +--- + +# Rationale + +Why should this feature exist? +What are the use-cases? + +# Implementation + +Do you have ideas regarding the implementation of this feature? +Are you willing to implement this feature? \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 000000000..8f460ab55 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,9 @@ +--- +name: Ask a question +about: Something is unclear +title: '' +labels: 'type:docs' +assignees: '' +--- + +This should only be used in very rare cases e.g. if you are not 100% sure if something is a bug or asking a question that leads to improving the documentation. For general questions please use [discord](https://discord.gg/nthXNEv) or the Ethereum stack exchange at https://ethereum.stackexchange.com. 
diff --git a/.travis.yml b/.travis.yml index 245f52b36..a7db711cc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,7 @@ jobs: allow_failures: - stage: build os: osx - go: 1.14.x + go: 1.15.x env: - azure-osx - azure-ios @@ -15,8 +15,8 @@ jobs: # This builder only tests code linters on latest version of Go - stage: lint os: linux - dist: xenial - go: 1.15.x + dist: bionic + go: 1.16.x env: - lint git: @@ -24,75 +24,48 @@ jobs: script: - go run build/ci.go lint + # These builders create the Docker sub-images for multi-arch push and each + # will attempt to push the multi-arch image if they are the last builder - stage: build - os: linux - dist: xenial - go: 1.13.x - env: - - GO111MODULE=on - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - - stage: build - os: linux - dist: xenial - go: 1.14.x - env: - - GO111MODULE=on - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - # These are the latest Go versions. - - stage: build + if: type = push os: linux arch: amd64 - dist: xenial - go: 1.15.x + dist: bionic + go: 1.16.x env: - - GO111MODULE=on + - docker + services: + - docker + git: + submodules: false # avoid cloning ethereum/tests + before_install: + - export DOCKER_CLI_EXPERIMENTAL=enabled script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES + - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go - stage: build - if: type = pull_request + if: type = push os: linux arch: arm64 - dist: xenial - go: 1.15.x + dist: bionic + go: 1.16.x env: - - GO111MODULE=on - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - - stage: build - os: osx - osx_image: xcode11.3 - go: 1.15.x - env: - - GO111MODULE=on + - docker + services: + - docker + git: + submodules: false # avoid cloning ethereum/tests + before_install: + - export DOCKER_CLI_EXPERIMENTAL=enabled script: - - echo "Increase the maximum number of open file descriptors on macOS" - - NOFILE=20480 - - sudo sysctl -w kern.maxfiles=$NOFILE - - sudo sysctl -w kern.maxfilesperproc=$NOFILE - - sudo launchctl limit maxfiles $NOFILE $NOFILE - - sudo launchctl limit maxfiles - - ulimit -S -n $NOFILE - - ulimit -n - - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703 - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES + - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go # This builder does the Ubuntu PPA upload - stage: build if: type = push os: linux - dist: xenial - go: 1.15.x + dist: bionic + go: 1.16.x env: - ubuntu-ppa - GO111MODULE=on @@ -109,15 +82,15 @@ jobs: - python-paramiko script: - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts - - go run build/ci.go debsrc -goversion 1.15 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder " + - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder " # This builder does the Linux Azure uploads - stage: build if: type 
= push os: linux - dist: xenial + dist: bionic sudo: required - go: 1.15.x + go: 1.16.x env: - azure-linux - GO111MODULE=on @@ -129,32 +102,32 @@ jobs: - gcc-multilib script: # Build for the primary platforms that Trusty can manage - - go run build/ci.go install - - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - go run build/ci.go install -arch 386 - - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go install -dlgo + - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - go run build/ci.go install -dlgo -arch 386 + - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # Switch over GCC to cross compilation (breaks 386, hence why do it here only) - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross - sudo ln -s /usr/include/asm-generic /usr/include/asm - - GOARM=5 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc - - GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - GOARM=6 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc - - GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - GOARM=7 go run build/ci.go install -arch arm -cc arm-linux-gnueabihf-gcc - - GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - go run build/ci.go install -arch arm64 -cc aarch64-linux-gnu-gcc - - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - GOARM=5 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc + - GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - GOARM=6 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc + - GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - GOARM=7 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabihf-gcc + - GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc + - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # This builder does the Linux Azure MIPS xgo uploads - stage: build if: type = push os: linux - dist: xenial + dist: bionic services: - docker - go: 1.15.x + go: 1.16.x env: - azure-linux-mips - GO111MODULE=on @@ -163,38 +136,29 @@ jobs: script: - go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v - for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done - - go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v - for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" 
"${bin/-linux-mipsle/}"; done - - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v - for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done - - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v - for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done - - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # This builder does the Android Maven and Azure uploads - stage: build if: type = push os: linux - dist: xenial + dist: bionic addons: apt: packages: - - oracle-java8-installer - - oracle-java8-set-default - language: android - android: - components: - - platform-tools - - tools - - android-15 - - android-19 - - android-24 + - openjdk-8-jdk env: - azure-android - maven-android @@ -202,25 +166,33 @@ jobs: git: submodules: false # avoid cloning ethereum/tests before_install: - - curl https://dl.google.com/go/go1.15.linux-amd64.tar.gz | tar -xz + # Install Android and it's dependencies manually, Travis is stale + - export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 + - curl https://dl.google.com/android/repository/commandlinetools-linux-6858069_latest.zip -o android.zip + - unzip -q android.zip -d $HOME/sdk && rm android.zip + - mv $HOME/sdk/cmdline-tools $HOME/sdk/latest && mkdir $HOME/sdk/cmdline-tools && mv $HOME/sdk/latest $HOME/sdk/cmdline-tools + - export PATH=$PATH:$HOME/sdk/cmdline-tools/latest/bin + - export ANDROID_HOME=$HOME/sdk + + - yes | sdkmanager --licenses >/dev/null + - sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle" + + # Install Go to allow building with + - curl https://dl.google.com/go/go1.16.linux-amd64.tar.gz | tar -xz - export PATH=`pwd`/go/bin:$PATH - export GOROOT=`pwd`/go - export GOPATH=$HOME/go script: # Build the Android archive and upload it to Maven Central and Azure - - curl https://dl.google.com/android/repository/android-ndk-r19b-linux-x86_64.zip -o android-ndk-r19b.zip - - unzip -q android-ndk-r19b.zip && rm android-ndk-r19b.zip - - mv android-ndk-r19b $ANDROID_HOME/ndk-bundle - - mkdir -p $GOPATH/src/github.com/ethereum - ln -s `pwd` $GOPATH/src/github.com/ethereum/go-ethereum - - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds + - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -signify SIGNIFY_KEY -deploy https://oss.sonatype.org -upload gethstore/builds # This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads - stage: build if: type = push os: osx - go: 1.15.x + go: 1.16.x env: - azure-osx - azure-ios @@ -229,8 +201,8 @@ jobs: git: submodules: false # avoid cloning ethereum/tests script: - - go run build/ci.go install - - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go 
install -dlgo + - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # Build the iOS framework and upload it to CocoaPods and Azure - gem uninstall cocoapods -a -x @@ -245,14 +217,45 @@ jobs: # Workaround for https://github.com/golang/go/issues/23749 - export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc' - - go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds + - go run build/ci.go xcode -signer IOS_SIGNING_KEY -signify SIGNIFY_KEY -deploy trunk -upload gethstore/builds + + # These builders run the tests + - stage: build + os: linux + arch: amd64 + dist: bionic + go: 1.16.x + env: + - GO111MODULE=on + script: + - go run build/ci.go test -coverage $TEST_PACKAGES + + - stage: build + if: type = pull_request + os: linux + arch: arm64 + dist: bionic + go: 1.16.x + env: + - GO111MODULE=on + script: + - go run build/ci.go test -coverage $TEST_PACKAGES + + - stage: build + os: linux + dist: bionic + go: 1.15.x + env: + - GO111MODULE=on + script: + - go run build/ci.go test -coverage $TEST_PACKAGES # This builder does the Azure archive purges to avoid accumulating junk - stage: build if: type = cron os: linux - dist: xenial - go: 1.15.x + dist: bionic + go: 1.16.x env: - azure-purge - GO111MODULE=on diff --git a/Dockerfile b/Dockerfile index d86b77661..e76c5765b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,10 +1,15 @@ +# Support setting various labels on the final image +ARG COMMIT="" +ARG VERSION="" +ARG BUILDNUM="" + # Build Geth in a stock Go builder container -FROM golang:1.15-alpine as builder +FROM golang:1.16-alpine as builder -RUN apk add --no-cache make gcc musl-dev linux-headers git +RUN apk add --no-cache gcc musl-dev linux-headers git ADD . /go-ethereum -RUN cd /go-ethereum && make geth +RUN cd /go-ethereum && go run build/ci.go install ./cmd/geth # Pull Geth into a second stage deploy alpine container FROM alpine:latest @@ -14,3 +19,10 @@ COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/ EXPOSE 8545 8546 30303 30303/udp ENTRYPOINT ["geth"] + +# Add some metadata labels to help programatic image consumption +ARG COMMIT="" +ARG VERSION="" +ARG BUILDNUM="" + +LABEL commit="$COMMIT" version="$VERSION" buildnum="$BUILDNUM" diff --git a/Dockerfile.alltools b/Dockerfile.alltools index 715213c5d..71f63b7a4 100644 --- a/Dockerfile.alltools +++ b/Dockerfile.alltools @@ -1,10 +1,15 @@ +# Support setting various labels on the final image +ARG COMMIT="" +ARG VERSION="" +ARG BUILDNUM="" + # Build Geth in a stock Go builder container -FROM golang:1.15-alpine as builder +FROM golang:1.16-alpine as builder -RUN apk add --no-cache make gcc musl-dev linux-headers git +RUN apk add --no-cache gcc musl-dev linux-headers git ADD . /go-ethereum -RUN cd /go-ethereum && make all +RUN cd /go-ethereum && go run build/ci.go install # Pull all binaries into a second stage deploy alpine container FROM alpine:latest @@ -13,3 +18,10 @@ RUN apk add --no-cache ca-certificates COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/ EXPOSE 8545 8546 30303 30303/udp + +# Add some metadata labels to help programatic image consumption +ARG COMMIT="" +ARG VERSION="" +ARG BUILDNUM="" + +LABEL commit="$COMMIT" version="$VERSION" buildnum="$BUILDNUM" diff --git a/Makefile b/Makefile index ecee5f189..cb5a87dad 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ android: @echo "Import \"$(GOBIN)/geth.aar\" to use the library." 
@echo "Import \"$(GOBIN)/geth-sources.jar\" to add javadocs" @echo "For more info see https://stackoverflow.com/questions/20994336/android-studio-how-to-attach-javadoc" - + ios: $(GORUN) build/ci.go xcode --local @echo "Done building." @@ -46,12 +46,11 @@ clean: # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'. devtools: - env GOBIN= go get -u golang.org/x/tools/cmd/stringer - env GOBIN= go get -u github.com/kevinburke/go-bindata/go-bindata - env GOBIN= go get -u github.com/fjl/gencodec - env GOBIN= go get -u github.com/golang/protobuf/protoc-gen-go + env GOBIN= go install golang.org/x/tools/cmd/stringer@latest + env GOBIN= go install github.com/kevinburke/go-bindata/go-bindata@latest + env GOBIN= go install github.com/fjl/gencodec@latest + env GOBIN= go install github.com/golang/protobuf/protoc-gen-go@latest env GOBIN= go install ./cmd/abigen - @type "npm" 2> /dev/null || echo 'Please install node.js and npm' @type "solc" 2> /dev/null || echo 'Please install solc' @type "protoc" 2> /dev/null || echo 'Please install protoc' diff --git a/SECURITY.md b/SECURITY.md index bc54ede42..bdce7b8d2 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,31 +2,29 @@ ## Supported Versions -Please see Releases. We recommend to use the most recent released version. +Please see [Releases](https://github.com/ethereum/go-ethereum/releases). We recommend using the [most recently released version](https://github.com/ethereum/go-ethereum/releases/latest). ## Audit reports Audit reports are published in the `docs` folder: https://github.com/ethereum/go-ethereum/tree/master/docs/audits - | Scope | Date | Report Link | | ------- | ------- | ----------- | | `geth` | 20170425 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2017-04-25_Geth-audit_Truesec.pdf) | | `clef` | 20180914 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2018-09-14_Clef-audit_NCC.pdf) | - - ## Reporting a Vulnerability **Please do not file a public ticket** mentioning the vulnerability. -To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. +To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publically disclosed security vulnerabilities. + +Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number. The following key may be used to communicate sensitive information to developers. 
Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A` - ``` -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1 diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go index ad8acdf52..a022ec5f9 100644 --- a/accounts/abi/abi_test.go +++ b/accounts/abi/abi_test.go @@ -43,6 +43,7 @@ const jsondata = ` { "type" : "function", "name" : "uint64[2]", "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] }, { "type" : "function", "name" : "uint64[]", "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] }, { "type" : "function", "name" : "int8", "inputs" : [ { "name" : "inputs", "type" : "int8" } ] }, + { "type" : "function", "name" : "bytes32", "inputs" : [ { "name" : "inputs", "type" : "bytes32" } ] }, { "type" : "function", "name" : "foo", "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] }, { "type" : "function", "name" : "bar", "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] }, { "type" : "function", "name" : "slice", "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] }, @@ -68,6 +69,7 @@ var ( String, _ = NewType("string", "", nil) Bool, _ = NewType("bool", "", nil) Bytes, _ = NewType("bytes", "", nil) + Bytes32, _ = NewType("bytes32", "", nil) Address, _ = NewType("address", "", nil) Uint64Arr, _ = NewType("uint64[]", "", nil) AddressArr, _ = NewType("address[]", "", nil) @@ -98,6 +100,7 @@ var methods = map[string]Method{ "uint64[]": NewMethod("uint64[]", "uint64[]", Function, "", false, false, []Argument{{"inputs", Uint64Arr, false}}, nil), "uint64[2]": NewMethod("uint64[2]", "uint64[2]", Function, "", false, false, []Argument{{"inputs", Uint64Arr2, false}}, nil), "int8": NewMethod("int8", "int8", Function, "", false, false, []Argument{{"inputs", Int8, false}}, nil), + "bytes32": NewMethod("bytes32", "bytes32", Function, "", false, false, []Argument{{"inputs", Bytes32, false}}, nil), "foo": NewMethod("foo", "foo", Function, "", false, false, []Argument{{"inputs", Uint32, false}}, nil), "bar": NewMethod("bar", "bar", Function, "", false, false, []Argument{{"inputs", Uint32, false}, {"string", Uint16, false}}, nil), "slice": NewMethod("slice", "slice", Function, "", false, false, []Argument{{"inputs", Uint32Arr2, false}}, nil), diff --git a/accounts/abi/bind/auth.go b/accounts/abi/bind/auth.go index c891b0a3e..a4307a952 100644 --- a/accounts/abi/bind/auth.go +++ b/accounts/abi/bind/auth.go @@ -17,10 +17,12 @@ package bind import ( + "context" "crypto/ecdsa" "errors" "io" "io/ioutil" + "math/big" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/external" @@ -28,11 +30,21 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" ) +// ErrNoChainID is returned whenever the user failed to specify a chain id. +var ErrNoChainID = errors.New("no chain id specified") + +// ErrNotAuthorized is returned when an account is not properly unlocked. +var ErrNotAuthorized = errors.New("not authorized to sign this account") + // NewTransactor is a utility method to easily create a transaction signer from // an encrypted json key stream and the associated passphrase. +// +// Deprecated: Use NewTransactorWithChainID instead. 
func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) { + log.Warn("WARNING: NewTransactor has been deprecated in favour of NewTransactorWithChainID") json, err := ioutil.ReadAll(keyin) if err != nil { return nil, err @@ -45,13 +57,17 @@ func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) { } // NewKeyStoreTransactor is a utility method to easily create a transaction signer from -// a decrypted key from a keystore. +// an decrypted key from a keystore. +// +// Deprecated: Use NewKeyStoreTransactorWithChainID instead. func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account) (*TransactOpts, error) { + log.Warn("WARNING: NewKeyStoreTransactor has been deprecated in favour of NewTransactorWithChainID") + signer := types.HomesteadSigner{} return &TransactOpts{ From: account.Address, - Signer: func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { if address != account.Address { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } signature, err := keystore.SignHash(account, signer.Hash(tx).Bytes()) if err != nil { @@ -59,18 +75,23 @@ func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account } return tx.WithSignature(signer, signature) }, + Context: context.Background(), }, nil } // NewKeyedTransactor is a utility method to easily create a transaction signer // from a single private key. +// +// Deprecated: Use NewKeyedTransactorWithChainID instead. func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { + log.Warn("WARNING: NewKeyedTransactor has been deprecated in favour of NewKeyedTransactorWithChainID") keyAddr := crypto.PubkeyToAddress(key.PublicKey) + signer := types.HomesteadSigner{} return &TransactOpts{ From: keyAddr, - Signer: func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { if address != keyAddr { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } signature, err := crypto.Sign(signer.Hash(tx).Bytes(), key) if err != nil { @@ -78,7 +99,69 @@ func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { } return tx.WithSignature(signer, signature) }, + Context: context.Background(), + } +} + +// NewTransactorWithChainID is a utility method to easily create a transaction signer from +// an encrypted json key stream and the associated passphrase. +func NewTransactorWithChainID(keyin io.Reader, passphrase string, chainID *big.Int) (*TransactOpts, error) { + json, err := ioutil.ReadAll(keyin) + if err != nil { + return nil, err + } + key, err := keystore.DecryptKey(json, passphrase) + if err != nil { + return nil, err + } + return NewKeyedTransactorWithChainID(key.PrivateKey, chainID) +} + +// NewKeyStoreTransactorWithChainID is a utility method to easily create a transaction signer from +// an decrypted key from a keystore. 
+func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accounts.Account, chainID *big.Int) (*TransactOpts, error) { + if chainID == nil { + return nil, ErrNoChainID + } + signer := types.LatestSignerForChainID(chainID) + return &TransactOpts{ + From: account.Address, + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { + if address != account.Address { + return nil, ErrNotAuthorized + } + signature, err := keystore.SignHash(account, signer.Hash(tx).Bytes()) + if err != nil { + return nil, err + } + return tx.WithSignature(signer, signature) + }, + Context: context.Background(), + }, nil +} + +// NewKeyedTransactorWithChainID is a utility method to easily create a transaction signer +// from a single private key. +func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*TransactOpts, error) { + keyAddr := crypto.PubkeyToAddress(key.PublicKey) + if chainID == nil { + return nil, ErrNoChainID } + signer := types.LatestSignerForChainID(chainID) + return &TransactOpts{ + From: keyAddr, + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { + if address != keyAddr { + return nil, ErrNotAuthorized + } + signature, err := crypto.Sign(signer.Hash(tx).Bytes(), key) + if err != nil { + return nil, err + } + return tx.WithSignature(signer, signature) + }, + Context: context.Background(), + }, nil } // NewClefTransactor is a utility method to easily create a transaction signer @@ -86,11 +169,12 @@ func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { func NewClefTransactor(clef *external.ExternalSigner, account accounts.Account) *TransactOpts { return &TransactOpts{ From: account.Address, - Signer: func(signer types.Signer, address common.Address, transaction *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, transaction *types.Transaction) (*types.Transaction, error) { if address != account.Address { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } return clef.SignTx(account, transaction, nil) // Clef enforces its own chain id }, + Context: context.Background(), } } diff --git a/accounts/abi/bind/backend.go b/accounts/abi/bind/backend.go index eed6a44bb..c16990f39 100644 --- a/accounts/abi/bind/backend.go +++ b/accounts/abi/bind/backend.go @@ -32,12 +32,12 @@ var ( // have any code associated with it (i.e. suicided). ErrNoCode = errors.New("no contract code at given address") - // This error is raised when attempting to perform a pending state action + // ErrNoPendingState is raised when attempting to perform a pending state action // on a backend that doesn't implement PendingContractCaller. ErrNoPendingState = errors.New("backend does not support pending state") - // This error is returned by WaitDeployed if contract creation leaves an - // empty contract behind. + // ErrNoCodeAfterDeploy is returned by WaitDeployed if contract creation leaves + // an empty contract behind. ErrNoCodeAfterDeploy = errors.New("no contract code after deployment") ) @@ -47,7 +47,8 @@ type ContractCaller interface { // CodeAt returns the code of the given account. This is needed to differentiate // between contract internal errors and the local chain being out of sync. 
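Editor's note: as an illustration of the chain-ID-aware constructors introduced above, here is a minimal sketch (not part of this patch) that builds a `TransactOpts` against the simulated backend. It assumes the chain ID 1337 that the simulated backend documents; the funding amount is arbitrary.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Generate a throwaway key and fund it in the simulated genesis.
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)
	sim := backends.NewSimulatedBackend(
		core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether)}}, 10000000)
	defer sim.Close()

	// The simulated backend always uses chain ID 1337, so the replacement
	// constructor is given that value explicitly; passing a nil chain ID
	// returns ErrNoChainID instead of silently falling back to a legacy signer.
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		panic(err)
	}
	fmt.Println("transactor bound to", auth.From.Hex())
}
```

The deprecated `NewKeyedTransactor` hard-codes a Homestead signer regardless of chain, which is why the tests further down in this patch switch to the `WithChainID` variant.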
CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) - // ContractCall executes an Ethereum contract call with the specified data as the + + // CallContract executes an Ethereum contract call with the specified data as the // input. CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) } @@ -58,6 +59,7 @@ type ContractCaller interface { type PendingContractCaller interface { // PendingCodeAt returns the code of the given account in the pending state. PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) + // PendingCallContract executes an Ethereum contract call against the pending state. PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) } @@ -67,19 +69,31 @@ type PendingContractCaller interface { // used when the user does not provide some needed values, but rather leaves it up // to the transactor to decide. type ContractTransactor interface { + // HeaderByNumber returns a block header from the current canonical chain. If + // number is nil, the latest known header is returned. + HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) + // PendingCodeAt returns the code of the given account in the pending state. PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) + // PendingNonceAt retrieves the current pending nonce associated with an account. PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) + // SuggestGasPrice retrieves the currently suggested gas price to allow a timely // execution of a transaction. SuggestGasPrice(ctx context.Context) (*big.Int, error) + + // SuggestGasTipCap retrieves the currently suggested 1559 priority fee to allow + // a timely execution of a transaction. + SuggestGasTipCap(ctx context.Context) (*big.Int, error) + // EstimateGas tries to estimate the gas needed to execute a specific // transaction based on the current pending state of the backend blockchain. // There is no guarantee that this is the true gas limit requirement as other // transactions may be added or removed by miners, but it should provide a basis // for setting a reasonable default. EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) + // SendTransaction injects the transaction into the pending pool for execution. SendTransaction(ctx context.Context, tx *types.Transaction) error } diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index c7efca440..e410522ac 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -74,6 +74,7 @@ type SimulatedBackend struct { // NewSimulatedBackendWithDatabase creates a new binding backend based on the given database // and uses a simulated blockchain for testing purposes. +// A simulated backend always uses chainID 1337. 
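Editor's note: since `ContractTransactor` now also requires `HeaderByNumber` and `SuggestGasTipCap`, a compile-time assertion is a cheap way to confirm a backend still satisfies the widened interface. A minimal sketch, assuming only the interfaces and the simulated backend as shown in this diff:

```go
package main

import (
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
)

// Compile-time checks: the simulated backend must keep implementing the bind
// interfaces after ContractTransactor gained HeaderByNumber and SuggestGasTipCap.
var (
	_ bind.ContractCaller     = (*backends.SimulatedBackend)(nil)
	_ bind.ContractTransactor = (*backends.SimulatedBackend)(nil)
)

func main() {}
```

For the simulated backend the new `SuggestGasTipCap` simply returns 1, as there are no real miners to derive a tip from.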
func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc} genesis.MustCommit(database) @@ -85,12 +86,13 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis config: genesis.Config, events: filters.NewEventSystem(&filterBackend{database, blockchain}, false), } - backend.rollback() + backend.rollback(blockchain.CurrentBlock()) return backend } // NewSimulatedBackend creates a new binding backend using a simulated blockchain // for testing purposes. +// A simulated backend always uses chainID 1337. func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { return NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), alloc, gasLimit) } @@ -110,7 +112,9 @@ func (b *SimulatedBackend) Commit() { if _, err := b.blockchain.InsertChain([]*types.Block{b.pendingBlock}); err != nil { panic(err) // This cannot happen unless the simulator is wrong, fail in that case } - b.rollback() + // Using the last inserted block here makes it possible to build on a side + // chain after a fork. + b.rollback(b.pendingBlock) } // Rollback aborts all pending transactions, reverting to the last committed state. @@ -118,15 +122,41 @@ func (b *SimulatedBackend) Rollback() { b.mu.Lock() defer b.mu.Unlock() - b.rollback() + b.rollback(b.blockchain.CurrentBlock()) } -func (b *SimulatedBackend) rollback() { - blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {}) - stateDB, _ := b.blockchain.State() +func (b *SimulatedBackend) rollback(parent *types.Block) { + blocks, _ := core.GenerateChain(b.config, parent, ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {}) b.pendingBlock = blocks[0] - b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil) + b.pendingState, _ = state.New(b.pendingBlock.Root(), b.blockchain.StateCache(), nil) +} + +// Fork creates a side-chain that can be used to simulate reorgs. +// +// This function should be called with the ancestor block where the new side +// chain should be started. Transactions (old and new) can then be applied on +// top and Commit-ed. +// +// Note, the side-chain will only become canonical (and trigger the events) when +// it becomes longer. Until then CallContract will still operate on the current +// canonical chain. +// +// There is a % chance that the side chain becomes canonical at the same length +// to simulate live network behavior. +func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error { + b.mu.Lock() + defer b.mu.Unlock() + + if len(b.pendingBlock.Transactions()) != 0 { + return errors.New("pending block dirty") + } + block, err := b.blockByHash(ctx, parent) + if err != nil { + return err + } + b.rollback(block) + return nil } // stateByBlockNumber retrieves a state by a given blocknumber. 
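Editor's note: the new `Fork` method above is exercised by `TestFork` at the end of this patch; as a standalone illustration, here is a minimal sketch (assuming an empty pending block and the chain-ID-1337 simulated backend) that rewinds to a saved ancestor and mines a longer side chain so it becomes canonical:

```go
package main

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)
	sim := backends.NewSimulatedBackend(
		core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether)}}, 10000000)
	defer sim.Close()

	// Remember the ancestor (here the genesis header) before mining.
	parent, _ := sim.HeaderByNumber(context.Background(), nil)

	// Mine two empty blocks on the original chain.
	sim.Commit()
	sim.Commit()

	// Fork back to the ancestor; this fails with "pending block dirty" if
	// transactions are still pending.
	if err := sim.Fork(context.Background(), parent.Hash()); err != nil {
		panic(err)
	}
	// Mine three blocks on the side chain; once it is longer than the original
	// chain, it becomes the canonical one.
	sim.Commit()
	sim.Commit()
	sim.Commit()
}
```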
@@ -134,7 +164,7 @@ func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber * if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 { return b.blockchain.State() } - block, err := b.blockByNumberNoLock(ctx, blockNumber) + block, err := b.blockByNumber(ctx, blockNumber) if err != nil { return nil, err } @@ -227,6 +257,11 @@ func (b *SimulatedBackend) BlockByHash(ctx context.Context, hash common.Hash) (* b.mu.Lock() defer b.mu.Unlock() + return b.blockByHash(ctx, hash) +} + +// blockByHash retrieves a block based on the block hash without Locking. +func (b *SimulatedBackend) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { if hash == b.pendingBlock.Hash() { return b.pendingBlock, nil } @@ -245,12 +280,12 @@ func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) ( b.mu.Lock() defer b.mu.Unlock() - return b.blockByNumberNoLock(ctx, number) + return b.blockByNumber(ctx, number) } -// blockByNumberNoLock retrieves a block from the database by number, caching it +// blockByNumber retrieves a block from the database by number, caching it // (associated with its hash) if found without Lock. -func (b *SimulatedBackend) blockByNumberNoLock(ctx context.Context, number *big.Int) (*types.Block, error) { +func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 { return b.blockchain.CurrentBlock(), nil } @@ -430,6 +465,12 @@ func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error return big.NewInt(1), nil } +// SuggestGasTipCap implements ContractTransactor.SuggestGasTipCap. Since the simulated +// chain doesn't have miners, we just return a gas tip of 1 for any call. +func (b *SimulatedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + return big.NewInt(1), nil +} + // EstimateGas executes the requested code against the currently pending block/state and // returns the used amount of gas. func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { @@ -447,8 +488,19 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs } else { hi = b.pendingBlock.GasLimit() } + // Normalize the max fee per gas the call is willing to spend. + var feeCap *big.Int + if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) { + return 0, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") + } else if call.GasPrice != nil { + feeCap = call.GasPrice + } else if call.GasFeeCap != nil { + feeCap = call.GasFeeCap + } else { + feeCap = common.Big0 + } // Recap the highest gas allowance with account's balance. 
- if call.GasPrice != nil && call.GasPrice.BitLen() != 0 { + if feeCap.BitLen() != 0 { balance := b.pendingState.GetBalance(call.From) // from can't be nil available := new(big.Int).Set(balance) if call.Value != nil { @@ -457,14 +509,14 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs } available.Sub(available, call.Value) } - allowance := new(big.Int).Div(available, call.GasPrice) + allowance := new(big.Int).Div(available, feeCap) if allowance.IsUint64() && hi > allowance.Uint64() { transfer := call.Value if transfer == nil { transfer = new(big.Int) } log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance, - "sent", transfer, "gasprice", call.GasPrice, "fundable", allowance) + "sent", transfer, "feecap", feeCap, "fundable", allowance) hi = allowance.Uint64() } } @@ -479,7 +531,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs b.pendingState.RevertToSnapshot(snapshot) if err != nil { - if err == core.ErrIntrinsicGas { + if errors.Is(err, core.ErrIntrinsicGas) { return true, nil, nil // Special case, raise gas limit } return true, nil, err // Bail out @@ -526,10 +578,38 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs // callContract implements common code between normal and pending contract calls. // state is modified during execution, make sure to copy it if necessary. func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, stateDB *state.StateDB) (*core.ExecutionResult, error) { - // Ensure message is initialized properly. - if call.GasPrice == nil { - call.GasPrice = big.NewInt(1) + // Gas prices post 1559 need to be initialized + if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) { + return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") + } + head := b.blockchain.CurrentHeader() + if !b.blockchain.Config().IsLondon(head.Number) { + // If there's no basefee, then it must be a non-1559 execution + if call.GasPrice == nil { + call.GasPrice = new(big.Int) + } + call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice + } else { + // A basefee is provided, necessitating 1559-type execution + if call.GasPrice != nil { + // User specified the legacy gas field, convert to 1559 gas typing + call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice + } else { + // User specified 1559 gas feilds (or none), use those + if call.GasFeeCap == nil { + call.GasFeeCap = new(big.Int) + } + if call.GasTipCap == nil { + call.GasTipCap = new(big.Int) + } + // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes + call.GasPrice = new(big.Int) + if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 { + call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, head.BaseFee), call.GasFeeCap) + } + } } + // Ensure message is initialized properly. if call.Gas == 0 { call.Gas = 50000000 } @@ -542,10 +622,11 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM // Execute the call. msg := callMsg{call} - evmContext := core.NewEVMContext(msg, block.Header(), b.blockchain, nil) + txContext := core.NewEVMTxContext(msg) + evmContext := core.NewEVMBlockContext(block.Header(), b.blockchain, nil) // Create a new environment which holds all relevant information // about the transaction and calling mechanisms. 
- vmEnv := vm.NewEVM(evmContext, stateDB, b.config, vm.Config{}) + vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{NoBaseFee: true}) gasPool := new(core.GasPool).AddGas(math.MaxUint64) return core.NewStateTransition(vmEnv, msg, gasPool).TransitionDb() @@ -557,7 +638,14 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa b.mu.Lock() defer b.mu.Unlock() - sender, err := types.Sender(types.NewEIP155Signer(b.config.ChainID), tx) + // Get the last block + block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash()) + if err != nil { + panic("could not fetch parent") + } + // Check transaction validity + signer := types.MakeSigner(b.blockchain.Config(), block.Number()) + sender, err := types.Sender(signer, tx) if err != nil { panic(fmt.Errorf("invalid transaction: %v", err)) } @@ -565,8 +653,8 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa if tx.Nonce() != nonce { panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)) } - - blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { + // Include tx in chain + blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { for _, tx := range b.pendingBlock.Transactions() { block.AddTxWithChain(b.blockchain, tx) } @@ -705,14 +793,17 @@ type callMsg struct { ethereum.CallMsg } -func (m callMsg) From() common.Address { return m.CallMsg.From } -func (m callMsg) Nonce() uint64 { return 0 } -func (m callMsg) CheckNonce() bool { return false } -func (m callMsg) To() *common.Address { return m.CallMsg.To } -func (m callMsg) GasPrice() *big.Int { return m.CallMsg.GasPrice } -func (m callMsg) Gas() uint64 { return m.CallMsg.Gas } -func (m callMsg) Value() *big.Int { return m.CallMsg.Value } -func (m callMsg) Data() []byte { return m.CallMsg.Data } +func (m callMsg) From() common.Address { return m.CallMsg.From } +func (m callMsg) Nonce() uint64 { return 0 } +func (m callMsg) IsFake() bool { return true } +func (m callMsg) To() *common.Address { return m.CallMsg.To } +func (m callMsg) GasPrice() *big.Int { return m.CallMsg.GasPrice } +func (m callMsg) GasFeeCap() *big.Int { return m.CallMsg.GasFeeCap } +func (m callMsg) GasTipCap() *big.Int { return m.CallMsg.GasTipCap } +func (m callMsg) Gas() uint64 { return m.CallMsg.Gas } +func (m callMsg) Value() *big.Int { return m.CallMsg.Value } +func (m callMsg) Data() []byte { return m.CallMsg.Data } +func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList } // filterBackend implements filters.Backend to support filtering for logs without // taking bloom-bits acceleration structures into account. 
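Editor's note: to make the fee normalisation in `callContract` and the funds-based cap in `EstimateGas` concrete, here is a small sketch of the arithmetic. The base fee value is illustrative; the 2.2 ether balance, the 1e14 fee cap and the resulting 20999 allowance mirror the test comments in the simulated backend tests below.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	// Effective gas price backfilled for EVM execution when 1559 fields are set:
	//   gasPrice = min(gasTipCap + baseFee, gasFeeCap)
	baseFee := big.NewInt(8)   // illustrative base fee
	tipCap := big.NewInt(1)    // maxPriorityFeePerGas
	feeCap := big.NewInt(1e14) // maxFeePerGas
	gasPrice := math.BigMin(new(big.Int).Add(tipCap, baseFee), feeCap)
	fmt.Println("effective gas price:", gasPrice) // 9

	// Funds-based cap applied by EstimateGas when the fee cap is non-zero:
	//   allowance = (balance - value) / feeCap
	balance := new(big.Int).Mul(big.NewInt(22), big.NewInt(1e17)) // 2.2 ether
	value := big.NewInt(1e17 + 1)                                 // just over 0.1 ether
	allowance := new(big.Int).Div(new(big.Int).Sub(balance, value), feeCap)
	fmt.Println("gas allowance:", allowance) // 20999, as in the test comment
}
```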
diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index e2597cca0..613267810 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -21,6 +21,7 @@ import ( "context" "errors" "math/big" + "math/rand" "reflect" "strings" "testing" @@ -39,7 +40,7 @@ import ( func TestSimulatedBackend(t *testing.T) { var gasLimit uint64 = 8000029 key, _ := crypto.GenerateKey() // nolint: gosec - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) genAlloc := make(core.GenesisAlloc) genAlloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(9223372036854775807)} @@ -58,9 +59,12 @@ func TestSimulatedBackend(t *testing.T) { } // generate a transaction and confirm you can retrieve it + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + code := `6060604052600a8060106000396000f360606040526008565b00` var gas uint64 = 3000000 - tx := types.NewContractCreation(0, big.NewInt(0), gas, big.NewInt(1), common.FromHex(code)) + tx := types.NewContractCreation(0, big.NewInt(0), gas, gasPrice, common.FromHex(code)) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, key) err = sim.SendTransaction(context.Background(), tx) @@ -110,14 +114,14 @@ var expectedReturn = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 func simTestBackend(testAddr common.Address) *SimulatedBackend { return NewSimulatedBackend( core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, + testAddr: {Balance: big.NewInt(10000000000000000)}, }, 10000000, ) } func TestNewSimulatedBackend(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - expectedBal := big.NewInt(10000000000) + expectedBal := big.NewInt(10000000000000000) sim := simTestBackend(testAddr) defer sim.Close() @@ -136,7 +140,7 @@ func TestNewSimulatedBackend(t *testing.T) { } } -func TestSimulatedBackend_AdjustTime(t *testing.T) { +func TestAdjustTime(t *testing.T) { sim := NewSimulatedBackend( core.GenesisAlloc{}, 10000000, ) @@ -153,11 +157,15 @@ func TestSimulatedBackend_AdjustTime(t *testing.T) { } } -func TestNewSimulatedBackend_AdjustTimeFail(t *testing.T) { +func TestNewAdjustTimeFail(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) + // Create tx and send - tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -178,7 +186,7 @@ func TestNewSimulatedBackend_AdjustTimeFail(t *testing.T) { t.Errorf("adjusted time not equal to a minute. 
prev: %v, new: %v", prevTime, newTime) } // Put a transaction after adjusting time - tx2 := types.NewTransaction(1, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + tx2 := types.NewTransaction(1, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx2, err := types.SignTx(tx2, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -191,9 +199,9 @@ func TestNewSimulatedBackend_AdjustTimeFail(t *testing.T) { } } -func TestSimulatedBackend_BalanceAt(t *testing.T) { +func TestBalanceAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - expectedBal := big.NewInt(10000000000) + expectedBal := big.NewInt(10000000000000000) sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() @@ -208,7 +216,7 @@ func TestSimulatedBackend_BalanceAt(t *testing.T) { } } -func TestSimulatedBackend_BlockByHash(t *testing.T) { +func TestBlockByHash(t *testing.T) { sim := NewSimulatedBackend( core.GenesisAlloc{}, 10000000, ) @@ -229,7 +237,7 @@ func TestSimulatedBackend_BlockByHash(t *testing.T) { } } -func TestSimulatedBackend_BlockByNumber(t *testing.T) { +func TestBlockByNumber(t *testing.T) { sim := NewSimulatedBackend( core.GenesisAlloc{}, 10000000, ) @@ -264,7 +272,7 @@ func TestSimulatedBackend_BlockByNumber(t *testing.T) { } } -func TestSimulatedBackend_NonceAt(t *testing.T) { +func TestNonceAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -281,7 +289,10 @@ func TestSimulatedBackend_NonceAt(t *testing.T) { } // create a signed transaction to send - tx := types.NewTransaction(nonce, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(nonce, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -314,7 +325,7 @@ func TestSimulatedBackend_NonceAt(t *testing.T) { } } -func TestSimulatedBackend_SendTransaction(t *testing.T) { +func TestSendTransaction(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -322,7 +333,10 @@ func TestSimulatedBackend_SendTransaction(t *testing.T) { bgCtx := context.Background() // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -345,19 +359,22 @@ func TestSimulatedBackend_SendTransaction(t *testing.T) { } } -func TestSimulatedBackend_TransactionByHash(t *testing.T) { +func TestTransactionByHash(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := NewSimulatedBackend( core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, + testAddr: {Balance: big.NewInt(10000000000000000)}, }, 10000000, ) defer sim.Close() bgCtx := context.Background() // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, 
big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -396,7 +413,7 @@ func TestSimulatedBackend_TransactionByHash(t *testing.T) { } } -func TestSimulatedBackend_EstimateGas(t *testing.T) { +func TestEstimateGas(t *testing.T) { /* pragma solidity ^0.6.4; contract GasEstimation { @@ -411,7 +428,7 @@ func TestSimulatedBackend_EstimateGas(t *testing.T) { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) - opts := bind.NewKeyedTransactor(key) + opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether)}}, 10000000) defer sim.Close() @@ -514,7 +531,7 @@ func TestSimulatedBackend_EstimateGas(t *testing.T) { } } -func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) { +func TestEstimateGasWithPrice(t *testing.T) { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -533,7 +550,7 @@ func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) { To: &recipient, Gas: 0, GasPrice: big.NewInt(0), - Value: big.NewInt(1000), + Value: big.NewInt(100000000000), Data: nil, }, 21000, nil}, @@ -541,8 +558,8 @@ func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) { From: addr, To: &recipient, Gas: 0, - GasPrice: big.NewInt(1000), - Value: big.NewInt(1000), + GasPrice: big.NewInt(100000000000), + Value: big.NewInt(100000000000), Data: nil, }, 21000, nil}, @@ -560,28 +577,51 @@ func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) { To: &recipient, Gas: 0, GasPrice: big.NewInt(2e14), // gascost = 4.2ether - Value: big.NewInt(1000), + Value: big.NewInt(100000000000), Data: nil, }, 21000, errors.New("gas required exceeds allowance (10999)")}, // 10999=(2.2ether-1000wei)/(2e14) - } - for _, c := range cases { + + {"EstimateEIP1559WithHighFees", ethereum.CallMsg{ + From: addr, + To: &addr, + Gas: 0, + GasFeeCap: big.NewInt(1e14), // maxgascost = 2.1ether + GasTipCap: big.NewInt(1), + Value: big.NewInt(1e17), // the remaining balance for fee is 2.1ether + Data: nil, + }, params.TxGas, nil}, + + {"EstimateEIP1559WithSuperHighFees", ethereum.CallMsg{ + From: addr, + To: &addr, + Gas: 0, + GasFeeCap: big.NewInt(1e14), // maxgascost = 2.1ether + GasTipCap: big.NewInt(1), + Value: big.NewInt(1e17 + 1), // the remaining balance for fee is 2.1ether + Data: nil, + }, params.TxGas, errors.New("gas required exceeds allowance (20999)")}, // 20999=(2.2ether-0.1ether-1wei)/(1e14) + } + for i, c := range cases { got, err := sim.EstimateGas(context.Background(), c.message) if c.expectError != nil { if err == nil { - t.Fatalf("Expect error, got nil") + t.Fatalf("test %d: expect error, got nil", i) } if c.expectError.Error() != err.Error() { - t.Fatalf("Expect error, want %v, got %v", c.expectError, err) + t.Fatalf("test %d: expect error, want %v, got %v", i, c.expectError, err) } continue } + if c.expectError == nil && err != nil { + t.Fatalf("test %d: didn't expect error, got %v", i, err) + } if got != c.expect { - t.Fatalf("Gas estimation mismatch, want %d, got %d", c.expect, got) + t.Fatalf("test %d: gas estimation mismatch, want %d, got %d", i, c.expect, got) } } } -func TestSimulatedBackend_HeaderByHash(t *testing.T) { 
+func TestHeaderByHash(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -602,7 +642,7 @@ func TestSimulatedBackend_HeaderByHash(t *testing.T) { } } -func TestSimulatedBackend_HeaderByNumber(t *testing.T) { +func TestHeaderByNumber(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -649,7 +689,7 @@ func TestSimulatedBackend_HeaderByNumber(t *testing.T) { } } -func TestSimulatedBackend_TransactionCount(t *testing.T) { +func TestTransactionCount(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -668,9 +708,11 @@ func TestSimulatedBackend_TransactionCount(t *testing.T) { if count != 0 { t.Errorf("expected transaction count of %v does not match actual count of %v", 0, count) } - // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -699,7 +741,7 @@ func TestSimulatedBackend_TransactionCount(t *testing.T) { } } -func TestSimulatedBackend_TransactionInBlock(t *testing.T) { +func TestTransactionInBlock(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -723,9 +765,11 @@ func TestSimulatedBackend_TransactionInBlock(t *testing.T) { if pendingNonce != uint64(0) { t.Errorf("expected pending nonce of 0 got %v", pendingNonce) } - // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -762,7 +806,7 @@ func TestSimulatedBackend_TransactionInBlock(t *testing.T) { } } -func TestSimulatedBackend_PendingNonceAt(t *testing.T) { +func TestPendingNonceAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -780,7 +824,10 @@ func TestSimulatedBackend_PendingNonceAt(t *testing.T) { } // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -803,7 +850,7 @@ func TestSimulatedBackend_PendingNonceAt(t *testing.T) { } // make a new transaction with a nonce of 1 - tx = types.NewTransaction(uint64(1), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + tx = types.NewTransaction(uint64(1), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err = types.SignTx(tx, 
types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -824,7 +871,7 @@ func TestSimulatedBackend_PendingNonceAt(t *testing.T) { } } -func TestSimulatedBackend_TransactionReceipt(t *testing.T) { +func TestTransactionReceipt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -832,7 +879,10 @@ func TestSimulatedBackend_TransactionReceipt(t *testing.T) { bgCtx := context.Background() // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -855,7 +905,7 @@ func TestSimulatedBackend_TransactionReceipt(t *testing.T) { } } -func TestSimulatedBackend_SuggestGasPrice(t *testing.T) { +func TestSuggestGasPrice(t *testing.T) { sim := NewSimulatedBackend( core.GenesisAlloc{}, 10000000, @@ -871,7 +921,7 @@ func TestSimulatedBackend_SuggestGasPrice(t *testing.T) { } } -func TestSimulatedBackend_PendingCodeAt(t *testing.T) { +func TestPendingCodeAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) defer sim.Close() @@ -888,7 +938,7 @@ func TestSimulatedBackend_PendingCodeAt(t *testing.T) { if err != nil { t.Errorf("could not get code at test addr: %v", err) } - auth := bind.NewKeyedTransactor(testKey) + auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim) if err != nil { t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract) @@ -907,7 +957,7 @@ func TestSimulatedBackend_PendingCodeAt(t *testing.T) { } } -func TestSimulatedBackend_CodeAt(t *testing.T) { +func TestCodeAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) defer sim.Close() @@ -924,7 +974,7 @@ func TestSimulatedBackend_CodeAt(t *testing.T) { if err != nil { t.Errorf("could not get code at test addr: %v", err) } - auth := bind.NewKeyedTransactor(testKey) + auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim) if err != nil { t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract) @@ -946,7 +996,7 @@ func TestSimulatedBackend_CodeAt(t *testing.T) { // When receive("X") is called with sender 0x00... 
and value 1, it produces this tx receipt: // receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} -func TestSimulatedBackend_PendingAndCallContract(t *testing.T) { +func TestPendingAndCallContract(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) defer sim.Close() @@ -956,7 +1006,7 @@ func TestSimulatedBackend_PendingAndCallContract(t *testing.T) { if err != nil { t.Errorf("could not get code at test addr: %v", err) } - contractAuth := bind.NewKeyedTransactor(testKey) + contractAuth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) addr, _, _, err := bind.DeployContract(contractAuth, parsed, common.FromHex(abiBin), sim) if err != nil { t.Errorf("could not deploy contract: %v", err) @@ -1030,7 +1080,7 @@ contract Reverter { } } }*/ -func TestSimulatedBackend_CallContractRevert(t *testing.T) { +func TestCallContractRevert(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) defer sim.Close() @@ -1043,7 +1093,7 @@ func TestSimulatedBackend_CallContractRevert(t *testing.T) { if err != nil { t.Errorf("could not get code at test addr: %v", err) } - contractAuth := bind.NewKeyedTransactor(testKey) + contractAuth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) addr, _, _, err := bind.DeployContract(contractAuth, parsed, common.FromHex(reverterBin), sim) if err != nil { t.Errorf("could not deploy contract: %v", err) @@ -1114,3 +1164,175 @@ func TestSimulatedBackend_CallContractRevert(t *testing.T) { sim.Commit() } } + +// TestFork check that the chain length after a reorg is correct. +// Steps: +// 1. Save the current block which will serve as parent for the fork. +// 2. Mine n blocks with n ∈ [0, 20]. +// 3. Assert that the chain length is n. +// 4. Fork by using the parent block as ancestor. +// 5. Mine n+1 blocks which should trigger a reorg. +// 6. Assert that the chain length is n+1. +// Since Commit() was called 2n+1 times in total, +// having a chain length of just n+1 means that a reorg occurred. +func TestFork(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + // 1. + parent := sim.blockchain.CurrentBlock() + // 2. + n := int(rand.Int31n(21)) + for i := 0; i < n; i++ { + sim.Commit() + } + // 3. + if sim.blockchain.CurrentBlock().NumberU64() != uint64(n) { + t.Error("wrong chain length") + } + // 4. + sim.Fork(context.Background(), parent.Hash()) + // 5. 
+ for i := 0; i < n+1; i++ { + sim.Commit() + } + // 6. + if sim.blockchain.CurrentBlock().NumberU64() != uint64(n+1) { + t.Error("wrong chain length") + } +} + +/* +Example contract to test event emission: + +pragma solidity >=0.7.0 <0.9.0; +contract Callable { + event Called(); + function Call() public { emit Called(); } +} +*/ +const callableAbi = "[{\"anonymous\":false,\"inputs\":[],\"name\":\"Called\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"Call\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" + +const callableBin = "6080604052348015600f57600080fd5b5060998061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806334e2292114602d575b600080fd5b60336035565b005b7f81fab7a4a0aa961db47eefc81f143a5220e8c8495260dd65b1356f1d19d3c7b860405160405180910390a156fea2646970667358221220029436d24f3ac598ceca41d4d712e13ced6d70727f4cdc580667de66d2f51d8b64736f6c63430008010033" + +// TestForkLogsReborn check that the simulated reorgs +// correctly remove and reborn logs. +// Steps: +// 1. Deploy the Callable contract. +// 2. Set up an event subscription. +// 3. Save the current block which will serve as parent for the fork. +// 4. Send a transaction. +// 5. Check that the event was included. +// 6. Fork by using the parent block as ancestor. +// 7. Mine two blocks to trigger a reorg. +// 8. Check that the event was removed. +// 9. Re-send the transaction and mine a block. +// 10. Check that the event was reborn. +func TestForkLogsReborn(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + // 1. + parsed, _ := abi.JSON(strings.NewReader(callableAbi)) + auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) + _, _, contract, err := bind.DeployContract(auth, parsed, common.FromHex(callableBin), sim) + if err != nil { + t.Errorf("deploying contract: %v", err) + } + sim.Commit() + // 2. + logs, sub, err := contract.WatchLogs(nil, "Called") + if err != nil { + t.Errorf("watching logs: %v", err) + } + defer sub.Unsubscribe() + // 3. + parent := sim.blockchain.CurrentBlock() + // 4. + tx, err := contract.Transact(auth, "Call") + if err != nil { + t.Errorf("transacting: %v", err) + } + sim.Commit() + // 5. + log := <-logs + if log.TxHash != tx.Hash() { + t.Error("wrong event tx hash") + } + if log.Removed { + t.Error("Event should be included") + } + // 6. + if err := sim.Fork(context.Background(), parent.Hash()); err != nil { + t.Errorf("forking: %v", err) + } + // 7. + sim.Commit() + sim.Commit() + // 8. + log = <-logs + if log.TxHash != tx.Hash() { + t.Error("wrong event tx hash") + } + if !log.Removed { + t.Error("Event should be removed") + } + // 9. + if err := sim.SendTransaction(context.Background(), tx); err != nil { + t.Errorf("sending transaction: %v", err) + } + sim.Commit() + // 10. + log = <-logs + if log.TxHash != tx.Hash() { + t.Error("wrong event tx hash") + } + if log.Removed { + t.Error("Event should be included") + } +} + +// TestForkResendTx checks that re-sending a TX after a fork +// is possible and does not cause a "nonce mismatch" panic. +// Steps: +// 1. Save the current block which will serve as parent for the fork. +// 2. Send a transaction. +// 3. Check that the TX is included in block 1. +// 4. Fork by using the parent block as ancestor. +// 5. Mine a block, Re-send the transaction and mine another one. +// 6. Check that the TX is now included in block 2. 
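A condensed sketch of driving the new Fork helper from outside the test suite, assuming only the exported SimulatedBackend API exercised in the tests above (HeaderByNumber, Commit, Fork). Fork rewinds the head to the given ancestor, so the final height reflects only the blocks mined after the fork:

```
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	ctx := context.Background()
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, 10000000)
	defer sim.Close()

	// Remember the block that will become the common ancestor of both branches.
	parent, _ := sim.HeaderByNumber(ctx, nil)

	// Build a short branch on top of it.
	sim.Commit()
	sim.Commit()

	// Rewind to the ancestor and mine a longer branch.
	if err := sim.Fork(ctx, parent.Hash()); err != nil {
		panic(err)
	}
	sim.Commit()
	sim.Commit()
	sim.Commit()

	head, _ := sim.HeaderByNumber(ctx, nil)
	fmt.Println("head after fork:", head.Number.Uint64()) // 3, not 5
}
```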
+func TestForkResendTx(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + // 1. + parent := sim.blockchain.CurrentBlock() + // 2. + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + _tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) + tx, _ := types.SignTx(_tx, types.HomesteadSigner{}, testKey) + sim.SendTransaction(context.Background(), tx) + sim.Commit() + // 3. + receipt, _ := sim.TransactionReceipt(context.Background(), tx.Hash()) + if h := receipt.BlockNumber.Uint64(); h != 1 { + t.Errorf("TX included in wrong block: %d", h) + } + // 4. + if err := sim.Fork(context.Background(), parent.Hash()); err != nil { + t.Errorf("forking: %v", err) + } + // 5. + sim.Commit() + if err := sim.SendTransaction(context.Background(), tx); err != nil { + t.Errorf("sending transaction: %v", err) + } + sim.Commit() + // 6. + receipt, _ = sim.TransactionReceipt(context.Background(), tx.Hash()) + if h := receipt.BlockNumber.Uint64(); h != 2 { + t.Errorf("TX included in wrong block: %d", h) + } +} diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go index 0216f6b54..354632b25 100644 --- a/accounts/abi/bind/base.go +++ b/accounts/abi/bind/base.go @@ -21,6 +21,8 @@ import ( "errors" "fmt" "math/big" + "strings" + "sync" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" @@ -32,7 +34,7 @@ import ( // SignerFn is a signer function callback when a contract requires a method to // sign the transaction before submission. -type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Transaction, error) +type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error) // CallOpts is the collection of options to fine tune a contract call request. type CallOpts struct { @@ -49,11 +51,15 @@ type TransactOpts struct { Nonce *big.Int // Nonce to use for the transaction execution (nil = use pending state) Signer SignerFn // Method to use for signing the transaction (mandatory) - Value *big.Int // Funds to transfer along the transaction (nil = 0 = no funds) - GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle) - GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate) + Value *big.Int // Funds to transfer along the transaction (nil = 0 = no funds) + GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle) + GasFeeCap *big.Int // Gas fee cap to use for the 1559 transaction execution (nil = gas price oracle) + GasTipCap *big.Int // Gas priority fee cap to use for the 1559 transaction execution (nil = gas price oracle) + GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate) Context context.Context // Network context to support cancellation and timeouts (nil = no timeout) + + NoSend bool // Do all transact steps but do not send the transaction } // FilterOpts is the collection of options to fine tune filtering for events @@ -72,6 +78,29 @@ type WatchOpts struct { Context context.Context // Network context to support cancellation and timeouts (nil = no timeout) } +// MetaData collects all metadata for a bound contract. 
+type MetaData struct { + mu sync.Mutex + Sigs map[string]string + Bin string + ABI string + ab *abi.ABI +} + +func (m *MetaData) GetAbi() (*abi.ABI, error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.ab != nil { + return m.ab, nil + } + if parsed, err := abi.JSON(strings.NewReader(m.ABI)); err != nil { + return nil, err + } else { + m.ab = &parsed + } + return m.ab, nil +} + // BoundContract is the base wrapper object that reflects a contract on the // Ethereum network. It contains a collection of methods that are used by the // higher level contract bindings to operate. @@ -152,7 +181,10 @@ func (c *BoundContract) Call(opts *CallOpts, results *[]interface{}, method stri } } else { output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber) - if err == nil && len(output) == 0 { + if err != nil { + return err + } + if len(output) == 0 { // Make sure we have a contract to operate on, and bail out otherwise. if code, err = c.caller.CodeAt(ctx, c.address, opts.BlockNumber); err != nil { return err @@ -218,12 +250,42 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i } else { nonce = opts.Nonce.Uint64() } - // Figure out the gas allowance and gas price values - gasPrice := opts.GasPrice - if gasPrice == nil { - gasPrice, err = c.transactor.SuggestGasPrice(ensureContext(opts.Context)) - if err != nil { - return nil, fmt.Errorf("failed to suggest gas price: %v", err) + // Figure out reasonable gas price values + if opts.GasPrice != nil && (opts.GasFeeCap != nil || opts.GasTipCap != nil) { + return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") + } + head, err := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil) + if err != nil { + return nil, err + } + if head.BaseFee != nil && opts.GasPrice == nil { + if opts.GasTipCap == nil { + tip, err := c.transactor.SuggestGasTipCap(ensureContext(opts.Context)) + if err != nil { + return nil, err + } + opts.GasTipCap = tip + } + if opts.GasFeeCap == nil { + gasFeeCap := new(big.Int).Add( + opts.GasTipCap, + new(big.Int).Mul(head.BaseFee, big.NewInt(2)), + ) + opts.GasFeeCap = gasFeeCap + } + if opts.GasFeeCap.Cmp(opts.GasTipCap) < 0 { + return nil, fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", opts.GasFeeCap, opts.GasTipCap) + } + } else { + if opts.GasFeeCap != nil || opts.GasTipCap != nil { + return nil, errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet") + } + if opts.GasPrice == nil { + price, err := c.transactor.SuggestGasPrice(ensureContext(opts.Context)) + if err != nil { + return nil, err + } + opts.GasPrice = price } } gasLimit := opts.GasLimit @@ -237,7 +299,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i } } // If the contract surely has code (or code is not needed), estimate the transaction - msg := ethereum.CallMsg{From: opts.From, To: contract, GasPrice: gasPrice, Value: value, Data: input} + msg := ethereum.CallMsg{From: opts.From, To: contract, GasPrice: opts.GasPrice, GasTipCap: opts.GasTipCap, GasFeeCap: opts.GasFeeCap, Value: value, Data: input} gasLimit, err = c.transactor.EstimateGas(ensureContext(opts.Context), msg) if err != nil { return nil, fmt.Errorf("failed to estimate gas needed: %v", err) @@ -245,18 +307,42 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i } // Create the transaction, sign it and schedule it for execution var rawTx *types.Transaction - if contract == nil { - rawTx = types.NewContractCreation(nonce, 
value, gasLimit, gasPrice, input) + if opts.GasFeeCap == nil { + baseTx := &types.LegacyTx{ + Nonce: nonce, + GasPrice: opts.GasPrice, + Gas: gasLimit, + Value: value, + Data: input, + } + if contract != nil { + baseTx.To = &c.address + } + rawTx = types.NewTx(baseTx) } else { - rawTx = types.NewTransaction(nonce, c.address, value, gasLimit, gasPrice, input) + baseTx := &types.DynamicFeeTx{ + Nonce: nonce, + GasFeeCap: opts.GasFeeCap, + GasTipCap: opts.GasTipCap, + Gas: gasLimit, + Value: value, + Data: input, + } + if contract != nil { + baseTx.To = &c.address + } + rawTx = types.NewTx(baseTx) } if opts.Signer == nil { return nil, errors.New("no signer to authorize the transaction with") } - signedTx, err := opts.Signer(types.HomesteadSigner{}, opts.From, rawTx) + signedTx, err := opts.Signer(opts.From, rawTx) if err != nil { return nil, err } + if opts.NoSend { + return signedTx, nil + } if err := c.transactor.SendTransaction(ensureContext(opts.Context), signedTx); err != nil { return nil, err } @@ -379,7 +465,7 @@ func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event strin // user specified it as such. func ensureContext(ctx context.Context) context.Context { if ctx == nil { - return context.TODO() + return context.Background() } return ctx } diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index 8bfbf30b5..d49b436db 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -296,9 +296,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy an interaction tester contract and call a transaction on it @@ -351,9 +351,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a tuple tester contract and execute a structured call on it @@ -397,9 +397,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a tuple tester contract and execute a structured call on it @@ -455,9 +455,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := 
backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a slice tester contract and execute a n array call on it @@ -503,9 +503,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a default method invoker contract and execute its default method @@ -529,6 +529,70 @@ var bindTests = []struct { nil, nil, }, + // Tests that structs are correctly unpacked + { + + `Structs`, + ` + pragma solidity ^0.6.5; + pragma experimental ABIEncoderV2; + contract Structs { + struct A { + bytes32 B; + } + + function F() public view returns (A[] memory a, uint256[] memory c, bool[] memory d) { + A[] memory a = new A[](2); + a[0].B = bytes32(uint256(1234) << 96); + uint256[] memory c; + bool[] memory d; + return (a, c, d); + } + + function G() public view returns (A[] memory a) { + A[] memory a = new A[](2); + a[0].B = bytes32(uint256(1234) << 96); + return a; + } + } + `, + []string{`608060405234801561001057600080fd5b50610278806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806328811f591461003b5780636fecb6231461005b575b600080fd5b610043610070565b604051610052939291906101a0565b60405180910390f35b6100636100d6565b6040516100529190610186565b604080516002808252606082810190935282918291829190816020015b610095610131565b81526020019060019003908161008d575050805190915061026960611b9082906000906100be57fe5b60209081029190910101515293606093508392509050565b6040805160028082526060828101909352829190816020015b6100f7610131565b8152602001906001900390816100ef575050805190915061026960611b90829060009061012057fe5b602090810291909101015152905090565b60408051602081019091526000815290565b815260200190565b6000815180845260208085019450808401835b8381101561017b578151518752958201959082019060010161015e565b509495945050505050565b600060208252610199602083018461014b565b9392505050565b6000606082526101b3606083018661014b565b6020838203818501528186516101c98185610239565b91508288019350845b818110156101f3576101e5838651610143565b9484019492506001016101d2565b505084810360408601528551808252908201925081860190845b8181101561022b57825115158552938301939183019160010161020d565b509298975050505050505050565b9081526020019056fea2646970667358221220eb85327e285def14230424c52893aebecec1e387a50bb6b75fc4fdbed647f45f64736f6c63430006050033`}, + []string{`[{"inputs":[],"name":"F","outputs":[{"components":[{"internalType":"bytes32","name":"B","type":"bytes32"}],"internalType":"structStructs.A[]","name":"a","type":"tuple[]"},{"internalType":"uint256[]","name":"c","type":"uint256[]"},{"internalType":"bool[]","name":"d","type":"bool[]"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"G","outputs":[{"components":[{"internalType":"bytes32","name":"B","type":"bytes32"}],"internalType":"structStructs.A[]","name":"a","type":"tuple[]"}],"stateMutability":"view","type":"function"}]`}, + ` + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + `, + ` + // Generate a new random account 
and a funded simulator + key, _ := crypto.GenerateKey() + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) + defer sim.Close() + + // Deploy a structs method invoker contract and execute its default method + _, _, structs, err := DeployStructs(auth, sim) + if err != nil { + t.Fatalf("Failed to deploy defaulter contract: %v", err) + } + sim.Commit() + opts := bind.CallOpts{} + if _, err := structs.F(&opts); err != nil { + t.Fatalf("Failed to invoke F method: %v", err) + } + if _, err := structs.G(&opts); err != nil { + t.Fatalf("Failed to invoke G method: %v", err) + } + `, + nil, + nil, + nil, + nil, + }, // Tests that non-existent contracts are reported as such (though only simulator test) { `NonExistent`, @@ -569,6 +633,45 @@ var bindTests = []struct { nil, nil, }, + { + `NonExistentStruct`, + ` + contract NonExistentStruct { + function Struct() public view returns(uint256 a, uint256 b) { + return (10, 10); + } + } + `, + []string{`6080604052348015600f57600080fd5b5060888061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063d5f6622514602d575b600080fd5b6033604c565b6040805192835260208301919091528051918290030190f35b600a809156fea264697066735822beefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeef64736f6c6343decafe0033`}, + []string{`[{"inputs":[],"name":"Struct","outputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"}],"stateMutability":"pure","type":"function"}]`}, + ` + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + `, + ` + // Create a simulator and wrap a non-deployed contract + + sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, uint64(10000000000)) + defer sim.Close() + + nonexistent, err := NewNonExistentStruct(common.Address{}, sim) + if err != nil { + t.Fatalf("Failed to access non-existent contract: %v", err) + } + // Ensure that contract calls fail with the appropriate error + if res, err := nonexistent.Struct(nil); err == nil { + t.Fatalf("Call succeeded on non-existent contract: %v", res) + } else if (err != bind.ErrNoCode) { + t.Fatalf("Error mismatch: have %v, want %v", err, bind.ErrNoCode) + } + `, + nil, + nil, + nil, + nil, + }, // Tests that gas estimation works for contracts with weird gas mechanics too. 
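The recurring change across these fixtures is the transactor construction: bindings now need a chain-ID-aware signer (the simulated backend runs with chain ID 1337), and accounts are funded well above the old 10 gwei balances so base-fee-priced transactions still clear. A minimal sketch of the updated setup:

```
package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()

	// Chain-ID aware transactor; replaces the deprecated NewKeyedTransactor.
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		panic(err)
	}
	// Fund generously (0.01 ether) so London-style fees never starve the tests.
	sim := backends.NewSimulatedBackend(
		core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}},
		10000000,
	)
	defer sim.Close()
}
```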
{ `FunkyGasPattern`, @@ -598,9 +701,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a funky gas pattern contract @@ -648,9 +751,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a sender tester contract and execute a structured call on it @@ -723,9 +826,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a underscorer tester contract and execute a structured call on it @@ -817,9 +920,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy an eventer contract @@ -1007,9 +1110,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() //deploy the test contract @@ -1142,9 +1245,9 @@ var bindTests = []struct { ` key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() _, _, contract, err := DeployTuple(auth, sim) @@ -1284,9 +1387,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := 
backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() //deploy the test contract @@ -1350,8 +1453,8 @@ var bindTests = []struct { ` // Initialize test accounts key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // deploy the test contract @@ -1441,10 +1544,10 @@ var bindTests = []struct { addr := crypto.PubkeyToAddress(key.PublicKey) // Deploy registrar contract - sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) _, _, _, err := DeployIdentifierCollision(transactOpts, sim) if err != nil { t.Fatalf("failed to deploy contract: %v", err) @@ -1503,10 +1606,10 @@ var bindTests = []struct { addr := crypto.PubkeyToAddress(key.PublicKey) // Deploy registrar contract - sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) _, _, c1, err := DeployContractOne(transactOpts, sim) if err != nil { t.Fatal("Failed to deploy contract") @@ -1563,9 +1666,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a tester contract and execute a structured call on it @@ -1601,11 +1704,7 @@ var bindTests = []struct { contract NewFallbacks { event Fallback(bytes data); fallback() external { - bytes memory data; - assembly { - calldatacopy(data, 0, calldatasize()) - } - emit Fallback(data); + emit Fallback(msg.data); } event Received(address addr, uint value); @@ -1614,7 +1713,7 @@ var bindTests = []struct { } } `, - 
[]string{"60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040526004361061000d575b36610081575b7f88a5966d370b9919b20f3e2c13ff65706f196a4e32cc2c12bf57088f885258743334604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a15b005b34801561008e5760006000fd5b505b606036600082377f9043988963722edecc2099c75b0af0ff76af14ffca42ed6bce059a20a2a9f986816040518080602001828103825283818151815260200191508051906020019080838360005b838110156100fa5780820151818401525b6020810190506100de565b50505050905090810190601f1680156101275780820380516001836020036101000a031916815260200191505b509250505060405180910390a1505b00fea26469706673582212205643ca37f40c2b352dc541f42e9e6720de065de756324b7fcc9fb1d67eda4a7d64736f6c63430006040033"}, + []string{"6080604052348015600f57600080fd5b506101078061001f6000396000f3fe608060405236605f577f88a5966d370b9919b20f3e2c13ff65706f196a4e32cc2c12bf57088f885258743334604051808373ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1005b348015606a57600080fd5b507f9043988963722edecc2099c75b0af0ff76af14ffca42ed6bce059a20a2a9f98660003660405180806020018281038252848482818152602001925080828437600081840152601f19601f820116905080830192505050935050505060405180910390a100fea26469706673582212201f994dcfbc53bf610b19176f9a361eafa77b447fd9c796fa2c615dfd0aaf3b8b64736f6c634300060c0033"}, []string{`[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"bytes","name":"data","type":"bytes"}],"name":"Fallback","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"addr","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Received","type":"event"},{"stateMutability":"nonpayable","type":"fallback"},{"stateMutability":"payable","type":"receive"}]`}, ` "bytes" @@ -1629,10 +1728,10 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 1000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 1000000) defer sim.Close() - opts := bind.NewKeyedTransactor(key) + opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) _, _, c, err := DeployNewFallbacks(opts, sim) if err != nil { t.Fatalf("Failed to deploy contract: %v", err) @@ -1662,6 +1761,7 @@ var bindTests = []struct { } // Test fallback function + gotEvent = false opts.Value = nil calldata := []byte{0x01, 0x02, 0x03} c.Fallback(opts, calldata) @@ -1746,11 +1846,16 @@ func TestGolangBindings(t *testing.T) { t.Fatalf("failed to convert binding test to modules: %v\n%s", err, out) } pwd, _ := os.Getwd() - replacer := exec.Command(gocmd, "mod", "edit", "-replace", "github.com/ethereum/go-ethereum="+filepath.Join(pwd, "..", "..", "..")) // Repo root + replacer := exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/ethereum/go-ethereum@v0.0.0", "-replace", "github.com/ethereum/go-ethereum="+filepath.Join(pwd, "..", "..", "..")) // Repo root replacer.Dir = pkg if out, err := replacer.CombinedOutput(); err != nil { t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out) } + tidier := exec.Command(gocmd, "mod", "tidy") + tidier.Dir = pkg + if out, err := tidier.CombinedOutput(); err != nil { + t.Fatalf("failed to tidy Go module file: %v\n%s", err, out) + } // Test the entire 
package and report any failures cmd := exec.Command(gocmd, "test", "-v", "-count", "1") cmd.Dir = pkg diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go index 5329b3ebc..492bad8c5 100644 --- a/accounts/abi/bind/template.go +++ b/accounts/abi/bind/template.go @@ -90,6 +90,7 @@ package {{.Package}} import ( "math/big" "strings" + "errors" ethereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" @@ -101,6 +102,7 @@ import ( // Reference imports to suppress errors if they are not otherwise used. var ( + _ = errors.New _ = big.NewInt _ = strings.NewReader _ = ethereum.NotFound @@ -120,32 +122,48 @@ var ( {{end}} {{range $contract := .Contracts}} + // {{.Type}}MetaData contains all meta data concerning the {{.Type}} contract. + var {{.Type}}MetaData = &bind.MetaData{ + ABI: "{{.InputABI}}", + {{if $contract.FuncSigs -}} + Sigs: map[string]string{ + {{range $strsig, $binsig := .FuncSigs}}"{{$binsig}}": "{{$strsig}}", + {{end}} + }, + {{end -}} + {{if .InputBin -}} + Bin: "0x{{.InputBin}}", + {{end}} + } // {{.Type}}ABI is the input ABI used to generate the binding from. - const {{.Type}}ABI = "{{.InputABI}}" + // Deprecated: Use {{.Type}}MetaData.ABI instead. + var {{.Type}}ABI = {{.Type}}MetaData.ABI {{if $contract.FuncSigs}} + // Deprecated: Use {{.Type}}MetaData.Sigs instead. // {{.Type}}FuncSigs maps the 4-byte function signature to its string representation. - var {{.Type}}FuncSigs = map[string]string{ - {{range $strsig, $binsig := .FuncSigs}}"{{$binsig}}": "{{$strsig}}", - {{end}} - } + var {{.Type}}FuncSigs = {{.Type}}MetaData.Sigs {{end}} {{if .InputBin}} // {{.Type}}Bin is the compiled bytecode used for deploying new contracts. - var {{.Type}}Bin = "0x{{.InputBin}}" + // Deprecated: Use {{.Type}}MetaData.Bin instead. + var {{.Type}}Bin = {{.Type}}MetaData.Bin // Deploy{{.Type}} deploys a new Ethereum contract, binding an instance of {{.Type}} to it. 
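The generated Deploy function now goes through the contract's MetaData value rather than re-parsing the ABI string on every call. A hedged sketch of using bind.MetaData directly; the ABI literal is borrowed from the NonExistentStruct fixture above, and a generated binding would expose the same thing as <Type>MetaData:

```
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
)

func main() {
	// Generated bindings now expose one MetaData value per contract.
	md := &bind.MetaData{
		ABI: `[{"inputs":[],"name":"Struct","outputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"}],"stateMutability":"pure","type":"function"}]`,
	}
	// GetAbi parses the JSON once and caches the result behind a mutex, so
	// Deploy and the New<Type> constructors can share the parsed ABI.
	parsed, err := md.GetAbi()
	if err != nil {
		panic(err)
	}
	fmt.Println("bound methods:", len(parsed.Methods))
}
```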
func Deploy{{.Type}}(auth *bind.TransactOpts, backend bind.ContractBackend {{range .Constructor.Inputs}}, {{.Name}} {{bindtype .Type $structs}}{{end}}) (common.Address, *types.Transaction, *{{.Type}}, error) { - parsed, err := abi.JSON(strings.NewReader({{.Type}}ABI)) + parsed, err := {{.Type}}MetaData.GetAbi() if err != nil { return common.Address{}, nil, nil, err } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } {{range $pattern, $name := .Libraries}} {{decapitalise $name}}Addr, _, _, _ := Deploy{{capitalise $name}}(auth, backend) {{$contract.Type}}Bin = strings.Replace({{$contract.Type}}Bin, "__${{$pattern}}$__", {{decapitalise $name}}Addr.String()[2:], -1) {{end}} - address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}}) + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}}) if err != nil { return common.Address{}, nil, nil, err } @@ -304,8 +322,11 @@ var ( err := _{{$contract.Type}}.contract.Call(opts, &out, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}}) {{if .Structured}} outstruct := new(struct{ {{range .Normalized.Outputs}} {{.Name}} {{bindtype .Type $structs}}; {{end}} }) + if err != nil { + return *outstruct, err + } {{range $i, $t := .Normalized.Outputs}} - outstruct.{{.Name}} = out[{{$i}}].({{bindtype .Type $structs}}){{end}} + outstruct.{{.Name}} = *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}} return *outstruct, err {{else}} @@ -541,6 +562,7 @@ var ( if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil { return nil, err } + event.Raw = log return event, nil } diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go index 9f9b7a000..75fbc91ce 100644 --- a/accounts/abi/bind/util_test.go +++ b/accounts/abi/bind/util_test.go @@ -56,14 +56,17 @@ func TestWaitDeployed(t *testing.T) { for name, test := range waitDeployedTests { backend := backends.NewSimulatedBackend( core.GenesisAlloc{ - crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)}, + crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)}, }, 10000000, ) defer backend.Close() - // Create the transaction. - tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code)) + // Create the transaction + head, _ := backend.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewContractCreation(0, big.NewInt(0), test.gas, gasPrice, common.FromHex(test.code)) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) // Wait for it to get mined in the background. @@ -99,15 +102,18 @@ func TestWaitDeployed(t *testing.T) { func TestWaitDeployedCornerCases(t *testing.T) { backend := backends.NewSimulatedBackend( core.GenesisAlloc{ - crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)}, + crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)}, }, 10000000, ) defer backend.Close() + head, _ := backend.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + // Create a transaction to an account. 
code := "6060604052600a8060106000396000f360606040526008565b00" - tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, big.NewInt(1), common.FromHex(code)) + tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, gasPrice, common.FromHex(code)) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -119,7 +125,7 @@ func TestWaitDeployedCornerCases(t *testing.T) { } // Create a transaction that is not mined. - tx = types.NewContractCreation(1, big.NewInt(0), 3000000, big.NewInt(1), common.FromHex(code)) + tx = types.NewContractCreation(1, big.NewInt(0), 3000000, gasPrice, common.FromHex(code)) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) go func() { diff --git a/accounts/abi/reflect_test.go b/accounts/abi/reflect_test.go index bac4cd953..cf13a79da 100644 --- a/accounts/abi/reflect_test.go +++ b/accounts/abi/reflect_test.go @@ -202,12 +202,12 @@ func TestConvertType(t *testing.T) { fields = append(fields, reflect.StructField{ Name: "X", Type: reflect.TypeOf(new(big.Int)), - Tag: reflect.StructTag("json:\"" + "x" + "\""), + Tag: "json:\"" + "x" + "\"", }) fields = append(fields, reflect.StructField{ Name: "Y", Type: reflect.TypeOf(new(big.Int)), - Tag: reflect.StructTag("json:\"" + "y" + "\""), + Tag: "json:\"" + "y" + "\"", }) val := reflect.New(reflect.StructOf(fields)) val.Elem().Field(0).Set(reflect.ValueOf(big.NewInt(1))) diff --git a/accounts/abi/type_test.go b/accounts/abi/type_test.go index 48df3aa38..8c3aedca6 100644 --- a/accounts/abi/type_test.go +++ b/accounts/abi/type_test.go @@ -255,7 +255,7 @@ func TestTypeCheck(t *testing.T) { {"bytes", nil, [2]byte{0, 1}, "abi: cannot use array as type slice as argument"}, {"bytes", nil, common.Hash{1}, "abi: cannot use array as type slice as argument"}, {"string", nil, "hello world", ""}, - {"string", nil, string(""), ""}, + {"string", nil, "", ""}, {"string", nil, []byte{}, "abi: cannot use slice as type string as argument"}, {"bytes32[]", nil, [][32]byte{{}}, ""}, {"function", nil, [24]byte{}, ""}, diff --git a/accounts/accounts.go b/accounts/accounts.go index 5f28319de..c503e40c7 100644 --- a/accounts/accounts.go +++ b/accounts/accounts.go @@ -21,7 +21,7 @@ import ( "fmt" "math/big" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" @@ -114,7 +114,7 @@ type Wallet interface { SignData(account Account, mimeType string, data []byte) ([]byte, error) // SignDataWithPassphrase is identical to SignData, but also takes a password - // NOTE: there's an chance that an erroneous call might mistake the two strings, and + // NOTE: there's a chance that an erroneous call might mistake the two strings, and // supply password in the mimetype field, or vice versa. Thus, an implementation // should never echo the mimetype or return the mimetype in the error-response SignDataWithPassphrase(account Account, passphrase, mimeType string, data []byte) ([]byte, error) @@ -128,7 +128,7 @@ type Wallet interface { // a password to decrypt the account, or a PIN code o verify the transaction), // an AuthNeededError instance will be returned, containing infos for the user // about which fields or actions are needed. The user may retry by providing - // the needed details via SignHashWithPassphrase, or by other means (e.g. 
unlock + // the needed details via SignTextWithPassphrase, or by other means (e.g. unlock // the account in a keystore). // // This method should return the signature in 'canonical' format, with v 0 or 1 diff --git a/accounts/external/backend.go b/accounts/external/backend.go index ffa62e10e..b3cd959c8 100644 --- a/accounts/external/backend.go +++ b/accounts/external/backend.go @@ -29,7 +29,7 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/signer/core" + "github.com/ethereum/go-ethereum/signer/core/apitypes" ) type ExternalBackend struct { @@ -196,6 +196,10 @@ type signTransactionResult struct { Tx *types.Transaction `json:"tx"` } +// SignTx sends the transaction to the external signer. +// If chainID is nil, or tx.ChainID is zero, the chain ID will be assigned +// by the external signer. For non-legacy transactions, the chain ID of the +// transaction overrides the chainID parameter. func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { data := hexutil.Bytes(tx.Data()) var to *common.MixedcaseAddress @@ -203,14 +207,36 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio t := common.NewMixedcaseAddress(*tx.To()) to = &t } - args := &core.SendTxArgs{ - Data: &data, - Nonce: hexutil.Uint64(tx.Nonce()), - Value: hexutil.Big(*tx.Value()), - Gas: hexutil.Uint64(tx.Gas()), - GasPrice: hexutil.Big(*tx.GasPrice()), - To: to, - From: common.NewMixedcaseAddress(account.Address), + args := &apitypes.SendTxArgs{ + Data: &data, + Nonce: hexutil.Uint64(tx.Nonce()), + Value: hexutil.Big(*tx.Value()), + Gas: hexutil.Uint64(tx.Gas()), + To: to, + From: common.NewMixedcaseAddress(account.Address), + } + switch tx.Type() { + case types.LegacyTxType, types.AccessListTxType: + args.GasPrice = (*hexutil.Big)(tx.GasPrice()) + case types.DynamicFeeTxType: + args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap()) + args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap()) + default: + return nil, fmt.Errorf("unsupported tx type %d", tx.Type()) + } + // We should request the default chain id that we're operating with + // (the chain we're executing on) + if chainID != nil && chainID.Sign() != 0 { + args.ChainID = (*hexutil.Big)(chainID) + } + if tx.Type() != types.LegacyTxType { + // However, if the user asked for a particular chain id, then we should + // use that instead. + if tx.ChainId().Sign() != 0 { + args.ChainID = (*hexutil.Big)(tx.ChainId()) + } + accessList := tx.AccessList() + args.AccessList = &accessList } var res signTransactionResult if err := api.client.Call(&res, "account_signTransaction", args); err != nil { diff --git a/accounts/hd.go b/accounts/hd.go index 75c476110..54acea3b2 100644 --- a/accounts/hd.go +++ b/accounts/hd.go @@ -150,3 +150,31 @@ func (path *DerivationPath) UnmarshalJSON(b []byte) error { *path, err = ParseDerivationPath(dp) return err } + +// DefaultIterator creates a BIP-32 path iterator, which progresses by increasing the last component: +// i.e. m/44'/60'/0'/0/0, m/44'/60'/0'/0/1, m/44'/60'/0'/0/2, ... m/44'/60'/0'/0/N. 
+func DefaultIterator(base DerivationPath) func() DerivationPath { + path := make(DerivationPath, len(base)) + copy(path[:], base[:]) + // Set it back by one, so the first call gives the first result + path[len(path)-1]-- + return func() DerivationPath { + path[len(path)-1]++ + return path + } +} + +// LedgerLiveIterator creates a bip44 path iterator for Ledger Live. +// Ledger Live increments the third component rather than the fifth component +// i.e. m/44'/60'/0'/0/0, m/44'/60'/1'/0/0, m/44'/60'/2'/0/0, ... m/44'/60'/N'/0/0. +func LedgerLiveIterator(base DerivationPath) func() DerivationPath { + path := make(DerivationPath, len(base)) + copy(path[:], base[:]) + // Set it back by one, so the first call gives the first result + path[2]-- + return func() DerivationPath { + // ledgerLivePathIterator iterates on the third component + path[2]++ + return path + } +} diff --git a/accounts/hd_test.go b/accounts/hd_test.go index 3156a487e..0743bbe66 100644 --- a/accounts/hd_test.go +++ b/accounts/hd_test.go @@ -17,6 +17,7 @@ package accounts import ( + "fmt" "reflect" "testing" ) @@ -77,3 +78,41 @@ func TestHDPathParsing(t *testing.T) { } } } + +func testDerive(t *testing.T, next func() DerivationPath, expected []string) { + t.Helper() + for i, want := range expected { + if have := next(); fmt.Sprintf("%v", have) != want { + t.Errorf("step %d, have %v, want %v", i, have, want) + } + } +} + +func TestHdPathIteration(t *testing.T) { + testDerive(t, DefaultIterator(DefaultBaseDerivationPath), + []string{ + "m/44'/60'/0'/0/0", "m/44'/60'/0'/0/1", + "m/44'/60'/0'/0/2", "m/44'/60'/0'/0/3", + "m/44'/60'/0'/0/4", "m/44'/60'/0'/0/5", + "m/44'/60'/0'/0/6", "m/44'/60'/0'/0/7", + "m/44'/60'/0'/0/8", "m/44'/60'/0'/0/9", + }) + + testDerive(t, DefaultIterator(LegacyLedgerBaseDerivationPath), + []string{ + "m/44'/60'/0'/0", "m/44'/60'/0'/1", + "m/44'/60'/0'/2", "m/44'/60'/0'/3", + "m/44'/60'/0'/4", "m/44'/60'/0'/5", + "m/44'/60'/0'/6", "m/44'/60'/0'/7", + "m/44'/60'/0'/8", "m/44'/60'/0'/9", + }) + + testDerive(t, LedgerLiveIterator(DefaultBaseDerivationPath), + []string{ + "m/44'/60'/0'/0/0", "m/44'/60'/1'/0/0", + "m/44'/60'/2'/0/0", "m/44'/60'/3'/0/0", + "m/44'/60'/4'/0/0", "m/44'/60'/5'/0/0", + "m/44'/60'/6'/0/0", "m/44'/60'/7'/0/0", + "m/44'/60'/8'/0/0", "m/44'/60'/9'/0/0", + }) +} diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go index 8f660e282..a3ec6e9c5 100644 --- a/accounts/keystore/account_cache.go +++ b/accounts/keystore/account_cache.go @@ -262,7 +262,7 @@ func (ac *accountCache) scanAccounts() error { switch { case err != nil: log.Debug("Failed to decode keystore key", "path", path, "err", err) - case (addr == common.Address{}): + case addr == common.Address{}: log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address") default: return &accounts.Account{ diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go index 84d8df0c5..2b815ce0f 100644 --- a/accounts/keystore/key.go +++ b/accounts/keystore/key.go @@ -32,7 +32,7 @@ import ( "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" + "github.com/google/uuid" ) const ( @@ -110,7 +110,10 @@ func (k *Key) UnmarshalJSON(j []byte) (err error) { } u := new(uuid.UUID) - *u = uuid.Parse(keyJSON.Id) + *u, err = uuid.Parse(keyJSON.Id) + if err != nil { + return err + } k.Id = *u addr, err := hex.DecodeString(keyJSON.Address) if err != nil { @@ -128,7 +131,10 @@ func (k *Key) 
UnmarshalJSON(j []byte) (err error) { } func newKeyFromECDSA(privateKeyECDSA *ecdsa.PrivateKey) *Key { - id := uuid.NewRandom() + id, err := uuid.NewRandom() + if err != nil { + panic(fmt.Sprintf("Could not create random uuid: %v", err)) + } key := &Key{ Id: id, Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey), diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go index 9d5e2cf6a..88dcfbeb6 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -283,11 +283,9 @@ func (ks *KeyStore) SignTx(a accounts.Account, tx *types.Transaction, chainID *b if !found { return nil, ErrLocked } - // Depending on the presence of the chain ID, sign with EIP155 or homestead - if chainID != nil { - return types.SignTx(tx, types.NewEIP155Signer(chainID), unlockedKey.PrivateKey) - } - return types.SignTx(tx, types.HomesteadSigner{}, unlockedKey.PrivateKey) + // Depending on the presence of the chain ID, sign with 2718 or homestead + signer := types.LatestSignerForChainID(chainID) + return types.SignTx(tx, signer, unlockedKey.PrivateKey) } // SignHashWithPassphrase signs hash if the private key matching the given address @@ -310,12 +308,9 @@ func (ks *KeyStore) SignTxWithPassphrase(a accounts.Account, passphrase string, return nil, err } defer zeroKey(key.PrivateKey) - - // Depending on the presence of the chain ID, sign with EIP155 or homestead - if chainID != nil { - return types.SignTx(tx, types.NewEIP155Signer(chainID), key.PrivateKey) - } - return types.SignTx(tx, types.HomesteadSigner{}, key.PrivateKey) + // Depending on the presence of the chain ID, sign with or without replay protection. + signer := types.LatestSignerForChainID(chainID) + return types.SignTx(tx, signer, key.PrivateKey) } // Unlock unlocks the given account indefinitely. 
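The keystore now defers signer selection to types.LatestSignerForChainID, so a supplied chain ID yields a signer that accepts typed (EIP-2718/EIP-1559) transactions as well as legacy ones, while a nil chain ID still falls back to Homestead rules. A minimal sketch of the equivalent standalone signing, with illustrative fee values:

```
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	key, _ := crypto.GenerateKey()
	to := common.HexToAddress("0x0000000000000000000000000000000000000001")
	chainID := big.NewInt(1337)

	// One signer for legacy and typed transactions alike.
	signer := types.LatestSignerForChainID(chainID)

	tx := types.NewTx(&types.DynamicFeeTx{
		ChainID:   chainID,
		Nonce:     0,
		GasTipCap: big.NewInt(1),
		GasFeeCap: big.NewInt(2e9),
		Gas:       params.TxGas,
		To:        &to,
		Value:     big.NewInt(1),
	})
	signed, err := types.SignTx(tx, signer, key)
	if err != nil {
		panic(err)
	}
	fmt.Println("signed tx hash:", signed.Hash())
}
```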
diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go index 89cdf0bfc..3b3e63188 100644 --- a/accounts/keystore/passphrase.go +++ b/accounts/keystore/passphrase.go @@ -42,7 +42,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" + "github.com/google/uuid" "golang.org/x/crypto/pbkdf2" "golang.org/x/crypto/scrypt" ) @@ -228,9 +228,12 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { return nil, err } key := crypto.ToECDSAUnsafe(keyBytes) - + id, err := uuid.FromBytes(keyId) + if err != nil { + return nil, err + } return &Key{ - Id: uuid.UUID(keyId), + Id: id, Address: crypto.PubkeyToAddress(key.PublicKey), PrivateKey: key, }, nil @@ -276,7 +279,11 @@ func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byt if keyProtected.Version != version { return nil, nil, fmt.Errorf("version not supported: %v", keyProtected.Version) } - keyId = uuid.Parse(keyProtected.Id) + keyUUID, err := uuid.Parse(keyProtected.Id) + if err != nil { + return nil, nil, err + } + keyId = keyUUID[:] plainText, err := DecryptDataV3(keyProtected.Crypto, auth) if err != nil { return nil, nil, err @@ -285,7 +292,11 @@ func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byt } func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byte, keyId []byte, err error) { - keyId = uuid.Parse(keyProtected.Id) + keyUUID, err := uuid.Parse(keyProtected.Id) + if err != nil { + return nil, nil, err + } + keyId = keyUUID[:] mac, err := hex.DecodeString(keyProtected.Crypto.MAC) if err != nil { return nil, nil, err diff --git a/accounts/keystore/presale.go b/accounts/keystore/presale.go index 03055245f..0664dc2cd 100644 --- a/accounts/keystore/presale.go +++ b/accounts/keystore/presale.go @@ -27,7 +27,7 @@ import ( "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" + "github.com/google/uuid" "golang.org/x/crypto/pbkdf2" ) @@ -37,7 +37,10 @@ func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accou if err != nil { return accounts.Account{}, nil, err } - key.Id = uuid.NewRandom() + key.Id, err = uuid.NewRandom() + if err != nil { + return accounts.Account{}, nil, err + } a := accounts.Account{ Address: key.Address, URL: accounts.URL{ @@ -86,7 +89,7 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error ecKey := crypto.ToECDSAUnsafe(ethPriv) key = &Key{ - Id: nil, + Id: uuid.UUID{}, Address: crypto.PubkeyToAddress(ecKey.PublicKey), PrivateKey: ecKey, } diff --git a/accounts/keystore/wallet.go b/accounts/keystore/wallet.go index 498067d49..1066095f6 100644 --- a/accounts/keystore/wallet.go +++ b/accounts/keystore/wallet.go @@ -19,7 +19,7 @@ package keystore import ( "math/big" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -58,7 +58,7 @@ func (w *keystoreWallet) Open(passphrase string) error { return nil } func (w *keystoreWallet) Close() error { return nil } // Accounts implements accounts.Wallet, returning an account list consisting of -// a single account that the plain kestore wallet contains. +// a single account that the plain keystore wallet contains. 
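A small sketch of the github.com/google/uuid calls the keystore now relies on; unlike pborman/uuid, Parse and NewRandom return errors that must be checked (the UUID literal below is purely illustrative):

```
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Key IDs read from keystore JSON must now be validated explicitly.
	id, err := uuid.Parse("3198bc9c-6672-5ab3-d995-4942343ae5b6")
	if err != nil {
		panic(err)
	}
	fmt.Println("parsed key id:", id)

	// Fresh key IDs are generated the same way, again with an error to handle.
	fresh, err := uuid.NewRandom()
	if err != nil {
		panic(err)
	}
	fmt.Println("new key id:", fresh)
}
```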
func (w *keystoreWallet) Accounts() []accounts.Account { return []accounts.Account{w.account} } @@ -93,12 +93,12 @@ func (w *keystoreWallet) signHash(account accounts.Account, hash []byte) ([]byte return w.keystore.SignHash(account, hash) } -// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed +// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed. func (w *keystoreWallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) { return w.signHash(account, crypto.Keccak256(data)) } -// SignDataWithPassphrase signs keccak256(data). The mimetype parameter describes the type of data being signed +// SignDataWithPassphrase signs keccak256(data). The mimetype parameter describes the type of data being signed. func (w *keystoreWallet) SignDataWithPassphrase(account accounts.Account, passphrase, mimeType string, data []byte) ([]byte, error) { // Make sure the requested account is contained within if !w.Contains(account) { @@ -108,12 +108,14 @@ func (w *keystoreWallet) SignDataWithPassphrase(account accounts.Account, passph return w.keystore.SignHashWithPassphrase(account, passphrase, crypto.Keccak256(data)) } +// SignText implements accounts.Wallet, attempting to sign the hash of +// the given text with the given account. func (w *keystoreWallet) SignText(account accounts.Account, text []byte) ([]byte, error) { return w.signHash(account, accounts.TextHash(text)) } // SignTextWithPassphrase implements accounts.Wallet, attempting to sign the -// given hash with the given account using passphrase as extra authentication. +// hash of the given text with the given account using passphrase as extra authentication. func (w *keystoreWallet) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) { // Make sure the requested account is contained within if !w.Contains(account) { diff --git a/accounts/scwallet/README.md b/accounts/scwallet/README.md index cfca916b3..4313d9c6b 100644 --- a/accounts/scwallet/README.md +++ b/accounts/scwallet/README.md @@ -31,12 +31,16 @@ Write down the URL (`keycard://044def09` in this example). Then ask `geth` to open the wallet: ``` - > personal.openWallet("keycard://044def09") - Please enter the pairing password: + > personal.openWallet("keycard://044def09", "pairing password") ``` - Enter the pairing password that you have received during card initialization. Same with the PIN that you will subsequently be - asked for. + The pairing password has been generated during the card initialization process. + + The process needs to be repeated once more with the PIN: + + ``` + > personal.openWallet("keycard://044def09", "PIN number") + ``` If everything goes well, you should see your new account when typing `personal` on the console: diff --git a/accounts/scwallet/securechannel.go b/accounts/scwallet/securechannel.go index 9b70c69dc..10887a8b4 100644 --- a/accounts/scwallet/securechannel.go +++ b/accounts/scwallet/securechannel.go @@ -20,6 +20,7 @@ import ( "bytes" "crypto/aes" "crypto/cipher" + "crypto/elliptic" "crypto/rand" "crypto/sha256" "crypto/sha512" @@ -27,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" pcsc "github.com/gballet/go-libpcsclite" - "github.com/wsddn/go-ecdh" "golang.org/x/crypto/pbkdf2" "golang.org/x/text/unicode/norm" ) @@ -63,26 +63,19 @@ type SecureChannelSession struct { // NewSecureChannelSession creates a new secure channel for the given card and public key. 
func NewSecureChannelSession(card *pcsc.Card, keyData []byte) (*SecureChannelSession, error) { // Generate an ECDSA keypair for ourselves - gen := ecdh.NewEllipticECDH(crypto.S256()) - private, public, err := gen.GenerateKey(rand.Reader) + key, err := crypto.GenerateKey() if err != nil { return nil, err } - - cardPublic, ok := gen.Unmarshal(keyData) - if !ok { - return nil, fmt.Errorf("could not unmarshal public key from card") - } - - secret, err := gen.GenerateSharedSecret(private, cardPublic) + cardPublic, err := crypto.UnmarshalPubkey(keyData) if err != nil { - return nil, err + return nil, fmt.Errorf("could not unmarshal public key from card: %v", err) } - + secret, _ := key.Curve.ScalarMult(cardPublic.X, cardPublic.Y, key.D.Bytes()) return &SecureChannelSession{ card: card, - secret: secret, - publicKey: gen.Marshal(public), + secret: secret.Bytes(), + publicKey: elliptic.Marshal(crypto.S256(), key.PublicKey.X, key.PublicKey.Y), }, nil } diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go index 85fae8c11..b4d229bc0 100644 --- a/accounts/scwallet/wallet.go +++ b/accounts/scwallet/wallet.go @@ -33,7 +33,7 @@ import ( "sync" "time" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -699,7 +699,7 @@ func (w *Wallet) signHash(account accounts.Account, hash []byte) ([]byte, error) // the needed details via SignTxWithPassphrase, or by other means (e.g. unlock // the account in a keystore). func (w *Wallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { - signer := types.NewEIP155Signer(chainID) + signer := types.LatestSignerForChainID(chainID) hash := signer.Hash(tx) sig, err := w.signHash(account, hash[:]) if err != nil { diff --git a/accounts/url.go b/accounts/url.go index a5add1021..12a84414a 100644 --- a/accounts/url.go +++ b/accounts/url.go @@ -64,7 +64,7 @@ func (u URL) String() string { func (u URL) TerminalString() string { url := u.String() if len(url) > 32 { - return url[:31] + "…" + return url[:31] + ".." 
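The NewSecureChannelSession rewrite above drops the go-ecdh dependency and performs the Diffie-Hellman step directly on secp256k1. A standalone sketch of the same computation, assuming only go-ethereum's crypto package and the standard library, with both "sides" of the exchange simulated locally:

```go
package main

import (
	"bytes"
	"crypto/elliptic"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	host, _ := crypto.GenerateKey()
	card, _ := crypto.GenerateKey()

	// What the card would send us: its uncompressed public key.
	cardPub := elliptic.Marshal(crypto.S256(), card.PublicKey.X, card.PublicKey.Y)

	// Our side, as in NewSecureChannelSession above.
	peer, _ := crypto.UnmarshalPubkey(cardPub)
	sx, _ := host.Curve.ScalarMult(peer.X, peer.Y, host.D.Bytes())

	// The card's side of the same computation.
	cx, _ := card.Curve.ScalarMult(host.PublicKey.X, host.PublicKey.Y, card.D.Bytes())

	fmt.Println("shared secrets match:", bytes.Equal(sx.Bytes(), cx.Bytes()))
}
```

As in the rewritten function, only the X coordinate of the resulting point is kept as the shared secret.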
} return url } diff --git a/accounts/usbwallet/ledger.go b/accounts/usbwallet/ledger.go index 64eae64f6..3de3b4091 100644 --- a/accounts/usbwallet/ledger.go +++ b/accounts/usbwallet/ledger.go @@ -52,8 +52,10 @@ const ( ledgerOpRetrieveAddress ledgerOpcode = 0x02 // Returns the public key and Ethereum address for a given BIP 32 path ledgerOpSignTransaction ledgerOpcode = 0x04 // Signs an Ethereum transaction after having the user validate the parameters ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration + ledgerOpSignTypedMessage ledgerOpcode = 0x0c // Signs an Ethereum message following the EIP 712 specification ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet + ledgerP1InitTypedMessageData ledgerParam1 = 0x00 // First chunk of Typed Message data ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address @@ -162,7 +164,7 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio return common.Address{}, nil, accounts.ErrWalletClosed } // Ensure the wallet is capable of signing the given transaction - if chainID != nil && w.version[0] <= 1 && w.version[2] <= 2 { + if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 { //lint:ignore ST1005 brand name displayed on the console return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2]) } @@ -170,6 +172,24 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio return w.ledgerSign(path, tx, chainID) } +// SignTypedMessage implements usbwallet.driver, sending the message to the Ledger and +// waiting for the user to sign or deny the transaction. +// +// Note: this was introduced in the ledger 1.5.0 firmware +func (w *ledgerDriver) SignTypedMessage(path accounts.DerivationPath, domainHash []byte, messageHash []byte) ([]byte, error) { + // If the Ethereum app doesn't run, abort + if w.offline() { + return nil, accounts.ErrWalletClosed + } + // Ensure the wallet is capable of signing the given transaction + if w.version[0] < 1 && w.version[1] < 5 { + //lint:ignore ST1005 brand name displayed on the console + return nil, fmt.Errorf("Ledger version >= 1.5.0 required for EIP-712 signing (found version v%d.%d.%d)", w.version[0], w.version[1], w.version[2]) + } + // All infos gathered and metadata checks out, request signing + return w.ledgerSignTypedMessage(path, domainHash, messageHash) +} + // ledgerVersion retrieves the current version of the Ethereum wallet app running // on the Ledger wallet. // @@ -367,6 +387,68 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction return sender, signed, nil } +// ledgerSignTypedMessage sends the transaction to the Ledger wallet, and waits for the user +// to confirm or deny the transaction. 
+// +// The signing protocol is defined as follows: +// +// CLA | INS | P1 | P2 | Lc | Le +// ----+-----+----+-----------------------------+-----+--- +// E0 | 0C | 00 | implementation version : 00 | variable | variable +// +// Where the input is: +// +// Description | Length +// -------------------------------------------------+---------- +// Number of BIP 32 derivations to perform (max 10) | 1 byte +// First derivation index (big endian) | 4 bytes +// ... | 4 bytes +// Last derivation index (big endian) | 4 bytes +// domain hash | 32 bytes +// message hash | 32 bytes +// +// +// +// And the output data is: +// +// Description | Length +// ------------+--------- +// signature V | 1 byte +// signature R | 32 bytes +// signature S | 32 bytes +func (w *ledgerDriver) ledgerSignTypedMessage(derivationPath []uint32, domainHash []byte, messageHash []byte) ([]byte, error) { + // Flatten the derivation path into the Ledger request + path := make([]byte, 1+4*len(derivationPath)) + path[0] = byte(len(derivationPath)) + for i, component := range derivationPath { + binary.BigEndian.PutUint32(path[1+4*i:], component) + } + // Create the 712 message + payload := append(path, domainHash...) + payload = append(payload, messageHash...) + + // Send the request and wait for the response + var ( + op = ledgerP1InitTypedMessageData + reply []byte + err error + ) + + // Send the message over, ensuring it's processed correctly + reply, err = w.ledgerExchange(ledgerOpSignTypedMessage, op, 0, payload) + + if err != nil { + return nil, err + } + + // Extract the Ethereum signature and do a sanity validation + if len(reply) != crypto.SignatureLength { + return nil, errors.New("reply lacks signature") + } + signature := append(reply[1:], reply[0]) + return signature, nil +} + // ledgerExchange performs a data exchange with the Ledger wallet, sending it a // message and retrieving the response. // diff --git a/accounts/usbwallet/trezor.go b/accounts/usbwallet/trezor.go index 1892097ba..c2182b88d 100644 --- a/accounts/usbwallet/trezor.go +++ b/accounts/usbwallet/trezor.go @@ -185,6 +185,10 @@ func (w *trezorDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio return w.trezorSign(path, tx, chainID) } +func (w *trezorDriver) SignTypedMessage(path accounts.DerivationPath, domainHash []byte, messageHash []byte) ([]byte, error) { + return nil, accounts.ErrNotSupported +} + // trezorDerive sends a derivation request to the Trezor device and returns the // Ethereum address located on that path. func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, error) { @@ -255,9 +259,11 @@ func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction if chainID == nil { signer = new(types.HomesteadSigner) } else { + // Trezor backend does not support typed transactions yet. 
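The APDU layout documented in the comment above can be seen end to end in a short sketch. The helper below is hypothetical (not part of the driver) and only reproduces the table: a length-prefixed list of big-endian BIP-32 indices, then the 32-byte EIP-712 domain hash, then the 32-byte message hash. The reply comes back as V, R, S and is rotated into Ethereum's R, S, V order by the `append(reply[1:], reply[0])` call above.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// typedMessagePayload builds the request body described in the protocol table.
func typedMessagePayload(path []uint32, domainHash, messageHash [32]byte) []byte {
	payload := make([]byte, 1+4*len(path), 1+4*len(path)+64)
	payload[0] = byte(len(path)) // number of BIP-32 derivations (max 10)
	for i, component := range path {
		binary.BigEndian.PutUint32(payload[1+4*i:], component)
	}
	payload = append(payload, domainHash[:]...)
	return append(payload, messageHash[:]...)
}

func main() {
	// m/44'/60'/0'/0/0, the default Ethereum derivation path.
	path := []uint32{0x8000002c, 0x8000003c, 0x80000000, 0, 0}
	p := typedMessagePayload(path, [32]byte{0xaa}, [32]byte{0xbb})
	fmt.Printf("%d bytes, starts with %x\n", len(p), p[:8])
}
```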
signer = types.NewEIP155Signer(chainID) signature[64] -= byte(chainID.Uint64()*2 + 35) } + // Inject the final signature into the transaction and sanity check the sender signed, err := tx.WithSignature(signer, signature) if err != nil { diff --git a/accounts/usbwallet/wallet.go b/accounts/usbwallet/wallet.go index e39c6bdf3..b6f181448 100644 --- a/accounts/usbwallet/wallet.go +++ b/accounts/usbwallet/wallet.go @@ -25,7 +25,7 @@ import ( "sync" "time" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -67,6 +67,8 @@ type driver interface { // SignTx sends the transaction to the USB device and waits for the user to confirm // or deny the transaction. SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) + + SignTypedMessage(path accounts.DerivationPath, messageHash []byte, domainHash []byte) ([]byte, error) } // wallet represents the common functionality shared by all USB hardware @@ -524,7 +526,46 @@ func (w *wallet) signHash(account accounts.Account, hash []byte) ([]byte, error) // SignData signs keccak256(data). The mimetype parameter describes the type of data being signed func (w *wallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) { - return w.signHash(account, crypto.Keccak256(data)) + + // Unless we are doing 712 signing, simply dispatch to signHash + if !(mimeType == accounts.MimetypeTypedData && len(data) == 66 && data[0] == 0x19 && data[1] == 0x01) { + return w.signHash(account, crypto.Keccak256(data)) + } + + // dispatch to 712 signing if the mimetype is TypedData and the format matches + w.stateLock.RLock() // Comms have own mutex, this is for the state fields + defer w.stateLock.RUnlock() + + // If the wallet is closed, abort + if w.device == nil { + return nil, accounts.ErrWalletClosed + } + // Make sure the requested account is contained within + path, ok := w.paths[account.Address] + if !ok { + return nil, accounts.ErrUnknownAccount + } + // All infos gathered and metadata checks out, request signing + <-w.commsLock + defer func() { w.commsLock <- struct{}{} }() + + // Ensure the device isn't screwed with while user confirmation is pending + // TODO(karalabe): remove if hotplug lands on Windows + w.hub.commsLock.Lock() + w.hub.commsPend++ + w.hub.commsLock.Unlock() + + defer func() { + w.hub.commsLock.Lock() + w.hub.commsPend-- + w.hub.commsLock.Unlock() + }() + // Sign the transaction + signature, err := w.driver.SignTypedMessage(path, data[2:34], data[34:66]) + if err != nil { + return nil, err + } + return signature, nil } // SignDataWithPassphrase implements accounts.Wallet, attempting to sign the given diff --git a/appveyor.yml b/appveyor.yml index 7d6bf8763..a72163382 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,41 +1,29 @@ -os: Visual Studio 2015 - -# Clone directly into GOPATH. -clone_folder: C:\gopath\src\github.com\ethereum\go-ethereum +os: Visual Studio 2019 clone_depth: 5 version: "{branch}.{build}" environment: - global: - GO111MODULE: on - GOPATH: C:\gopath - CC: gcc.exe matrix: + # We use gcc from MSYS2 because it is the most recent compiler version available on + # AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is + # contained in PATH. 
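The usbwallet SignData change further down keys on a very specific input shape: a 66-byte EIP-712 signing preimage, 0x19 0x01 followed by the 32-byte domain separator and the 32-byte struct hash, which it splits into data[2:34] and data[34:66] before handing them to the hardware driver. A sketch of that preimage, with placeholder hashes rather than real EIP-712 output:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	domainSeparator := crypto.Keccak256([]byte("example domain"))
	structHash := crypto.Keccak256([]byte("example message"))

	// 0x19 0x01 || domain separator || struct hash, 66 bytes in total.
	data := append([]byte{0x19, 0x01}, domainSeparator...)
	data = append(data, structHash...)

	isTyped := len(data) == 66 && data[0] == 0x19 && data[1] == 0x01
	fmt.Println("mime type:", accounts.MimetypeTypedData, "dispatch to SignTypedMessage:", isTyped)
	fmt.Printf("domain hash:  %x\nmessage hash: %x\n", data[2:34], data[34:66])
}
```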
- GETH_ARCH: amd64 - MSYS2_ARCH: x86_64 - MSYS2_BITS: 64 - MSYSTEM: MINGW64 - PATH: C:\msys64\mingw64\bin\;C:\Program Files (x86)\NSIS\;%PATH% + GETH_CC: C:\msys64\mingw64\bin\gcc.exe + PATH: C:\msys64\mingw64\bin;C:\Program Files (x86)\NSIS\;%PATH% - GETH_ARCH: 386 - MSYS2_ARCH: i686 - MSYS2_BITS: 32 - MSYSTEM: MINGW32 - PATH: C:\msys64\mingw32\bin\;C:\Program Files (x86)\NSIS\;%PATH% + GETH_CC: C:\msys64\mingw32\bin\gcc.exe + PATH: C:\msys64\mingw32\bin;C:\Program Files (x86)\NSIS\;%PATH% install: - - git submodule update --init - - rmdir C:\go /s /q - - appveyor DownloadFile https://dl.google.com/go/go1.15.windows-%GETH_ARCH%.zip - - 7z x go1.15.windows-%GETH_ARCH%.zip -y -oC:\ > NUL + - git submodule update --init --depth 1 - go version - - gcc --version + - "%GETH_CC% --version" build_script: - - go run build\ci.go install + - go run build\ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC% after_build: - - go run build\ci.go archive -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - - go run build\ci.go nsis -signer WINDOWS_SIGNING_KEY -upload gethstore/builds + - go run build\ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds + - go run build\ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds test_script: - - set CGO_ENABLED=1 - - go run build\ci.go test -coverage + - go run build\ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage diff --git a/build/checksums.txt b/build/checksums.txt index 39f855cd0..e667b30ce 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,20 +1,33 @@ # This file contains sha256 checksums of optional build dependencies. -69438f7ed4f532154ffaf878f3dfd83747e7a00b70b3556eddabf7aaee28ac3a go1.15.src.tar.gz +ae4f6b6e2a1677d31817984655a762074b5356da50fb58722b99104870d43503 go1.16.4.src.tar.gz +18fe94775763db3878717393b6d41371b0b45206055e49b3838328120c977d13 go1.16.4.darwin-amd64.tar.gz +cb6b972cc42e669f3585c648198cd5b6f6d7a0811d413ad64b50c02ba06ccc3a go1.16.4.darwin-arm64.tar.gz +cd1b146ef6e9006f27dd99e9687773e7fef30e8c985b7d41bff33e955a3bb53a go1.16.4.linux-386.tar.gz +7154e88f5a8047aad4b80ebace58a059e36e7e2e4eb3b383127a28c711b4ff59 go1.16.4.linux-amd64.tar.gz +8b18eb05ddda2652d69ab1b1dd1f40dd731799f43c6a58b512ad01ae5b5bba21 go1.16.4.linux-arm64.tar.gz +a53391a800ddec749ee90d38992babb27b95cfb864027350c737b9aa8e069494 go1.16.4.linux-armv6l.tar.gz +e75c0b114a09eb5499874162b208931dc260de0fedaeedac8621bf263c974605 go1.16.4.windows-386.zip +d40139b7ade8a3008e3240a6f86fe8f899a9c465c917e11dac8758af216f5eb0 go1.16.4.windows-amd64.zip +7cf2bc8a175d6d656861165bfc554f92dc78d2abf5afe5631db3579555d97409 go1.16.4.freebsd-386.tar.gz +ccdd2b76de1941b60734408fda0d750aaa69330d8a07430eed4c56bdb3502f6f go1.16.4.freebsd-amd64.tar.gz +80cfac566e344096a8df8f37bbd21f89e76a6fbe601406565d71a87a665fc125 go1.16.4.linux-ppc64le.tar.gz +d6431881b3573dc29ecc24fbeab5e5ec25d8c9273aa543769c86a1a3bbac1ddf go1.16.4.linux-s390x.tar.gz -d998a84eea42f2271aca792a7b027ca5c1edfcba229e8e5a844c9ac3f336df35 golangci-lint-1.27.0-linux-armv7.tar.gz -bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4 golangci-lint.exe-1.27.0-windows-amd64.zip -bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4 golangci-lint-1.27.0-windows-amd64.zip -0e2a57d6ba709440d3ed018ef1037465fa010ed02595829092860e5cf863042e golangci-lint-1.27.0-freebsd-386.tar.gz -90205fc42ab5ed0096413e790d88ac9b4ed60f4c47e576d13dc0660f7ed4b013 golangci-lint-1.27.0-linux-arm64.tar.gz 
-8d345e4e88520e21c113d81978e89ad77fc5b13bfdf20e5bca86b83fc4261272 golangci-lint-1.27.0-linux-amd64.tar.gz -cc619634a77f18dc73df2a0725be13116d64328dc35131ca1737a850d6f76a59 golangci-lint-1.27.0-freebsd-armv7.tar.gz -fe683583cfc9eeec83e498c0d6159d87b5e1919dbe4b6c3b3913089642906069 golangci-lint-1.27.0-linux-s390x.tar.gz -058f5579bee75bdaacbaf75b75e1369f7ad877fd8b3b145aed17a17545de913e golangci-lint-1.27.0-freebsd-armv6.tar.gz -38e1e3dadbe3f56ab62b4de82ee0b88e8fad966d8dfd740a26ef94c2edef9818 golangci-lint-1.27.0-linux-armv6.tar.gz -071b34af5516f4e1ddcaea6011e18208f4f043e1af8ba21eeccad4585cb3d095 golangci-lint.exe-1.27.0-windows-386.zip -071b34af5516f4e1ddcaea6011e18208f4f043e1af8ba21eeccad4585cb3d095 golangci-lint-1.27.0-windows-386.zip -5f37e2b33914ecddb7cad38186ef4ec61d88172fc04f930fa0267c91151ff306 golangci-lint-1.27.0-linux-386.tar.gz -4d94cfb51fdebeb205f1d5a349ac2b683c30591c5150708073c1c329e15965f0 golangci-lint-1.27.0-freebsd-amd64.tar.gz -52572ba8ff07d5169c2365d3de3fec26dc55a97522094d13d1596199580fa281 golangci-lint-1.27.0-linux-ppc64le.tar.gz -3fb1a1683a29c6c0a8cd76135f62b606fbdd538d5a7aeab94af1af70ffdc2fd4 golangci-lint-1.27.0-darwin-amd64.tar.gz +7e9a47ab540aa3e8472fbf8120d28bed3b9d9cf625b955818e8bc69628d7187c golangci-lint-1.39.0-darwin-amd64.tar.gz +574daa2c9c299b01672a6daeb1873b5f12e413cdb6dc0e30f2ff163956778064 golangci-lint-1.39.0-darwin-arm64.tar.gz +6225f7014987324ab78e9b511f294e3f25be013728283c33918c67c8576d543e golangci-lint-1.39.0-freebsd-386.tar.gz +6b3e76e1e5eaf0159411c8e2727f8d533989d3bb19f10e9caa6e0b9619ee267d golangci-lint-1.39.0-freebsd-amd64.tar.gz +a301cacfff87ed9b00313d95278533c25a4527a06b040a17d969b4b7e1b8a90d golangci-lint-1.39.0-freebsd-armv7.tar.gz +25bfd96a29c3112f508d5e4fc860dbad7afce657233c343acfa20715717d51e7 golangci-lint-1.39.0-freebsd-armv6.tar.gz +9687e4ff15545cfc722b0e46107a94195166a505023b48a316579af25ad09505 golangci-lint-1.39.0-linux-armv7.tar.gz +a7fa7ab2bfc99cbe5e5bcbf5684f5a997f920afbbe2f253d2feb1001d5e3c8b3 golangci-lint-1.39.0-linux-armv6.tar.gz +c8f9634115beddb4ed9129c1f7ecd4c97c99d07aeef33e3707234097eeb51b7b golangci-lint-1.39.0-linux-mips64le.tar.gz +d1234c213b74751f1af413302dde0e9a6d4d29aecef034af7abb07dc1b6e887f golangci-lint-1.39.0-linux-arm64.tar.gz +df25d9267168323b163147acb823ab0215a8a3bb6898a4a9320afdfedde66817 golangci-lint-1.39.0-linux-386.tar.gz +1767e75fba357b7651b1a796d38453558f371c60af805505ec99e166908c04b5 golangci-lint-1.39.0-linux-ppc64le.tar.gz +25fd75bf3186b3d930ecae10185689968fd18fd8fa6f9f555d6beb04348c20f6 golangci-lint-1.39.0-linux-s390x.tar.gz +3a73aa7468087caa62673c8adea99b4e4dff846dc72707222db85f8679b40cbf golangci-lint-1.39.0-linux-amd64.tar.gz +578caceccf81739bda67dbfec52816709d03608c6878888ecdc0e186a094a41b golangci-lint-1.39.0-linux-mips64.tar.gz +494b66ba0e32c8ddf6c4f6b1d05729b110900f6017eda943057e43598c17d7a8 golangci-lint-1.39.0-windows-386.zip +52ec2e13a3cbb47147244dff8cfc35103563deb76e0459133058086fc35fb2c7 golangci-lint-1.39.0-windows-amd64.zip diff --git a/build/ci.go b/build/ci.go index ab153eb85..f99074d95 100644 --- a/build/ci.go +++ b/build/ci.go @@ -26,7 +26,7 @@ Available commands are: install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables test [ -coverage ] [ packages... 
] -- runs the tests lint -- runs certain pre-selected linters - archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artifacts + archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts importkeys -- imports signing keys from env debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package nsis -- creates a Windows NSIS installer @@ -46,19 +46,20 @@ import ( "encoding/base64" "flag" "fmt" - "go/parser" - "go/token" "io/ioutil" "log" "os" "os/exec" + "path" "path/filepath" "regexp" "runtime" + "strconv" "strings" "time" "github.com/cespare/cp" + "github.com/ethereum/go-ethereum/crypto/signify" "github.com/ethereum/go-ethereum/internal/build" "github.com/ethereum/go-ethereum/params" ) @@ -115,7 +116,6 @@ var ( } // A debian package is created for all executables listed here. - debEthereum = debPackage{ Name: "ethereum", Version: params.Version, @@ -135,19 +135,25 @@ var ( // Note: artful is unsupported because it was officially deprecated on Launchpad. // Note: cosmic is unsupported because it was officially deprecated on Launchpad. // Note: disco is unsupported because it was officially deprecated on Launchpad. + // Note: eoan is unsupported because it was officially deprecated on Launchpad. debDistroGoBoots = map[string]string{ - "trusty": "golang-1.11", - "xenial": "golang-go", - "bionic": "golang-go", - "eoan": "golang-go", - "focal": "golang-go", - "groovy": "golang-go", + "trusty": "golang-1.11", + "xenial": "golang-go", + "bionic": "golang-go", + "focal": "golang-go", + "groovy": "golang-go", + "hirsute": "golang-go", } debGoBootPaths = map[string]string{ "golang-1.11": "/usr/lib/go-1.11", "golang-go": "/usr/lib/go", } + + // This is the version of go that will be downloaded by + // + // go run ci.go install -dlgo + dlgoVersion = "1.16.4" ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) @@ -177,6 +183,8 @@ func main() { doLint(os.Args[2:]) case "archive": doArchive(os.Args[2:]) + case "docker": + doDocker(os.Args[2:]) case "debsrc": doDebianSource(os.Args[2:]) case "nsis": @@ -198,131 +206,95 @@ func main() { func doInstall(cmdline []string) { var ( + dlgo = flag.Bool("dlgo", false, "Download Go and build with it") arch = flag.String("arch", "", "Architecture to cross build for") cc = flag.String("cc", "", "C compiler to cross build with") ) flag.CommandLine.Parse(cmdline) - env := build.Env() - // Check Go version. People regularly open issues about compilation - // failure with outdated Go. This should save them the trouble. - if !strings.Contains(runtime.Version(), "devel") { - // Figure out the minor version number since we can't textually compare (1.10 < 1.9) - var minor int - fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor) - - if minor < 13 { - log.Println("You have Go version", runtime.Version()) - log.Println("go-ethereum requires at least Go version 1.13 and cannot") - log.Println("be compiled with an earlier version. Please upgrade your Go installation.") - os.Exit(1) - } - } - // Compile packages given as arguments, or everything if there are no arguments. - packages := []string{"./..."} - if flag.NArg() > 0 { - packages = flag.Args() + // Configure the toolchain. 
+ tc := build.GoToolchain{GOARCH: *arch, CC: *cc} + if *dlgo { + csdb := build.MustLoadChecksums("build/checksums.txt") + tc.Root = build.DownloadGo(csdb, dlgoVersion) } - if *arch == "" || *arch == runtime.GOARCH { - goinstall := goTool("install", buildFlags(env)...) - if runtime.GOARCH == "arm64" { - goinstall.Args = append(goinstall.Args, "-p", "1") - } - goinstall.Args = append(goinstall.Args, "-trimpath") - goinstall.Args = append(goinstall.Args, "-v") - goinstall.Args = append(goinstall.Args, packages...) - build.MustRun(goinstall) - return + // Configure the build. + env := build.Env() + gobuild := tc.Go("build", buildFlags(env)...) + + // arm64 CI builders are memory-constrained and can't handle concurrent builds, + // better disable it. This check isn't the best, it should probably + // check for something in env instead. + if env.CI && runtime.GOARCH == "arm64" { + gobuild.Args = append(gobuild.Args, "-p", "1") } - // Seems we are cross compiling, work around forbidden GOBIN - goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...) - goinstall.Args = append(goinstall.Args, "-trimpath") - goinstall.Args = append(goinstall.Args, "-v") - goinstall.Args = append(goinstall.Args, []string{"-buildmode", "archive"}...) - goinstall.Args = append(goinstall.Args, packages...) - build.MustRun(goinstall) + // We use -trimpath to avoid leaking local paths into the built executables. + gobuild.Args = append(gobuild.Args, "-trimpath") - if cmds, err := ioutil.ReadDir("cmd"); err == nil { - for _, cmd := range cmds { - pkgs, err := parser.ParseDir(token.NewFileSet(), filepath.Join(".", "cmd", cmd.Name()), nil, parser.PackageClauseOnly) - if err != nil { - log.Fatal(err) - } - for name := range pkgs { - if name == "main" { - gobuild := goToolArch(*arch, *cc, "build", buildFlags(env)...) - gobuild.Args = append(gobuild.Args, "-v") - gobuild.Args = append(gobuild.Args, []string{"-o", executablePath(cmd.Name())}...) - gobuild.Args = append(gobuild.Args, "."+string(filepath.Separator)+filepath.Join("cmd", cmd.Name())) - build.MustRun(gobuild) - break - } - } - } + // Show packages during build. + gobuild.Args = append(gobuild.Args, "-v") + + // Now we choose what we're even building. + // Default: collect all 'main' packages in cmd/ and build those. + packages := flag.Args() + if len(packages) == 0 { + packages = build.FindMainPackages("./cmd") + } + + // Do the build! + for _, pkg := range packages { + args := make([]string, len(gobuild.Args)) + copy(args, gobuild.Args) + args = append(args, "-o", executablePath(path.Base(pkg))) + args = append(args, pkg) + build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env}) } } +// buildFlags returns the go tool flags for building. func buildFlags(env build.Environment) (flags []string) { var ld []string if env.Commit != "" { ld = append(ld, "-X", "main.gitCommit="+env.Commit) ld = append(ld, "-X", "main.gitDate="+env.Date) } + // Strip DWARF on darwin. This used to be required for certain things, + // and there is no downside to this, so we just keep doing it. if runtime.GOOS == "darwin" { ld = append(ld, "-s") } - if len(ld) > 0 { flags = append(flags, "-ldflags", strings.Join(ld, " ")) } return flags } -func goTool(subcmd string, args ...string) *exec.Cmd { - return goToolArch(runtime.GOARCH, os.Getenv("CC"), subcmd, args...) -} - -func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd { - cmd := build.GoTool(subcmd, args...) 
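buildFlags above injects the commit hash and date through the linker rather than through source code. For readers unfamiliar with the -X flag, here is a minimal illustration (hypothetical program, not part of the build system) of how such a value surfaces in a binary:

```go
// Build with:
//   go build -ldflags "-X main.gitCommit=abc123 -X main.gitDate=20210501" .
package main

import "fmt"

var (
	gitCommit string // overwritten by the linker when -X main.gitCommit=... is given
	gitDate   string // overwritten by the linker when -X main.gitDate=... is given
)

func main() {
	fmt.Printf("commit=%q date=%q\n", gitCommit, gitDate)
}
```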
- if arch == "" || arch == runtime.GOARCH { - cmd.Env = append(cmd.Env, "GOBIN="+GOBIN) - } else { - cmd.Env = append(cmd.Env, "CGO_ENABLED=1") - cmd.Env = append(cmd.Env, "GOARCH="+arch) - } - if cc != "" { - cmd.Env = append(cmd.Env, "CC="+cc) - } - for _, e := range os.Environ() { - if strings.HasPrefix(e, "GOBIN=") { - continue - } - cmd.Env = append(cmd.Env, e) - } - return cmd -} - // Running The Tests // // "tests" also includes static analysis tools such as vet. func doTest(cmdline []string) { - coverage := flag.Bool("coverage", false, "Whether to record code coverage") - verbose := flag.Bool("v", false, "Whether to log verbosely") + var ( + dlgo = flag.Bool("dlgo", false, "Download Go and build with it") + arch = flag.String("arch", "", "Run tests for given architecture") + cc = flag.String("cc", "", "Sets C compiler binary") + coverage = flag.Bool("coverage", false, "Whether to record code coverage") + verbose = flag.Bool("v", false, "Whether to log verbosely") + ) flag.CommandLine.Parse(cmdline) - env := build.Env() - packages := []string{"./..."} - if len(flag.CommandLine.Args()) > 0 { - packages = flag.CommandLine.Args() + // Configure the toolchain. + tc := build.GoToolchain{GOARCH: *arch, CC: *cc} + if *dlgo { + csdb := build.MustLoadChecksums("build/checksums.txt") + tc.Root = build.DownloadGo(csdb, dlgoVersion) } + gotest := tc.Go("test") - // Run the actual tests. // Test a single package at a time. CI builders are slow // and some tests run into timeouts under load. - gotest := goTool("test", buildFlags(env)...) gotest.Args = append(gotest.Args, "-p", "1") if *coverage { gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover") @@ -331,6 +303,10 @@ func doTest(cmdline []string) { gotest.Args = append(gotest.Args, "-v") } + packages := []string{"./..."} + if len(flag.CommandLine.Args()) > 0 { + packages = flag.CommandLine.Args() + } gotest.Args = append(gotest.Args, packages...) build.MustRun(gotest) } @@ -354,7 +330,7 @@ func doLint(cmdline []string) { // downloadLinter downloads and unpacks golangci-lint. func downloadLinter(cachedir string) string { - const version = "1.27.0" + const version = "1.39.0" csdb := build.MustLoadChecksums("build/checksums.txt") base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH) @@ -363,7 +339,7 @@ func downloadLinter(cachedir string) string { if err := csdb.DownloadFile(url, archivePath); err != nil { log.Fatal(err) } - if err := build.ExtractTarballArchive(archivePath, cachedir); err != nil { + if err := build.ExtractArchive(archivePath, cachedir); err != nil { log.Fatal(err) } return filepath.Join(cachedir, base, "golangci-lint") @@ -372,11 +348,12 @@ func downloadLinter(cachedir string) string { // Release Packaging func doArchive(cmdline []string) { var ( - arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging") - atype = flag.String("type", "zip", "Type of archive to write (zip|tar)") - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`) - upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) - ext string + arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging") + atype = flag.String("type", "zip", "Type of archive to write (zip|tar)") + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify key (e.g. 
LINUX_SIGNIFY_KEY)`) + upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) + ext string ) flag.CommandLine.Parse(cmdline) switch *atype { @@ -389,8 +366,7 @@ func doArchive(cmdline []string) { } var ( - env = build.Env() - + env = build.Env() basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit)) geth = "geth-" + basegeth + ext alltools = "geth-alltools-" + basegeth + ext @@ -403,7 +379,7 @@ func doArchive(cmdline []string) { log.Fatal(err) } for _, archive := range []string{geth, alltools} { - if err := archiveUpload(archive, *upload, *signer); err != nil { + if err := archiveUpload(archive, *upload, *signer, *signify); err != nil { log.Fatal(err) } } @@ -423,7 +399,7 @@ func archiveBasename(arch string, archiveVersion string) string { return platform + "-" + archiveVersion } -func archiveUpload(archive string, blobstore string, signer string) error { +func archiveUpload(archive string, blobstore string, signer string, signifyVar string) error { // If signing was requested, generate the signature files if signer != "" { key := getenvBase64(signer) @@ -431,6 +407,14 @@ func archiveUpload(archive string, blobstore string, signer string) error { return err } } + if signifyVar != "" { + key := os.Getenv(signifyVar) + untrustedComment := "verify with geth-release.pub" + trustedComment := fmt.Sprintf("%s (%s)", archive, time.Now().UTC().Format(time.RFC1123)) + if err := signify.SignFile(archive, archive+".sig", key, untrustedComment, trustedComment); err != nil { + return err + } + } // If uploading to Azure was requested, push the archive possibly with its signature if blobstore != "" { auth := build.AzureBlobstoreConfig{ @@ -446,6 +430,11 @@ func archiveUpload(archive string, blobstore string, signer string) error { return err } } + if signifyVar != "" { + if err := build.AzureBlobstoreUpload(archive+".sig", filepath.Base(archive+".sig"), auth); err != nil { + return err + } + } } return nil } @@ -453,33 +442,199 @@ func archiveUpload(archive string, blobstore string, signer string) error { // skips archiving for some build configurations. func maybeSkipArchive(env build.Environment) { if env.IsPullRequest { - log.Printf("skipping because this is a PR build") + log.Printf("skipping archive creation because this is a PR build") os.Exit(0) } if env.IsCronJob { - log.Printf("skipping because this is a cron job") + log.Printf("skipping archive creation because this is a cron job") os.Exit(0) } if env.Branch != "master" && !strings.HasPrefix(env.Tag, "v1.") { - log.Printf("skipping because branch %q, tag %q is not on the whitelist", env.Branch, env.Tag) + log.Printf("skipping archive creation because branch %q, tag %q is not on the inclusion list", env.Branch, env.Tag) os.Exit(0) } } +// Builds the docker images and optionally uploads them to Docker Hub. 
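The archiveUpload change above adds an OpenBSD-signify style signature next to each release artifact whenever the named environment variable is set. A sketch of that step in isolation; the only assumption beyond this diff is that crypto/signify's SignFile takes (input, output, key, untrusted comment, trusted comment), exactly as it is called above:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/ethereum/go-ethereum/crypto/signify"
)

func main() {
	archive := "geth-linux-amd64-example.tar.gz" // placeholder artifact name
	key := os.Getenv("LINUX_SIGNIFY_KEY")
	if key == "" {
		log.Fatal("no signify key configured")
	}
	trusted := fmt.Sprintf("%s (%s)", archive, time.Now().UTC().Format(time.RFC1123))
	if err := signify.SignFile(archive, archive+".sig", key, "verify with geth-release.pub", trusted); err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote", archive+".sig")
}
```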
+func doDocker(cmdline []string) { + var ( + image = flag.Bool("image", false, `Whether to build and push an arch specific docker image`) + manifest = flag.String("manifest", "", `Push a multi-arch docker image for the specified architectures (usually "amd64,arm64")`) + upload = flag.String("upload", "", `Where to upload the docker image (usually "ethereum/client-go")`) + ) + flag.CommandLine.Parse(cmdline) + + // Skip building and pushing docker images for PR builds + env := build.Env() + maybeSkipArchive(env) + + // Retrieve the upload credentials and authenticate + user := getenvBase64("DOCKER_HUB_USERNAME") + pass := getenvBase64("DOCKER_HUB_PASSWORD") + + if len(user) > 0 && len(pass) > 0 { + auther := exec.Command("docker", "login", "-u", string(user), "--password-stdin") + auther.Stdin = bytes.NewReader(pass) + build.MustRun(auther) + } + // Retrieve the version infos to build and push to the following paths: + // - ethereum/client-go:latest - Pushes to the master branch, Geth only + // - ethereum/client-go:stable - Version tag publish on GitHub, Geth only + // - ethereum/client-go:alltools-latest - Pushes to the master branch, Geth & tools + // - ethereum/client-go:alltools-stable - Version tag publish on GitHub, Geth & tools + // - ethereum/client-go:release-. - Version tag publish on GitHub, Geth only + // - ethereum/client-go:alltools-release-. - Version tag publish on GitHub, Geth & tools + // - ethereum/client-go:v.. - Version tag publish on GitHub, Geth only + // - ethereum/client-go:alltools-v.. - Version tag publish on GitHub, Geth & tools + var tags []string + + switch { + case env.Branch == "master": + tags = []string{"latest"} + case strings.HasPrefix(env.Tag, "v1."): + tags = []string{"stable", fmt.Sprintf("release-1.%d", params.VersionMinor), "v" + params.Version} + } + // If architecture specific image builds are requested, build and push them + if *image { + build.MustRunCommand("docker", "build", "--build-arg", "COMMIT="+env.Commit, "--build-arg", "VERSION="+params.VersionWithMeta, "--build-arg", "BUILDNUM="+env.Buildnum, "--tag", fmt.Sprintf("%s:TAG", *upload), ".") + build.MustRunCommand("docker", "build", "--build-arg", "COMMIT="+env.Commit, "--build-arg", "VERSION="+params.VersionWithMeta, "--build-arg", "BUILDNUM="+env.Buildnum, "--tag", fmt.Sprintf("%s:alltools-TAG", *upload), "-f", "Dockerfile.alltools", ".") + + // Tag and upload the images to Docker Hub + for _, tag := range tags { + gethImage := fmt.Sprintf("%s:%s-%s", *upload, tag, runtime.GOARCH) + toolImage := fmt.Sprintf("%s:alltools-%s-%s", *upload, tag, runtime.GOARCH) + + // If the image already exists (non version tag), check the build + // number to prevent overwriting a newer commit if concurrent builds + // are running. This is still a tiny bit racey if two published are + // done at the same time, but that's extremely unlikely even on the + // master branch. 
+ for _, img := range []string{gethImage, toolImage} { + if exec.Command("docker", "pull", img).Run() != nil { + continue // Generally the only failure is a missing image, which is good + } + buildnum, err := exec.Command("docker", "inspect", "--format", "{{index .Config.Labels \"buildnum\"}}", img).CombinedOutput() + if err != nil { + log.Fatalf("Failed to inspect container: %v\nOutput: %s", err, string(buildnum)) + } + buildnum = bytes.TrimSpace(buildnum) + + if len(buildnum) > 0 && len(env.Buildnum) > 0 { + oldnum, err := strconv.Atoi(string(buildnum)) + if err != nil { + log.Fatalf("Failed to parse old image build number: %v", err) + } + newnum, err := strconv.Atoi(env.Buildnum) + if err != nil { + log.Fatalf("Failed to parse current build number: %v", err) + } + if oldnum > newnum { + log.Fatalf("Current build number %d not newer than existing %d", newnum, oldnum) + } else { + log.Printf("Updating %s from build %d to %d", img, oldnum, newnum) + } + } + } + build.MustRunCommand("docker", "image", "tag", fmt.Sprintf("%s:TAG", *upload), gethImage) + build.MustRunCommand("docker", "image", "tag", fmt.Sprintf("%s:alltools-TAG", *upload), toolImage) + build.MustRunCommand("docker", "push", gethImage) + build.MustRunCommand("docker", "push", toolImage) + } + } + // If multi-arch image manifest push is requested, assemble it + if len(*manifest) != 0 { + // Since different architectures are pushed by different builders, wait + // until all required images are updated. + var mismatch bool + for i := 0; i < 2; i++ { // 2 attempts, second is race check + mismatch = false // hope there's no mismatch now + + for _, tag := range tags { + for _, arch := range strings.Split(*manifest, ",") { + gethImage := fmt.Sprintf("%s:%s-%s", *upload, tag, arch) + toolImage := fmt.Sprintf("%s:alltools-%s-%s", *upload, tag, arch) + + for _, img := range []string{gethImage, toolImage} { + if out, err := exec.Command("docker", "pull", img).CombinedOutput(); err != nil { + log.Printf("Required image %s unavailable: %v\nOutput: %s", img, err, out) + mismatch = true + break + } + buildnum, err := exec.Command("docker", "inspect", "--format", "{{index .Config.Labels \"buildnum\"}}", img).CombinedOutput() + if err != nil { + log.Fatalf("Failed to inspect container: %v\nOutput: %s", err, string(buildnum)) + } + buildnum = bytes.TrimSpace(buildnum) + + if string(buildnum) != env.Buildnum { + log.Printf("Build number mismatch on %s: want %s, have %s", img, env.Buildnum, buildnum) + mismatch = true + break + } + } + if mismatch { + break + } + } + if mismatch { + break + } + } + if mismatch { + // Build numbers mismatching, retry in a short time to + // avoid concurrent failes in both publisher images. If + // however the retry failed too, it means the concurrent + // builder is still crunching, let that do the publish. + if i == 0 { + time.Sleep(30 * time.Second) + } + continue + } + break + } + if mismatch { + log.Println("Relinquishing publish to other builder") + return + } + // Assemble and push the Geth manifest image + for _, tag := range tags { + gethImage := fmt.Sprintf("%s:%s", *upload, tag) + + var gethSubImages []string + for _, arch := range strings.Split(*manifest, ",") { + gethSubImages = append(gethSubImages, gethImage+"-"+arch) + } + build.MustRunCommand("docker", append([]string{"manifest", "create", gethImage}, gethSubImages...)...) 
+ build.MustRunCommand("docker", "manifest", "push", gethImage) + } + // Assemble and push the alltools manifest image + for _, tag := range tags { + toolImage := fmt.Sprintf("%s:alltools-%s", *upload, tag) + + var toolSubImages []string + for _, arch := range strings.Split(*manifest, ",") { + toolSubImages = append(toolSubImages, toolImage+"-"+arch) + } + build.MustRunCommand("docker", append([]string{"manifest", "create", toolImage}, toolSubImages...)...) + build.MustRunCommand("docker", "manifest", "push", toolImage) + } + } +} + // Debian Packaging func doDebianSource(cmdline []string) { var ( - goversion = flag.String("goversion", "", `Go version to build with (will be included in the source package)`) - cachedir = flag.String("cachedir", "./build/cache", `Filesystem path to cache the downloaded Go bundles at`) - signer = flag.String("signer", "", `Signing key name, also used as package author`) - upload = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`) - sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`) - workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`) - now = time.Now() + cachedir = flag.String("cachedir", "./build/cache", `Filesystem path to cache the downloaded Go bundles at`) + signer = flag.String("signer", "", `Signing key name, also used as package author`) + upload = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`) + sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`) + workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`) + now = time.Now() ) flag.CommandLine.Parse(cmdline) *workdir = makeWorkdir(*workdir) env := build.Env() + tc := new(build.GoToolchain) maybeSkipArchive(env) // Import the signing key. @@ -490,15 +645,15 @@ func doDebianSource(cmdline []string) { } // Download and verify the Go source package. - gobundle := downloadGoSources(*goversion, *cachedir) + gobundle := downloadGoSources(*cachedir) // Download all the dependencies needed to build the sources and run the ci script - srcdepfetch := goTool("install", "-n", "./...") - srcdepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath")) + srcdepfetch := tc.Go("mod", "download") + srcdepfetch.Env = append(srcdepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath")) build.MustRun(srcdepfetch) - cidepfetch := goTool("run", "./build/ci.go") - cidepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath")) + cidepfetch := tc.Go("run", "./build/ci.go") + cidepfetch.Env = append(cidepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath")) cidepfetch.Run() // Command fails, don't care, we only need the deps to start it // Create Debian packages and upload them. 
@@ -509,7 +664,7 @@ func doDebianSource(cmdline []string) { pkgdir := stageDebianSource(*workdir, meta) // Add Go source code - if err := build.ExtractTarballArchive(gobundle, pkgdir); err != nil { + if err := build.ExtractArchive(gobundle, pkgdir); err != nil { log.Fatalf("Failed to extract Go sources: %v", err) } if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil { @@ -526,24 +681,26 @@ func doDebianSource(cmdline []string) { build.MustRun(debuild) var ( - basename = fmt.Sprintf("%s_%s", meta.Name(), meta.VersionString()) - source = filepath.Join(*workdir, basename+".tar.xz") - dsc = filepath.Join(*workdir, basename+".dsc") - changes = filepath.Join(*workdir, basename+"_source.changes") + basename = fmt.Sprintf("%s_%s", meta.Name(), meta.VersionString()) + source = filepath.Join(*workdir, basename+".tar.xz") + dsc = filepath.Join(*workdir, basename+".dsc") + changes = filepath.Join(*workdir, basename+"_source.changes") + buildinfo = filepath.Join(*workdir, basename+"_source.buildinfo") ) if *signer != "" { build.MustRunCommand("debsign", changes) } if *upload != "" { - ppaUpload(*workdir, *upload, *sshUser, []string{source, dsc, changes}) + ppaUpload(*workdir, *upload, *sshUser, []string{source, dsc, changes, buildinfo}) } } } } -func downloadGoSources(version string, cachedir string) string { +// downloadGoSources downloads the Go source tarball. +func downloadGoSources(cachedir string) string { csdb := build.MustLoadChecksums("build/checksums.txt") - file := fmt.Sprintf("go%s.src.tar.gz", version) + file := fmt.Sprintf("go%s.src.tar.gz", dlgoVersion) url := "https://dl.google.com/go/" + file dst := filepath.Join(cachedir, file) if err := csdb.DownloadFile(url, dst); err != nil { @@ -747,6 +904,7 @@ func doWindowsInstaller(cmdline []string) { var ( arch = flag.String("arch", runtime.GOARCH, "Architecture for cross build packaging") signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. WINDOWS_SIGNING_KEY)`) + signify = flag.String("signify key", "", `Environment variable holding the signify signing key (e.g. WINDOWS_SIGNIFY_KEY)`) upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`) ) @@ -808,7 +966,7 @@ func doWindowsInstaller(cmdline []string) { filepath.Join(*workdir, "geth.nsi"), ) // Sign and publish installer. - if err := archiveUpload(installer, *upload, *signer); err != nil { + if err := archiveUpload(installer, *upload, *signer, *signify); err != nil { log.Fatal(err) } } @@ -817,20 +975,31 @@ func doWindowsInstaller(cmdline []string) { func doAndroidArchive(cmdline []string) { var ( - local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`) - deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`) - upload = flag.String("upload", "", `Destination to upload the archive (usually "gethstore/builds")`) + local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. 
ANDROID_SIGNIFY_KEY)`) + deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`) + upload = flag.String("upload", "", `Destination to upload the archive (usually "gethstore/builds")`) ) flag.CommandLine.Parse(cmdline) env := build.Env() + tc := new(build.GoToolchain) // Sanity check that the SDK and NDK are installed and set if os.Getenv("ANDROID_HOME") == "" { log.Fatal("Please ensure ANDROID_HOME points to your Android SDK") } + + // Build gomobile. + install := tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile@latest", "golang.org/x/mobile/cmd/gobind@latest") + install.Env = append(install.Env) + build.MustRun(install) + + // Ensure all dependencies are available. This is required to make + // gomobile bind work because it expects go.sum to contain all checksums. + build.MustRun(tc.Go("mod", "download")) + // Build the Android archive and Maven resources - build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind")) build.MustRun(gomobileTool("bind", "-ldflags", "-s -w", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile")) if *local { @@ -849,7 +1018,7 @@ func doAndroidArchive(cmdline []string) { archive := "geth-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar" os.Rename("geth.aar", archive) - if err := archiveUpload(archive, *upload, *signer); err != nil { + if err := archiveUpload(archive, *upload, *signer, *signify); err != nil { log.Fatal(err) } // Sign and upload all the artifacts to Maven Central @@ -942,17 +1111,24 @@ func newMavenMetadata(env build.Environment) mavenMetadata { func doXCodeFramework(cmdline []string) { var ( - local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`) - deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`) - upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) + local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. IOS_SIGNIFY_KEY)`) + deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`) + upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) ) flag.CommandLine.Parse(cmdline) env := build.Env() + tc := new(build.GoToolchain) + + // Build gomobile. + build.MustRun(tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile@latest", "golang.org/x/mobile/cmd/gobind@latest")) + + // Ensure all dependencies are available. This is required to make + // gomobile bind work because it expects go.sum to contain all checksums. + build.MustRun(tc.Go("mod", "download")) // Build the iOS XCode framework - build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind")) - build.MustRun(gomobileTool("init")) bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "-v", "github.com/ethereum/go-ethereum/mobile") if *local { @@ -961,26 +1137,26 @@ func doXCodeFramework(cmdline []string) { build.MustRun(bind) return } + + // Create the archive. 
+ maybeSkipArchive(env) archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit)) - if err := os.Mkdir(archive, os.ModePerm); err != nil { + if err := os.MkdirAll(archive, 0755); err != nil { log.Fatal(err) } bind.Dir, _ = filepath.Abs(archive) build.MustRun(bind) build.MustRunCommand("tar", "-zcvf", archive+".tar.gz", archive) - // Skip CocoaPods deploy and Azure upload for PR builds - maybeSkipArchive(env) - // Sign and upload the framework to Azure - if err := archiveUpload(archive+".tar.gz", *upload, *signer); err != nil { + if err := archiveUpload(archive+".tar.gz", *upload, *signer, *signify); err != nil { log.Fatal(err) } // Prepare and upload a PodSpec to CocoaPods if *deploy != "" { meta := newPodMetadata(env, archive) build.Render("build/pod.podspec", "Geth.podspec", 0755, meta) - build.MustRunCommand("pod", *deploy, "push", "Geth.podspec", "--allow-warnings", "--verbose") + build.MustRunCommand("pod", *deploy, "push", "Geth.podspec", "--allow-warnings") } } @@ -1037,10 +1213,14 @@ func doXgo(cmdline []string) { ) flag.CommandLine.Parse(cmdline) env := build.Env() + var tc build.GoToolchain // Make sure xgo is available for cross compilation - gogetxgo := goTool("get", "github.com/karalabe/xgo") - build.MustRun(gogetxgo) + build.MustRun(tc.Install(GOBIN, "github.com/karalabe/xgo@latest")) + + var shFile, _ = filepath.Abs(filepath.Join("build", "set_goproxy_to_xgo.sh")) + // Try to use custom goproxy if set + build.MustRunCommand("sh", shFile) // If all tools building is requested, build everything the builder wants args := append(buildFlags(env), flag.Args()...) @@ -1051,27 +1231,23 @@ func doXgo(cmdline []string) { if strings.HasPrefix(res, GOBIN) { // Binary tool found, cross build it explicitly args = append(args, "./"+filepath.Join("cmd", filepath.Base(res))) - xgo := xgoTool(args) - build.MustRun(xgo) + build.MustRun(xgoTool(args)) args = args[:len(args)-1] } } return } - // Otherwise xxecute the explicit cross compilation + + // Otherwise execute the explicit cross compilation path := args[len(args)-1] args = append(args[:len(args)-1], []string{"--dest", GOBIN, path}...) - - xgo := xgoTool(args) - build.MustRun(xgo) + build.MustRun(xgoTool(args)) } func xgoTool(args []string) *exec.Cmd { cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...) cmd.Env = os.Environ() - cmd.Env = append(cmd.Env, []string{ - "GOBIN=" + GOBIN, - }...) + cmd.Env = append(cmd.Env, []string{"GOBIN=" + GOBIN}...) return cmd } diff --git a/build/nsis.envvarupdate.nsh b/build/nsis.envvarupdate.nsh index 9c3ecbe33..95c2f1f63 100644 --- a/build/nsis.envvarupdate.nsh +++ b/build/nsis.envvarupdate.nsh @@ -43,7 +43,7 @@ !ifndef Un${StrFuncName}_INCLUDED ${Un${StrFuncName}} !endif - !define un.${StrFuncName} "${Un${StrFuncName}}" + !define un.${StrFuncName} '${Un${StrFuncName}}' !macroend !insertmacro _IncludeStrFunction StrTok diff --git a/build/revert_xgo.sh b/build/revert_xgo.sh new file mode 100755 index 000000000..c14e226b9 --- /dev/null +++ b/build/revert_xgo.sh @@ -0,0 +1,27 @@ +#!/bin/sh +tagLatest=karalabe/xgo-latest:latest +tagOrigin=karalabe/xgo-latest:origin-latest +tagProxy=karalabe/xgo-latest:goproxy + +if test ! -z "$(docker images -q ${tagOrigin})"; then + if test "$(docker images -q ${tagOrigin})" = "$(docker images -q ${tagLatest})"; then + echo "Need to do nothing" + exit 0 + fi + + if test ! -z "$(docker images -q ${tagLatest})"; then + echo "Info: remove old ${tagLatest}" + docker rmi ${tagLatest} + if test ! 
-z "$(docker images -q ${tagProxy})"; then + echo "Info: remove old ${tagProxy}" + docker rmi ${tagProxy} + fi + fi + echo "Info: tag ${tagOrigin} to ${tagLatest}" + docker tag ${tagOrigin} ${tagLatest} + echo "Done" +else + echo "Error: ${tagOrigin} not exist! Nothing changed." +fi + +docker images karalabe/xgo-latest \ No newline at end of file diff --git a/build/set_goproxy_to_xgo.sh b/build/set_goproxy_to_xgo.sh new file mode 100755 index 000000000..c01378288 --- /dev/null +++ b/build/set_goproxy_to_xgo.sh @@ -0,0 +1,35 @@ +#!/bin/sh +tagLatest=karalabe/xgo-latest:latest +tagOrigin=karalabe/xgo-latest:origin-latest +tagProxy=karalabe/xgo-latest:goproxy + +# check if already set +if test -n "$(docker images -q ${tagProxy})"; then + if test "$(docker images -q ${tagProxy})" = "$(docker images -q ${tagLatest})"; then + # already set, just exit + exit 0 + else + echo "Using existed image ${tagProxy}" + docker tag ${tagProxy} ${tagLatest} + docker images karalabe/xgo-latest + exit 0 + fi +fi + +# check env GOPROXY +if [ $GOPROXY ] && [ $GOPROXY != https://proxy.golang.org,direct ]; then + echo "Using ${GOPROXY}" +else + echo "No custom GOPROXY, nothing changed." + exit 0 +fi + +docker run -d --name=xgo-build ${tagLatest} /bin/sh +docker exec xgo-build /bin/bash -c "go env -w GOPROXY=${GOPROXY}" +docker commit -a "darlzan" -m "use ${GOPROXY}" xgo-build ${tagProxy} +docker stop xgo-build +docker rm xgo-build +docker tag ${tagLatest} ${tagOrigin} +docker tag ${tagProxy} ${tagLatest} +#show results +docker images karalabe/xgo-latest \ No newline at end of file diff --git a/cmd/abidump/main.go b/cmd/abidump/main.go index 35cbcbb0e..4f942749d 100644 --- a/cmd/abidump/main.go +++ b/cmd/abidump/main.go @@ -23,7 +23,7 @@ import ( "os" "strings" - "github.com/ethereum/go-ethereum/signer/core" + "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/ethereum/go-ethereum/signer/fourbyte" ) @@ -41,7 +41,7 @@ func parse(data []byte) { if err != nil { die(err) } - messages := core.ValidationMessages{} + messages := apitypes.ValidationMessages{} db.ValidateCallData(nil, data, &messages) for _, m := range messages.Messages { fmt.Printf("%v: %v\n", m.Typ, m.Message) diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go index a74b0396d..7b3b35e4e 100644 --- a/cmd/abigen/main.go +++ b/cmd/abigen/main.go @@ -96,7 +96,7 @@ var ( } aliasFlag = cli.StringFlag{ Name: "alias", - Usage: "Comma separated aliases for function and event renaming, e.g. foo=bar", + Usage: "Comma separated aliases for function and event renaming, e.g. 
original1=alias1, original2=alias2", } ) diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go index 6c9ff615a..036b968ef 100644 --- a/cmd/bootnode/main.go +++ b/cmd/bootnode/main.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" @@ -121,17 +120,17 @@ func main() { printNotice(&nodeKey.PublicKey, *realaddr) + db, _ := enode.OpenDB("") + ln := enode.NewLocalNode(db, nodeKey) + cfg := discover.Config{ + PrivateKey: nodeKey, + NetRestrict: restrictList, + } if *runv5 { - if _, err := discv5.ListenUDP(nodeKey, conn, "", restrictList); err != nil { + if _, err := discover.ListenV5(conn, ln, cfg); err != nil { utils.Fatalf("%v", err) } } else { - db, _ := enode.OpenDB("") - ln := enode.NewLocalNode(db, nodeKey) - cfg := discover.Config{ - PrivateKey: nodeKey, - NetRestrict: restrictList, - } if _, err := discover.ListenUDP(conn, ln, cfg); err != nil { utils.Fatalf("%v", err) } diff --git a/cmd/clef/README.md b/cmd/clef/README.md index 700f0a144..27c62817f 100644 --- a/cmd/clef/README.md +++ b/cmd/clef/README.md @@ -919,4 +919,4 @@ There are a couple of implementation for a UI. We'll try to keep this list up to | QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)| | GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: | | Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: | -| Clef UI| https://github.com/kyokan/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)| +| Clef UI| https://github.com/ethereum/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)| diff --git a/cmd/clef/main.go b/cmd/clef/main.go index 7ee519aab..d3fee9575 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -50,11 +50,11 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/signer/core" + "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/ethereum/go-ethereum/signer/fourbyte" "github.com/ethereum/go-ethereum/signer/rules" "github.com/ethereum/go-ethereum/signer/storage" - - colorable "github.com/mattn/go-colorable" + "github.com/mattn/go-colorable" "github.com/mattn/go-isatty" "gopkg.in/urfave/cli.v1" ) @@ -107,11 +107,6 @@ var ( Usage: "HTTP-RPC server listening port", Value: node.DefaultHTTPPort + 5, } - legacyRPCPortFlag = cli.IntFlag{ - Name: "rpcport", - Usage: "HTTP-RPC server listening port (Deprecated, please use --http.port).", - Value: node.DefaultHTTPPort + 5, - } signerSecretFlag = cli.StringFlag{ Name: "signersecret", Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. 
keystore credentials and ruleset hash", @@ -250,12 +245,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{ acceptFlag, }, }, - { - Name: "ALIASED (deprecated)", - Flags: []cli.Flag{ - legacyRPCPortFlag, - }, - }, } func init() { @@ -283,7 +272,6 @@ func init() { testFlag, advancedMode, acceptFlag, - legacyRPCPortFlag, } app.Action = signer app.Commands = []cli.Command{initCommand, @@ -669,7 +657,7 @@ func signer(c *cli.Context) error { cors := utils.SplitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name)) srv := rpc.NewServer() - err := node.RegisterApisFromWhitelist(rpcAPI, []string{"account"}, srv, false) + err := node.RegisterApis(rpcAPI, []string{"account"}, srv, false) if err != nil { utils.Fatalf("Could not register API: %w", err) } @@ -677,12 +665,6 @@ func signer(c *cli.Context) error { // set port port := c.Int(rpcPortFlag.Name) - if c.GlobalIsSet(legacyRPCPortFlag.Name) { - if !c.GlobalIsSet(rpcPortFlag.Name) { - port = c.Int(legacyRPCPortFlag.Name) - } - log.Warn("The flag --rpcport is deprecated and will be removed in the future, please use --http.port") - } // start http server httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.HTTPListenAddrFlag.Name), port) @@ -747,12 +729,10 @@ func DefaultConfigDir() string { appdata := os.Getenv("APPDATA") if appdata != "" { return filepath.Join(appdata, "Signer") - } else { - return filepath.Join(home, "AppData", "Roaming", "Signer") } - } else { - return filepath.Join(home, ".clef") + return filepath.Join(home, "AppData", "Roaming", "Signer") } + return filepath.Join(home, ".clef") } // As we cannot guess a stable location, return empty and handle later return "" @@ -807,14 +787,16 @@ func readMasterKey(ctx *cli.Context, ui core.UIClientAPI) ([]byte, error) { // checkFile is a convenience function to check if a file // * exists -// * is mode 0400 +// * is mode 0400 (unix only) func checkFile(filename string) error { info, err := os.Stat(filename) if err != nil { return fmt.Errorf("failed stat on %s: %v", filename, err) } // Check the unix permission bits - if info.Mode().Perm()&0377 != 0 { + // However, on windows, we cannot use the unix perm-bits, see + // https://github.com/ethereum/go-ethereum/issues/20123 + if runtime.GOOS != "windows" && info.Mode().Perm()&0377 != 0 { return fmt.Errorf("file (%v) has insecure file permissions (%v)", filename, info.Mode().String()) } return nil @@ -941,13 +923,13 @@ func testExternalUI(api *core.SignerAPI) { time.Sleep(delay) data := hexutil.Bytes([]byte{}) to := common.NewMixedcaseAddress(a) - tx := core.SendTxArgs{ + tx := apitypes.SendTxArgs{ Data: &data, Nonce: 0x1, Value: hexutil.Big(*big.NewInt(6)), From: common.NewMixedcaseAddress(a), To: &to, - GasPrice: hexutil.Big(*big.NewInt(5)), + GasPrice: (*hexutil.Big)(big.NewInt(5)), Gas: 1000, Input: nil, } @@ -1073,17 +1055,17 @@ func GenDoc(ctx *cli.Context) { data := hexutil.Bytes([]byte{0x01, 0x02, 0x03, 0x04}) add("SignTxRequest", desc, &core.SignTxRequest{ Meta: meta, - Callinfo: []core.ValidationInfo{ + Callinfo: []apitypes.ValidationInfo{ {Typ: "Warning", Message: "Something looks odd, show this message as a warning"}, {Typ: "Info", Message: "User should see this as well"}, }, - Transaction: core.SendTxArgs{ + Transaction: apitypes.SendTxArgs{ Data: &data, Nonce: 0x1, Value: hexutil.Big(*big.NewInt(6)), From: common.NewMixedcaseAddress(a), To: nil, - GasPrice: hexutil.Big(*big.NewInt(5)), + GasPrice: (*hexutil.Big)(big.NewInt(5)), Gas: 1000, Input: nil, }}) @@ -1093,13 +1075,13 @@ func GenDoc(ctx *cli.Context) { add("SignTxResponse - 
approve", "Response to request to sign a transaction. This response needs to contain the `transaction`"+ ", because the UI is free to make modifications to the transaction.", &core.SignTxResponse{Approved: true, - Transaction: core.SendTxArgs{ + Transaction: apitypes.SendTxArgs{ Data: &data, Nonce: 0x4, Value: hexutil.Big(*big.NewInt(6)), From: common.NewMixedcaseAddress(a), To: nil, - GasPrice: hexutil.Big(*big.NewInt(5)), + GasPrice: (*hexutil.Big)(big.NewInt(5)), Gas: 1000, Input: nil, }}) @@ -1124,7 +1106,7 @@ func GenDoc(ctx *cli.Context) { rlpdata := common.FromHex("0xf85d640101948a8eafb1cf62bfbeb1741769dae1a9dd47996192018026a0716bd90515acb1e68e5ac5867aa11a1e65399c3349d479f5fb698554ebc6f293a04e8a4ebfff434e971e0ef12c5bf3a881b06fd04fc3f8b8a7291fb67a26a1d4ed") var tx types.Transaction - rlp.DecodeBytes(rlpdata, &tx) + tx.UnmarshalBinary(rlpdata) add("OnApproved - SignTransactionResult", desc, ðapi.SignTransactionResult{Raw: rlpdata, Tx: &tx}) } diff --git a/cmd/clef/sign_flow.png b/cmd/clef/sign_flow.png index 93ef81a32..e7010ab43 100644 Binary files a/cmd/clef/sign_flow.png and b/cmd/clef/sign_flow.png differ diff --git a/cmd/clef/testdata/sign_1559_missing_field_exp_fail.json b/cmd/clef/testdata/sign_1559_missing_field_exp_fail.json new file mode 100644 index 000000000..c5a133686 --- /dev/null +++ b/cmd/clef/testdata/sign_1559_missing_field_exp_fail.json @@ -0,0 +1,16 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "to": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "gas": "0x333", + "maxFeePerGas": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/clef/testdata/sign_1559_missing_maxfeepergas_exp_fail.json b/cmd/clef/testdata/sign_1559_missing_maxfeepergas_exp_fail.json new file mode 100644 index 000000000..df69231d7 --- /dev/null +++ b/cmd/clef/testdata/sign_1559_missing_maxfeepergas_exp_fail.json @@ -0,0 +1,16 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "to": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "gas": "0x333", + "maxPriorityFeePerGas": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/clef/testdata/sign_1559_tx.json b/cmd/clef/testdata/sign_1559_tx.json new file mode 100644 index 000000000..29355f6cf --- /dev/null +++ b/cmd/clef/testdata/sign_1559_tx.json @@ -0,0 +1,17 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "to": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "gas": "0x333", + "maxPriorityFeePerGas": "0x123", + "maxFeePerGas": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/clef/testdata/sign_bad_checksum_exp_fail.json b/cmd/clef/testdata/sign_bad_checksum_exp_fail.json new file mode 100644 index 000000000..21ba7b3fc --- /dev/null +++ b/cmd/clef/testdata/sign_bad_checksum_exp_fail.json @@ -0,0 +1,17 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from":"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192", + "to":"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192", + "gas": "0x333", + "gasPrice": 
"0x123", + "nonce": "0x0", + "value": "0x10", + "data": + "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/clef/testdata/sign_normal_exp_ok.json b/cmd/clef/testdata/sign_normal_exp_ok.json new file mode 100644 index 000000000..7f3a9202a --- /dev/null +++ b/cmd/clef/testdata/sign_normal_exp_ok.json @@ -0,0 +1,17 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from":"0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "to":"0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "gas": "0x333", + "gasPrice": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": + "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/devp2p/README.md b/cmd/devp2p/README.md index 2763c7508..7f816b602 100644 --- a/cmd/devp2p/README.md +++ b/cmd/devp2p/README.md @@ -30,6 +30,29 @@ Run `devp2p dns to-route53 ` to publish a tree to Amazon Route53. You can find more information about these commands in the [DNS Discovery Setup Guide][dns-tutorial]. +### Node Set Utilities + +There are several commands for working with JSON node set files. These files are generated +by the discovery crawlers and DNS client commands. Node sets also used as the input of the +DNS deployer commands. + +Run `devp2p nodeset info ` to display statistics of a node set. + +Run `devp2p nodeset filter ` to write a new, filtered node +set to standard output. The following filters are supported: + +- `-limit ` limits the output set to N entries, taking the top N nodes by score +- `-ip ` filters nodes by IP subnet +- `-min-age ` filters nodes by 'first seen' time +- `-eth-network ` filters nodes by "eth" ENR entry +- `-les-server` filters nodes by LES server support +- `-snap` filters nodes by snap protocol support + +For example, given a node set in `nodes.json`, you could create a filtered set containing +up to 20 eth mainnet nodes which also support snap sync using this command: + + devp2p nodeset filter nodes.json -eth-network mainnet -snap -limit 20 + ### Discovery v4 Utilities The `devp2p discv4 ...` command family deals with the [Node Discovery v4][discv4] @@ -81,6 +104,37 @@ Now get the ENR of your node and store it in the `NODE` environment variable. Start the test by running `devp2p discv5 test -listen1 127.0.0.1 -listen2 127.0.0.2 $NODE`. +### Eth Protocol Test Suite + +The Eth Protocol test suite is a conformance test suite for the [eth protocol][eth]. + +To run the eth protocol test suite against your implementation, the node needs to be initialized as such: + +1. initialize the geth node with the `genesis.json` file contained in the `testdata` directory +2. import the `halfchain.rlp` file in the `testdata` directory +3. run geth with the following flags: +``` +geth --datadir --nodiscover --nat=none --networkid 19763 --verbosity 5 +``` + +Then, run the following command, replacing `` with the enode of the geth node: + ``` + devp2p rlpx eth-test cmd/devp2p/internal/ethtest/testdata/chain.rlp cmd/devp2p/internal/ethtest/testdata/genesis.json +``` + +Repeat the above process (re-initialising the node) in order to run the Eth Protocol test suite again. + +#### Eth66 Test Suite + +The Eth66 test suite is also a conformance test suite for the eth 66 protocol version specifically. 
+To run the eth66 protocol test suite, initialize a geth node as described above and run the following command, +replacing `` with the enode of the geth node: + + ``` + devp2p rlpx eth66-test cmd/devp2p/internal/ethtest/testdata/chain.rlp cmd/devp2p/internal/ethtest/testdata/genesis.json +``` + +[eth]: https://github.com/ethereum/devp2p/blob/master/caps/eth.md [dns-tutorial]: https://geth.ethereum.org/docs/developers/dns-discovery-setup [discv4]: https://github.com/ethereum/devp2p/tree/master/discv4.md [discv5]: https://github.com/ethereum/devp2p/tree/master/discv5/discv5.md diff --git a/cmd/devp2p/discv4cmd.go b/cmd/devp2p/discv4cmd.go index 44186d2a0..5e4c5ed5d 100644 --- a/cmd/devp2p/discv4cmd.go +++ b/cmd/devp2p/discv4cmd.go @@ -19,14 +19,12 @@ package main import ( "fmt" "net" - "os" "strings" "time" "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" @@ -82,7 +80,13 @@ var ( Name: "test", Usage: "Runs tests against a node", Action: discv4Test, - Flags: []cli.Flag{remoteEnodeFlag, testPatternFlag, testListen1Flag, testListen2Flag}, + Flags: []cli.Flag{ + remoteEnodeFlag, + testPatternFlag, + testTAPFlag, + testListen1Flag, + testListen2Flag, + }, } ) @@ -113,20 +117,6 @@ var ( Usage: "Enode of the remote node under test", EnvVar: "REMOTE_ENODE", } - testPatternFlag = cli.StringFlag{ - Name: "run", - Usage: "Pattern of test suite(s) to run", - } - testListen1Flag = cli.StringFlag{ - Name: "listen1", - Usage: "IP address of the first tester", - Value: v4test.Listen1, - } - testListen2Flag = cli.StringFlag{ - Name: "listen2", - Usage: "IP address of the second tester", - Value: v4test.Listen2, - } ) func discv4Ping(ctx *cli.Context) error { @@ -213,6 +203,7 @@ func discv4Crawl(ctx *cli.Context) error { return nil } +// discv4Test runs the protocol test suite. func discv4Test(ctx *cli.Context) error { // Configure test package globals. if !ctx.IsSet(remoteEnodeFlag.Name) { @@ -221,18 +212,7 @@ func discv4Test(ctx *cli.Context) error { v4test.Remote = ctx.String(remoteEnodeFlag.Name) v4test.Listen1 = ctx.String(testListen1Flag.Name) v4test.Listen2 = ctx.String(testListen2Flag.Name) - - // Filter and run test cases. - tests := v4test.AllTests - if ctx.IsSet(testPatternFlag.Name) { - tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name)) - } - results := utesting.RunTests(tests, os.Stdout) - if fails := utesting.CountFailures(results); fails > 0 { - return fmt.Errorf("%v/%v tests passed.", len(tests)-fails, len(tests)) - } - fmt.Printf("%v/%v passed\n", len(tests), len(tests)) - return nil + return runTests(ctx, v4test.AllTests) } // startV4 starts an ephemeral discovery V4 node. 
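For reference, the eth protocol test suite steps documented in the `cmd/devp2p/README.md` hunk above can be chained into a single shell session. This is a minimal sketch only: the data directory, the backgrounding of geth, and the `<enode>` placeholder are assumptions on top of the README and not part of this change; the flags and paths are taken from the README itself.

```
# Sketch only: DATADIR and the <enode> placeholder are assumptions, flags come from the README above.
DATADIR=/tmp/ethtest-node
TESTDATA=cmd/devp2p/internal/ethtest/testdata

geth --datadir "$DATADIR" init "$TESTDATA/genesis.json"      # 1. initialize with the test genesis
geth --datadir "$DATADIR" import "$TESTDATA/halfchain.rlp"   # 2. pre-import half of the test chain
geth --datadir "$DATADIR" --nodiscover --nat=none --networkid 19763 --verbosity 5 &   # 3. start the node under test

# Copy the enode:// URL printed in geth's startup log, then run the suite against it
# (use `devp2p rlpx eth66-test` with the same arguments for the eth/66 variant):
devp2p rlpx eth-test <enode> "$TESTDATA/chain.rlp" "$TESTDATA/genesis.json"
```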
diff --git a/cmd/devp2p/discv5cmd.go b/cmd/devp2p/discv5cmd.go index 1d7442144..e20d7c9cf 100644 --- a/cmd/devp2p/discv5cmd.go +++ b/cmd/devp2p/discv5cmd.go @@ -18,13 +18,10 @@ package main import ( "fmt" - "os" "time" "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/internal/utesting" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/discover" "gopkg.in/urfave/cli.v1" ) @@ -62,7 +59,12 @@ var ( Name: "test", Usage: "Runs protocol tests against a node", Action: discv5Test, - Flags: []cli.Flag{testPatternFlag, testListen1Flag, testListen2Flag}, + Flags: []cli.Flag{ + testPatternFlag, + testTAPFlag, + testListen1Flag, + testListen2Flag, + }, } discv5ListenCommand = cli.Command{ Name: "listen", @@ -114,28 +116,14 @@ func discv5Crawl(ctx *cli.Context) error { return nil } +// discv5Test runs the protocol test suite. func discv5Test(ctx *cli.Context) error { - // Disable logging unless explicitly enabled. - if !ctx.GlobalIsSet("verbosity") && !ctx.GlobalIsSet("vmodule") { - log.Root().SetHandler(log.DiscardHandler()) - } - - // Filter and run test cases. suite := &v5test.Suite{ Dest: getNodeArg(ctx), Listen1: ctx.String(testListen1Flag.Name), Listen2: ctx.String(testListen2Flag.Name), } - tests := suite.AllTests() - if ctx.IsSet(testPatternFlag.Name) { - tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name)) - } - results := utesting.RunTests(tests, os.Stdout) - if fails := utesting.CountFailures(results); fails > 0 { - return fmt.Errorf("%v/%v tests passed.", len(tests)-fails, len(tests)) - } - fmt.Printf("%v/%v passed\n", len(tests), len(tests)) - return nil + return runTests(ctx, suite.AllTests()) } func discv5Listen(ctx *cli.Context) error { diff --git a/cmd/devp2p/dns_cloudflare.go b/cmd/devp2p/dns_cloudflare.go index a4d10dcfd..596254df9 100644 --- a/cmd/devp2p/dns_cloudflare.go +++ b/cmd/devp2p/dns_cloudflare.go @@ -17,6 +17,7 @@ package main import ( + "context" "fmt" "strings" @@ -79,7 +80,7 @@ func (c *cloudflareClient) checkZone(name string) error { c.zoneID = id } log.Info(fmt.Sprintf("Checking Permissions on zone %s", c.zoneID)) - zone, err := c.ZoneDetails(c.zoneID) + zone, err := c.ZoneDetails(context.Background(), c.zoneID) if err != nil { return err } @@ -112,7 +113,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string) records = lrecords log.Info(fmt.Sprintf("Retrieving existing TXT records on %s", name)) - entries, err := c.DNSRecords(c.zoneID, cloudflare.DNSRecord{Type: "TXT"}) + entries, err := c.DNSRecords(context.Background(), c.zoneID, cloudflare.DNSRecord{Type: "TXT"}) if err != nil { return err } @@ -134,14 +135,15 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string) if path != name { ttl = treeNodeTTL // Max TTL permitted by Cloudflare } - _, err = c.CreateDNSRecord(c.zoneID, cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}) + record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl} + _, err = c.CreateDNSRecord(context.Background(), c.zoneID, record) } else if old.Content != val { // Entry already exists, only change its content. 
log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val)) old.Content = val - err = c.UpdateDNSRecord(c.zoneID, old.ID, old) + err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old) } else { - log.Info(fmt.Sprintf("Skipping %s = %q", path, val)) + log.Debug(fmt.Sprintf("Skipping %s = %q", path, val)) } if err != nil { return fmt.Errorf("failed to publish %s: %v", path, err) @@ -155,7 +157,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string) } // Stale entry, nuke it. log.Info(fmt.Sprintf("Deleting %s = %q", path, entry.Content)) - if err := c.DeleteDNSRecord(c.zoneID, entry.ID); err != nil { + if err := c.DeleteDNSRecord(context.Background(), c.zoneID, entry.ID); err != nil { return fmt.Errorf("failed to delete %s: %v", path, err) } } diff --git a/cmd/devp2p/dns_route53.go b/cmd/devp2p/dns_route53.go index c5f99529b..1d4f975dd 100644 --- a/cmd/devp2p/dns_route53.go +++ b/cmd/devp2p/dns_route53.go @@ -17,16 +17,19 @@ package main import ( + "context" "errors" "fmt" "sort" "strconv" "strings" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/route53" + "github.com/aws/aws-sdk-go-v2/service/route53/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/dnsdisc" "gopkg.in/urfave/cli.v1" @@ -38,6 +41,7 @@ const ( // https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-requests-changeresourcerecordsets route53ChangeSizeLimit = 32000 route53ChangeCountLimit = 1000 + maxRetryLimit = 60 ) var ( @@ -55,10 +59,15 @@ var ( Name: "zone-id", Usage: "Route53 Zone ID", } + route53RegionFlag = cli.StringFlag{ + Name: "aws-region", + Usage: "AWS Region", + Value: "eu-central-1", + } ) type route53Client struct { - api *route53.Route53 + api *route53.Client zoneID string } @@ -72,15 +81,16 @@ func newRoute53Client(ctx *cli.Context) *route53Client { akey := ctx.String(route53AccessKeyFlag.Name) asec := ctx.String(route53AccessSecretFlag.Name) if akey == "" || asec == "" { - exit(fmt.Errorf("need Route53 Access Key ID and secret proceed")) + exit(fmt.Errorf("need Route53 Access Key ID and secret to proceed")) } - config := &aws.Config{Credentials: credentials.NewStaticCredentials(akey, asec, "")} - session, err := session.NewSession(config) + creds := aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(akey, asec, "")) + cfg, err := config.LoadDefaultConfig(context.Background(), config.WithCredentialsProvider(creds)) if err != nil { - exit(fmt.Errorf("can't create AWS session: %v", err)) + exit(fmt.Errorf("can't initialize AWS configuration: %v", err)) } + cfg.Region = ctx.String(route53RegionFlag.Name) return &route53Client{ - api: route53.New(session), + api: route53.NewFromConfig(cfg), zoneID: ctx.String(route53ZoneIDFlag.Name), } } @@ -97,31 +107,74 @@ func (c *route53Client) deploy(name string, t *dnsdisc.Tree) error { return err } log.Info(fmt.Sprintf("Found %d TXT records", len(existing))) - records := t.ToTXT(name) changes := c.computeChanges(name, records, existing) + + // Submit to API. + comment := fmt.Sprintf("enrtree update of %s at seq %d", name, t.Seq()) + return c.submitChanges(changes, comment) +} + +// deleteDomain removes all TXT records of the given domain. 
+func (c *route53Client) deleteDomain(name string) error { + if err := c.checkZone(name); err != nil { + return err + } + + // Compute DNS changes. + existing, err := c.collectRecords(name) + if err != nil { + return err + } + log.Info(fmt.Sprintf("Found %d TXT records", len(existing))) + changes := makeDeletionChanges(existing, nil) + + // Submit to API. + comment := "enrtree delete of " + name + return c.submitChanges(changes, comment) +} + +// submitChanges submits the given DNS changes to Route53. +func (c *route53Client) submitChanges(changes []types.Change, comment string) error { if len(changes) == 0 { log.Info("No DNS changes needed") return nil } - // Submit change batches. + var err error batches := splitChanges(changes, route53ChangeSizeLimit, route53ChangeCountLimit) + changesToCheck := make([]*route53.ChangeResourceRecordSetsOutput, len(batches)) for i, changes := range batches { log.Info(fmt.Sprintf("Submitting %d changes to Route53", len(changes))) - batch := new(route53.ChangeBatch) - batch.SetChanges(changes) - batch.SetComment(fmt.Sprintf("enrtree update %d/%d of %s at seq %d", i+1, len(batches), name, t.Seq())) + batch := &types.ChangeBatch{ + Changes: changes, + Comment: aws.String(fmt.Sprintf("%s (%d/%d)", comment, i+1, len(batches))), + } req := &route53.ChangeResourceRecordSetsInput{HostedZoneId: &c.zoneID, ChangeBatch: batch} - resp, err := c.api.ChangeResourceRecordSets(req) + changesToCheck[i], err = c.api.ChangeResourceRecordSets(context.TODO(), req) if err != nil { return err } + } - log.Info(fmt.Sprintf("Waiting for change request %s", *resp.ChangeInfo.Id)) - wreq := &route53.GetChangeInput{Id: resp.ChangeInfo.Id} - if err := c.api.WaitUntilResourceRecordSetsChanged(wreq); err != nil { - return err + // Wait for all change batches to propagate. + for _, change := range changesToCheck { + log.Info(fmt.Sprintf("Waiting for change request %s", *change.ChangeInfo.Id)) + wreq := &route53.GetChangeInput{Id: change.ChangeInfo.Id} + var count int + for { + wresp, err := c.api.GetChange(context.TODO(), wreq) + if err != nil { + return err + } + + count++ + + if wresp.ChangeInfo.Status == types.ChangeStatusInsync || count >= maxRetryLimit { + break + } + + time.Sleep(30 * time.Second) } } return nil @@ -140,7 +193,7 @@ func (c *route53Client) findZoneID(name string) (string, error) { log.Info(fmt.Sprintf("Finding Route53 Zone ID for %s", name)) var req route53.ListHostedZonesByNameInput for { - resp, err := c.api.ListHostedZonesByName(&req) + resp, err := c.api.ListHostedZonesByName(context.TODO(), &req) if err != nil { return "", err } @@ -149,7 +202,7 @@ func (c *route53Client) findZoneID(name string) (string, error) { return *zone.Id, nil } } - if !*resp.IsTruncated { + if !resp.IsTruncated { break } req.DNSName = resp.NextDNSName @@ -158,8 +211,9 @@ func (c *route53Client) findZoneID(name string) (string, error) { return "", errors.New("can't find zone ID for " + name) } -// computeChanges creates DNS changes for the given record. -func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []*route53.Change { +// computeChanges creates DNS changes for the given set of DNS discovery records. +// The 'existing' arg is the set of records that already exist on Route53. +func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []types.Change { // Convert all names to lowercase. 
lrecords := make(map[string]string, len(records)) for name, r := range records { @@ -167,58 +221,70 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e } records = lrecords - var changes []*route53.Change - for path, val := range records { + var changes []types.Change + for path, newValue := range records { + prevRecords, exists := existing[path] + prevValue := strings.Join(prevRecords.values, "") + + // prevValue contains quoted strings, encode newValue to compare. + newValue = splitTXT(newValue) + + // Assign TTL. ttl := int64(rootTTL) if path != name { ttl = int64(treeNodeTTL) } - prevRecords, exists := existing[path] - prevValue := strings.Join(prevRecords.values, "") if !exists { // Entry is unknown, push a new one - log.Info(fmt.Sprintf("Creating %s = %q", path, val)) - changes = append(changes, newTXTChange("CREATE", path, ttl, splitTXT(val))) - } else if prevValue != val || prevRecords.ttl != ttl { + log.Info(fmt.Sprintf("Creating %s = %s", path, newValue)) + changes = append(changes, newTXTChange("CREATE", path, ttl, newValue)) + } else if prevValue != newValue || prevRecords.ttl != ttl { // Entry already exists, only change its content. - log.Info(fmt.Sprintf("Updating %s from %q to %q", path, prevValue, val)) - changes = append(changes, newTXTChange("UPSERT", path, ttl, splitTXT(val))) + log.Info(fmt.Sprintf("Updating %s from %s to %s", path, prevValue, newValue)) + changes = append(changes, newTXTChange("UPSERT", path, ttl, newValue)) } else { - log.Info(fmt.Sprintf("Skipping %s = %q", path, val)) + log.Debug(fmt.Sprintf("Skipping %s = %s", path, newValue)) } } // Iterate over the old records and delete anything stale. - for path, set := range existing { - if _, ok := records[path]; ok { + changes = append(changes, makeDeletionChanges(existing, records)...) + + // Ensure changes are in the correct order. + sortChanges(changes) + return changes +} + +// makeDeletionChanges creates record changes which delete all records not contained in 'keep'. +func makeDeletionChanges(records map[string]recordSet, keep map[string]string) []types.Change { + var changes []types.Change + for path, set := range records { + if _, ok := keep[path]; ok { continue } - // Stale entry, nuke it. - log.Info(fmt.Sprintf("Deleting %s = %q", path, strings.Join(set.values, ""))) + log.Info(fmt.Sprintf("Deleting %s = %s", path, strings.Join(set.values, ""))) changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values...)) } - - sortChanges(changes) return changes } // sortChanges ensures DNS changes are in leaf-added -> root-changed -> leaf-deleted order. -func sortChanges(changes []*route53.Change) { +func sortChanges(changes []types.Change) { score := map[string]int{"CREATE": 1, "UPSERT": 2, "DELETE": 3} sort.Slice(changes, func(i, j int) bool { - if *changes[i].Action == *changes[j].Action { + if changes[i].Action == changes[j].Action { return *changes[i].ResourceRecordSet.Name < *changes[j].ResourceRecordSet.Name } - return score[*changes[i].Action] < score[*changes[j].Action] + return score[string(changes[i].Action)] < score[string(changes[j].Action)] }) } // splitChanges splits up DNS changes such that each change batch // is smaller than the given RDATA limit. 
-func splitChanges(changes []*route53.Change, sizeLimit, countLimit int) [][]*route53.Change { +func splitChanges(changes []types.Change, sizeLimit, countLimit int) [][]types.Change { var ( - batches [][]*route53.Change + batches [][]types.Change batchSize int batchCount int ) @@ -241,7 +307,7 @@ func splitChanges(changes []*route53.Change, sizeLimit, countLimit int) [][]*rou } // changeSize returns the RDATA size of a DNS change. -func changeSize(ch *route53.Change) int { +func changeSize(ch types.Change) int { size := 0 for _, rr := range ch.ResourceRecordSet.ResourceRecords { if rr.Value != nil { @@ -251,8 +317,8 @@ func changeSize(ch *route53.Change) int { return size } -func changeCount(ch *route53.Change) int { - if *ch.Action == "UPSERT" { +func changeCount(ch types.Change) int { + if ch.Action == types.ChangeActionUpsert { return 2 } return 1 @@ -260,13 +326,17 @@ func changeCount(ch *route53.Change) int { // collectRecords collects all TXT records below the given name. func (c *route53Client) collectRecords(name string) (map[string]recordSet, error) { - log.Info(fmt.Sprintf("Retrieving existing TXT records on %s (%s)", name, c.zoneID)) var req route53.ListResourceRecordSetsInput - req.SetHostedZoneId(c.zoneID) + req.HostedZoneId = &c.zoneID existing := make(map[string]recordSet) - err := c.api.ListResourceRecordSetsPages(&req, func(resp *route53.ListResourceRecordSetsOutput, last bool) bool { + for page := 0; ; page++ { + log.Info("Loading existing TXT records", "name", name, "zone", c.zoneID, "page", page) + resp, err := c.api.ListResourceRecordSets(context.TODO(), &req) + if err != nil { + return existing, err + } for _, set := range resp.ResourceRecordSets { - if !isSubdomain(*set.Name, name) || *set.Type != "TXT" { + if !isSubdomain(*set.Name, name) || set.Type != types.RRTypeTxt { continue } s := recordSet{ttl: *set.TTL} @@ -276,28 +346,44 @@ func (c *route53Client) collectRecords(name string) (map[string]recordSet, error name := strings.TrimSuffix(*set.Name, ".") existing[name] = s } - return true - }) - return existing, err + + if !resp.IsTruncated { + break + } + // Set the cursor to the next batch. From the AWS docs: + // + // To display the next page of results, get the values of NextRecordName, + // NextRecordType, and NextRecordIdentifier (if any) from the response. Then submit + // another ListResourceRecordSets request, and specify those values for + // StartRecordName, StartRecordType, and StartRecordIdentifier. + req.StartRecordIdentifier = resp.NextRecordIdentifier + req.StartRecordName = resp.NextRecordName + req.StartRecordType = resp.NextRecordType + } + + return existing, nil } // newTXTChange creates a change to a TXT record. 
-func newTXTChange(action, name string, ttl int64, values ...string) *route53.Change { - var c route53.Change - var r route53.ResourceRecordSet - var rrs []*route53.ResourceRecord +func newTXTChange(action, name string, ttl int64, values ...string) types.Change { + r := types.ResourceRecordSet{ + Type: types.RRTypeTxt, + Name: &name, + TTL: &ttl, + } + var rrs []types.ResourceRecord for _, val := range values { - rr := new(route53.ResourceRecord) - rr.SetValue(val) + var rr types.ResourceRecord + rr.Value = aws.String(val) rrs = append(rrs, rr) } - r.SetType("TXT") - r.SetName(name) - r.SetTTL(ttl) - r.SetResourceRecords(rrs) - c.SetAction(action) - c.SetResourceRecordSet(&r) - return &c + + r.ResourceRecords = rrs + + return types.Change{ + Action: types.ChangeAction(action), + ResourceRecordSet: &r, + } } // isSubdomain returns true if name is a subdomain of domain. diff --git a/cmd/devp2p/dns_route53_test.go b/cmd/devp2p/dns_route53_test.go index a2ef3791f..e6eb516e6 100644 --- a/cmd/devp2p/dns_route53_test.go +++ b/cmd/devp2p/dns_route53_test.go @@ -20,7 +20,7 @@ import ( "reflect" "testing" - "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go-v2/service/route53/types" ) // This test checks that computeChanges/splitChanges create DNS changes in @@ -43,93 +43,93 @@ func TestRoute53ChangeSort(t *testing.T) { "MHTDO6TMUBRIA2XWG5LUDACK24.n": "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o", } - wantChanges := []*route53.Change{ + wantChanges := []types.Change{ { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("2xs2367yhaxjfglzhvawlqd4zy.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("c7hrfpf3blgf3yr4dy5kx3smbe.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("h4fht4b454p6uxfd7jcyq5pwdy.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("jwxydbpxywg6fx3gmdibfa6cj4.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: 
"TXT", }, }, { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("mhtdo6tmubria2xwg5ludack24.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("UPSERT"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "UPSERT", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`), }}, TTL: ip(rootTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("DELETE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "DELETE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("2kfjogvxdqtxxugbh7gs7naaai.n"), - ResourceRecords: []*route53.ResourceRecord{ + ResourceRecords: []types.ResourceRecord{ {Value: sp(`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`)}, }, TTL: ip(3333), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("DELETE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "DELETE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("fdxn3sn67na5dka4j2gok7bvqi.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enrtree-branch:"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, } @@ -141,7 +141,7 @@ func TestRoute53ChangeSort(t *testing.T) { } // Check splitting according to size. - wantSplit := [][]*route53.Change{ + wantSplit := [][]types.Change{ wantChanges[:4], wantChanges[4:6], wantChanges[6:], @@ -152,7 +152,7 @@ func TestRoute53ChangeSort(t *testing.T) { } // Check splitting according to count. - wantSplit = [][]*route53.Change{ + wantSplit = [][]types.Change{ wantChanges[:5], wantChanges[5:], } @@ -162,5 +162,29 @@ func TestRoute53ChangeSort(t *testing.T) { } } +// This test checks that computeChanges compares the quoted value of the records correctly. +func TestRoute53NoChange(t *testing.T) { + // Existing record set. + testTree0 := map[string]recordSet{ + "n": {ttl: rootTTL, values: []string{ + `"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`, + }}, + "2xs2367yhaxjfglzhvawlqd4zy.n": {ttl: treeNodeTTL, values: []string{ + `"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`, + }}, + } + // New set. 
+ testTree1 := map[string]string{ + "n": "enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA", + "2XS2367YHAXJFGLZHVAWLQD4ZY.n": "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA", + } + + var client route53Client + changes := client.computeChanges("n", testTree1, testTree0) + if len(changes) > 0 { + t.Fatalf("wrong changes (got %d, want 0)", len(changes)) + } +} + func sp(s string) *string { return &s } func ip(i int64) *int64 { return &i } diff --git a/cmd/devp2p/dnscmd.go b/cmd/devp2p/dnscmd.go index 13110f21c..66deef56e 100644 --- a/cmd/devp2p/dnscmd.go +++ b/cmd/devp2p/dnscmd.go @@ -30,7 +30,7 @@ import ( "github.com/ethereum/go-ethereum/console/prompt" "github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/enode" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var ( @@ -43,6 +43,7 @@ var ( dnsTXTCommand, dnsCloudflareCommand, dnsRoute53Command, + dnsRoute53NukeCommand, }, } dnsSyncCommand = cli.Command{ @@ -77,7 +78,24 @@ var ( Usage: "Deploy DNS TXT records to Amazon Route53", ArgsUsage: "", Action: dnsToRoute53, - Flags: []cli.Flag{route53AccessKeyFlag, route53AccessSecretFlag, route53ZoneIDFlag}, + Flags: []cli.Flag{ + route53AccessKeyFlag, + route53AccessSecretFlag, + route53ZoneIDFlag, + route53RegionFlag, + }, + } + dnsRoute53NukeCommand = cli.Command{ + Name: "nuke-route53", + Usage: "Deletes DNS TXT records of a subdomain on Amazon Route53", + ArgsUsage: "", + Action: dnsNukeRoute53, + Flags: []cli.Flag{ + route53AccessKeyFlag, + route53AccessSecretFlag, + route53ZoneIDFlag, + route53RegionFlag, + }, } ) @@ -169,6 +187,9 @@ func dnsSign(ctx *cli.Context) error { return nil } +// directoryName returns the directory name of the given path. +// For example, when dir is "foo/bar", it returns "bar". +// When dir is ".", and the working directory is "example/foo", it returns "foo". func directoryName(dir string) string { abs, err := filepath.Abs(dir) if err != nil { @@ -177,7 +198,7 @@ func directoryName(dir string) string { return filepath.Base(abs) } -// dnsToTXT peforms dnsTXTCommand. +// dnsToTXT performs dnsTXTCommand. func dnsToTXT(ctx *cli.Context) error { if ctx.NArg() < 1 { return fmt.Errorf("need tree definition directory as argument") @@ -194,9 +215,9 @@ func dnsToTXT(ctx *cli.Context) error { return nil } -// dnsToCloudflare peforms dnsCloudflareCommand. +// dnsToCloudflare performs dnsCloudflareCommand. func dnsToCloudflare(ctx *cli.Context) error { - if ctx.NArg() < 1 { + if ctx.NArg() != 1 { return fmt.Errorf("need tree definition directory as argument") } domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0)) @@ -207,9 +228,9 @@ func dnsToCloudflare(ctx *cli.Context) error { return client.deploy(domain, t) } -// dnsToRoute53 peforms dnsRoute53Command. +// dnsToRoute53 performs dnsRoute53Command. func dnsToRoute53(ctx *cli.Context) error { - if ctx.NArg() < 1 { + if ctx.NArg() != 1 { return fmt.Errorf("need tree definition directory as argument") } domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0)) @@ -220,6 +241,15 @@ func dnsToRoute53(ctx *cli.Context) error { return client.deploy(domain, t) } +// dnsNukeRoute53 performs dnsRoute53NukeCommand. 
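+// It takes the domain name to wipe as its only argument.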
+func dnsNukeRoute53(ctx *cli.Context) error { + if ctx.NArg() != 1 { + return fmt.Errorf("need domain name as argument") + } + client := newRoute53Client(ctx) + return client.deleteDomain(ctx.Args().First()) +} + // loadSigningKey loads a private key in Ethereum keystore format. func loadSigningKey(keyfile string) *ecdsa.PrivateKey { keyjson, err := ioutil.ReadFile(keyfile) diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go index 654888a4c..34a20c515 100644 --- a/cmd/devp2p/internal/ethtest/chain.go +++ b/cmd/devp2p/internal/ethtest/chain.go @@ -34,6 +34,7 @@ import ( ) type Chain struct { + genesis core.Genesis blocks []*types.Block chainConfig *params.ChainConfig } @@ -53,10 +54,24 @@ func (c *Chain) Len() int { return len(c.blocks) } -// TD calculates the total difficulty of the chain. -func (c *Chain) TD(height int) *big.Int { // TODO later on channge scheme so that the height is included in range +// TD calculates the total difficulty of the chain at the +// chain head. +func (c *Chain) TD() *big.Int { sum := big.NewInt(0) - for _, block := range c.blocks[:height] { + for _, block := range c.blocks[:c.Len()] { + sum.Add(sum, block.Difficulty()) + } + return sum +} + +// TotalDifficultyAt calculates the total difficulty of the chain +// at the given block height. +func (c *Chain) TotalDifficultyAt(height int) *big.Int { + sum := big.NewInt(0) + if height >= c.Len() { + return sum + } + for _, block := range c.blocks[:height+1] { sum.Add(sum, block.Difficulty()) } return sum @@ -124,13 +139,40 @@ func (c *Chain) GetHeaders(req GetBlockHeaders) (BlockHeaders, error) { // loadChain takes the given chain.rlp file, and decodes and returns // the blocks from the file. func loadChain(chainfile string, genesis string) (*Chain, error) { - // Open the file handle and potentially unwrap the gzip stream + gen, err := loadGenesis(genesis) + if err != nil { + return nil, err + } + gblock := gen.ToBlock(nil) + + blocks, err := blocksFromFile(chainfile, gblock) + if err != nil { + return nil, err + } + + c := &Chain{genesis: gen, blocks: blocks, chainConfig: gen.Config} + return c, nil +} + +func loadGenesis(genesisFile string) (core.Genesis, error) { + chainConfig, err := ioutil.ReadFile(genesisFile) + if err != nil { + return core.Genesis{}, err + } + var gen core.Genesis + if err := json.Unmarshal(chainConfig, &gen); err != nil { + return core.Genesis{}, err + } + return gen, nil +} + +func blocksFromFile(chainfile string, gblock *types.Block) ([]*types.Block, error) { + // Load chain.rlp. 
fh, err := os.Open(chainfile) if err != nil { return nil, err } defer fh.Close() - var reader io.Reader = fh if strings.HasSuffix(chainfile, ".gz") { if reader, err = gzip.NewReader(reader); err != nil { @@ -138,29 +180,19 @@ func loadChain(chainfile string, genesis string) (*Chain, error) { } } stream := rlp.NewStream(reader, 0) - var blocks []*types.Block + var blocks = make([]*types.Block, 1) + blocks[0] = gblock for i := 0; ; i++ { var b types.Block if err := stream.Decode(&b); err == io.EOF { break } else if err != nil { - return nil, fmt.Errorf("at block %d: %v", i, err) + return nil, fmt.Errorf("at block index %d: %v", i, err) + } + if b.NumberU64() != uint64(i+1) { + return nil, fmt.Errorf("block at index %d has wrong number %d", i, b.NumberU64()) } blocks = append(blocks, &b) } - - // Open the file handle and potentially unwrap the gzip stream - chainConfig, err := ioutil.ReadFile(genesis) - if err != nil { - return nil, err - } - var gen core.Genesis - if err := json.Unmarshal(chainConfig, &gen); err != nil { - return nil, err - } - - return &Chain{ - blocks: blocks, - chainConfig: gen.Config, - }, nil + return blocks, nil } diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go index c8b977d23..ec98833ab 100644 --- a/cmd/devp2p/internal/ethtest/chain_test.go +++ b/cmd/devp2p/internal/ethtest/chain_test.go @@ -21,6 +21,7 @@ import ( "strconv" "testing" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/p2p" "github.com/stretchr/testify/assert" ) @@ -34,7 +35,31 @@ func TestEthProtocolNegotiation(t *testing.T) { expected uint32 }{ { - conn: &Conn{}, + conn: &Conn{ + ourHighestProtoVersion: 65, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + expected: uint32(65), + }, + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + expected: uint32(65), + }, + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, caps: []p2p.Cap{ {Name: "eth", Version: 63}, {Name: "eth", Version: 64}, @@ -43,7 +68,20 @@ func TestEthProtocolNegotiation(t *testing.T) { expected: uint32(65), }, { - conn: &Conn{}, + conn: &Conn{ + ourHighestProtoVersion: 64, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + expected: 64, + }, + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, caps: []p2p.Cap{ {Name: "eth", Version: 0}, {Name: "eth", Version: 89}, @@ -52,7 +90,20 @@ func TestEthProtocolNegotiation(t *testing.T) { expected: uint32(65), }, { - conn: &Conn{}, + conn: &Conn{ + ourHighestProtoVersion: 64, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "wrongProto", Version: 65}, + }, + expected: uint32(64), + }, + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, caps: []p2p.Cap{ {Name: "eth", Version: 63}, {Name: "eth", Version: 64}, @@ -65,7 +116,7 @@ func TestEthProtocolNegotiation(t *testing.T) { for i, tt := range tests { t.Run(strconv.Itoa(i), func(t *testing.T) { tt.conn.negotiateEthProtocol(tt.caps) - assert.Equal(t, tt.expected, uint32(tt.conn.ethProtocolVersion)) + assert.Equal(t, tt.expected, uint32(tt.conn.negotiatedProtoVersion)) }) } } @@ -73,7 +124,7 @@ func TestEthProtocolNegotiation(t *testing.T) { // TestChain_GetHeaders tests whether the test suite can correctly // respond to a GetBlockHeaders request from a node. 
func TestChain_GetHeaders(t *testing.T) { - chainFile, err := filepath.Abs("./testdata/chain.rlp.gz") + chainFile, err := filepath.Abs("./testdata/chain.rlp") if err != nil { t.Fatal(err) } @@ -93,7 +144,7 @@ func TestChain_GetHeaders(t *testing.T) { }{ { req: GetBlockHeaders{ - Origin: hashOrNumber{ + Origin: eth.HashOrNumber{ Number: uint64(2), }, Amount: uint64(5), @@ -110,7 +161,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - Origin: hashOrNumber{ + Origin: eth.HashOrNumber{ Number: uint64(chain.Len() - 1), }, Amount: uint64(3), @@ -125,7 +176,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - Origin: hashOrNumber{ + Origin: eth.HashOrNumber{ Hash: chain.Head().Hash(), }, Amount: uint64(1), diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go new file mode 100644 index 000000000..6f7365483 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -0,0 +1,749 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "fmt" + "net" + "reflect" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/rlpx" +) + +var ( + pretty = spew.ConfigState{ + Indent: " ", + DisableCapacities: true, + DisablePointerAddresses: true, + SortKeys: true, + } + timeout = 20 * time.Second +) + +// Is_66 checks if the node supports the eth66 protocol version, +// and if not, exits the test suite +func (s *Suite) Is_66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + if err := conn.handshake(); err != nil { + t.Fatalf("handshake failed: %v", err) + } + if conn.negotiatedProtoVersion < 66 { + t.Fail() + } +} + +// dial attempts to dial the given node and perform a handshake, +// returning the created Conn if successful. 
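+// The returned connection advertises the eth/64 and eth/65 capabilities and caps the
+// negotiated version at 65; dial66 additionally advertises eth/66.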
+func (s *Suite) dial() (*Conn, error) { + // dial + fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP())) + if err != nil { + return nil, err + } + conn := Conn{Conn: rlpx.NewConn(fd, s.Dest.Pubkey())} + // do encHandshake + conn.ourKey, _ = crypto.GenerateKey() + _, err = conn.Handshake(conn.ourKey) + if err != nil { + conn.Close() + return nil, err + } + // set default p2p capabilities + conn.caps = []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + } + conn.ourHighestProtoVersion = 65 + return &conn, nil +} + +// dial66 attempts to dial the given node and perform a handshake, +// returning the created Conn with additional eth66 capabilities if +// successful +func (s *Suite) dial66() (*Conn, error) { + conn, err := s.dial() + if err != nil { + return nil, fmt.Errorf("dial failed: %v", err) + } + conn.caps = append(conn.caps, p2p.Cap{Name: "eth", Version: 66}) + conn.ourHighestProtoVersion = 66 + return conn, nil +} + +// peer performs both the protocol handshake and the status message +// exchange with the node in order to peer with it. +func (c *Conn) peer(chain *Chain, status *Status) error { + if err := c.handshake(); err != nil { + return fmt.Errorf("handshake failed: %v", err) + } + if _, err := c.statusExchange(chain, status); err != nil { + return fmt.Errorf("status exchange failed: %v", err) + } + return nil +} + +// handshake performs a protocol handshake with the node. +func (c *Conn) handshake() error { + defer c.SetDeadline(time.Time{}) + c.SetDeadline(time.Now().Add(10 * time.Second)) + // write hello to client + pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:] + ourHandshake := &Hello{ + Version: 5, + Caps: c.caps, + ID: pub0, + } + if err := c.Write(ourHandshake); err != nil { + return fmt.Errorf("write to connection failed: %v", err) + } + // read hello from client + switch msg := c.Read().(type) { + case *Hello: + // set snappy if version is at least 5 + if msg.Version >= 5 { + c.SetSnappy(true) + } + c.negotiateEthProtocol(msg.Caps) + if c.negotiatedProtoVersion == 0 { + return fmt.Errorf("unexpected eth protocol version") + } + return nil + default: + return fmt.Errorf("bad handshake: %#v", msg) + } +} + +// negotiateEthProtocol sets the Conn's eth protocol version to highest +// advertised capability from peer. +func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) { + var highestEthVersion uint + for _, capability := range caps { + if capability.Name != "eth" { + continue + } + if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion { + highestEthVersion = capability.Version + } + } + c.negotiatedProtoVersion = highestEthVersion +} + +// statusExchange performs a `Status` message exchange with the given node. 
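+// If the passed status is nil, a default Status message derived from the test chain is sent.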
+func (c *Conn) statusExchange(chain *Chain, status *Status) (Message, error) { + defer c.SetDeadline(time.Time{}) + c.SetDeadline(time.Now().Add(20 * time.Second)) + + // read status message from client + var message Message +loop: + for { + switch msg := c.Read().(type) { + case *Status: + if have, want := msg.Head, chain.blocks[chain.Len()-1].Hash(); have != want { + return nil, fmt.Errorf("wrong head block in status, want: %#x (block %d) have %#x", + want, chain.blocks[chain.Len()-1].NumberU64(), have) + } + if have, want := msg.TD.Cmp(chain.TD()), 0; have != want { + return nil, fmt.Errorf("wrong TD in status: have %v want %v", have, want) + } + if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) { + return nil, fmt.Errorf("wrong fork ID in status: have %v, want %v", have, want) + } + if have, want := msg.ProtocolVersion, c.ourHighestProtoVersion; have != uint32(want) { + return nil, fmt.Errorf("wrong protocol version: have %v, want %v", have, want) + } + message = msg + break loop + case *Disconnect: + return nil, fmt.Errorf("disconnect received: %v", msg.Reason) + case *Ping: + c.Write(&Pong{}) // TODO (renaynay): in the future, this should be an error + // (PINGs should not be a response upon fresh connection) + default: + return nil, fmt.Errorf("bad status message: %s", pretty.Sdump(msg)) + } + } + // make sure eth protocol version is set for negotiation + if c.negotiatedProtoVersion == 0 { + return nil, fmt.Errorf("eth protocol version must be set in Conn") + } + if status == nil { + // default status message + status = &Status{ + ProtocolVersion: uint32(c.negotiatedProtoVersion), + NetworkID: chain.chainConfig.ChainID.Uint64(), + TD: chain.TD(), + Head: chain.blocks[chain.Len()-1].Hash(), + Genesis: chain.blocks[0].Hash(), + ForkID: chain.ForkID(), + } + } + if err := c.Write(status); err != nil { + return nil, fmt.Errorf("write to connection failed: %v", err) + } + return message, nil +} + +// createSendAndRecvConns creates two connections, one for sending messages to the +// node, and one for receiving messages from the node. +func (s *Suite) createSendAndRecvConns(isEth66 bool) (*Conn, *Conn, error) { + var ( + sendConn *Conn + recvConn *Conn + err error + ) + if isEth66 { + sendConn, err = s.dial66() + if err != nil { + return nil, nil, fmt.Errorf("dial failed: %v", err) + } + recvConn, err = s.dial66() + if err != nil { + sendConn.Close() + return nil, nil, fmt.Errorf("dial failed: %v", err) + } + } else { + sendConn, err = s.dial() + if err != nil { + return nil, nil, fmt.Errorf("dial failed: %v", err) + } + recvConn, err = s.dial() + if err != nil { + sendConn.Close() + return nil, nil, fmt.Errorf("dial failed: %v", err) + } + } + return sendConn, recvConn, nil +} + +// readAndServe serves GetBlockHeaders requests while waiting +// on another message from the node. 
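+// Ping messages are answered with Pong and GetBlockHeaders requests are served from the
+// given chain; the first other message (or a timeout error) is returned to the caller.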
+func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message { + start := time.Now() + for time.Since(start) < timeout { + c.SetReadDeadline(time.Now().Add(5 * time.Second)) + switch msg := c.Read().(type) { + case *Ping: + c.Write(&Pong{}) + case *GetBlockHeaders: + req := *msg + headers, err := chain.GetHeaders(req) + if err != nil { + return errorf("could not get headers for inbound header request: %v", err) + } + if err := c.Write(headers); err != nil { + return errorf("could not write to connection: %v", err) + } + default: + return msg + } + } + return errorf("no message received within %v", timeout) +} + +// readAndServe66 serves eth66 GetBlockHeaders requests while waiting +// on another message from the node. +func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Message) { + start := time.Now() + for time.Since(start) < timeout { + c.SetReadDeadline(time.Now().Add(10 * time.Second)) + + reqID, msg := c.Read66() + + switch msg := msg.(type) { + case *Ping: + c.Write(&Pong{}) + case *GetBlockHeaders: + headers, err := chain.GetHeaders(*msg) + if err != nil { + return 0, errorf("could not get headers for inbound header request: %v", err) + } + resp := &eth.BlockHeadersPacket66{ + RequestId: reqID, + BlockHeadersPacket: eth.BlockHeadersPacket(headers), + } + if err := c.Write66(resp, BlockHeaders{}.Code()); err != nil { + return 0, errorf("could not write to connection: %v", err) + } + default: + return reqID, msg + } + } + return 0, errorf("no message received within %v", timeout) +} + +// headersRequest executes the given `GetBlockHeaders` request. +func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bool, reqID uint64) (BlockHeaders, error) { + defer c.SetReadDeadline(time.Time{}) + c.SetReadDeadline(time.Now().Add(20 * time.Second)) + // if on eth66 connection, perform eth66 GetBlockHeaders request + if isEth66 { + return getBlockHeaders66(chain, c, request, reqID) + } + if err := c.Write(request); err != nil { + return nil, err + } + switch msg := c.readAndServe(chain, timeout).(type) { + case *BlockHeaders: + return *msg, nil + default: + return nil, fmt.Errorf("invalid message: %s", pretty.Sdump(msg)) + } +} + +// getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol. +func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) { + // write request + packet := eth.GetBlockHeadersPacket(*request) + req := &eth.GetBlockHeadersPacket66{ + RequestId: id, + GetBlockHeadersPacket: &packet, + } + if err := conn.Write66(req, GetBlockHeaders{}.Code()); err != nil { + return nil, fmt.Errorf("could not write to connection: %v", err) + } + // wait for response + msg := conn.waitForResponse(chain, timeout, req.RequestId) + headers, ok := msg.(BlockHeaders) + if !ok { + return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg)) + } + return headers, nil +} + +// headersMatch returns whether the received headers match the given request +func headersMatch(expected BlockHeaders, headers BlockHeaders) bool { + return reflect.DeepEqual(expected, headers) +} + +// waitForResponse reads from the connection until a response with the expected +// request ID is received.
+func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID uint64) Message { + for { + id, msg := c.readAndServe66(chain, timeout) + if id == requestID { + return msg + } + } +} + +// sendNextBlock broadcasts the next block in the chain and waits +// for the node to propagate the block and import it into its chain. +func (s *Suite) sendNextBlock(isEth66 bool) error { + // set up sending and receiving connections + sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return err + } + defer sendConn.Close() + defer recvConn.Close() + if err = sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // create new block announcement + nextBlock := s.fullChain.blocks[s.chain.Len()] + blockAnnouncement := &NewBlock{ + Block: nextBlock, + TD: s.fullChain.TotalDifficultyAt(s.chain.Len()), + } + // send announcement and wait for node to request the header + if err = s.testAnnounce(sendConn, recvConn, blockAnnouncement); err != nil { + return fmt.Errorf("failed to announce block: %v", err) + } + // wait for client to update its chain + if err = s.waitForBlockImport(recvConn, nextBlock, isEth66); err != nil { + return fmt.Errorf("failed to receive confirmation of block import: %v", err) + } + // update test suite chain + s.chain.blocks = append(s.chain.blocks, nextBlock) + return nil +} + +// testAnnounce writes a block announcement to the node and waits for the node +// to propagate it. +func (s *Suite) testAnnounce(sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) error { + if err := sendConn.Write(blockAnnouncement); err != nil { + return fmt.Errorf("could not write to connection: %v", err) + } + return s.waitAnnounce(receiveConn, blockAnnouncement) +} + +// waitAnnounce waits for a NewBlock or NewBlockHashes announcement from the node. 
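On the wire, the announcement sendNextBlock writes is the eth NewBlock packet: the full block plus the total difficulty the sender claims for it. A small, self-contained sketch of what such an announcement looks like when RLP-encoded, assuming the packet definitions in eth/protocols/eth and a hypothetical header in place of a block from the test chain:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Hypothetical header standing in for a real block from the test chain.
	block := types.NewBlockWithHeader(&types.Header{
		Number:     big.NewInt(1),
		Difficulty: big.NewInt(131072),
	})
	// NewBlockPacket carries the full block plus the announced total difficulty.
	ann := &eth.NewBlockPacket{Block: block, TD: big.NewInt(131073)}
	enc, err := rlp.EncodeToBytes(ann)
	if err != nil {
		panic(err)
	}
	fmt.Printf("NewBlock announcement encodes to %d bytes\n", len(enc))
}

waitAnnounce, next, accepts either this full-block form or the lighter NewBlockHashes form.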
+func (s *Suite) waitAnnounce(conn *Conn, blockAnnouncement *NewBlock) error { + for { + switch msg := conn.readAndServe(s.chain, timeout).(type) { + case *NewBlock: + if !reflect.DeepEqual(blockAnnouncement.Block.Header(), msg.Block.Header()) { + return fmt.Errorf("wrong header in block announcement: \nexpected %v "+ + "\ngot %v", blockAnnouncement.Block.Header(), msg.Block.Header()) + } + if !reflect.DeepEqual(blockAnnouncement.TD, msg.TD) { + return fmt.Errorf("wrong TD in announcement: expected %v, got %v", blockAnnouncement.TD, msg.TD) + } + return nil + case *NewBlockHashes: + hashes := *msg + if blockAnnouncement.Block.Hash() != hashes[0].Hash { + return fmt.Errorf("wrong block hash in announcement: expected %v, got %v", blockAnnouncement.Block.Hash(), hashes[0].Hash) + } + return nil + case *NewPooledTransactionHashes: + // ignore tx announcements from previous tests + continue + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + } +} + +func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block, isEth66 bool) error { + defer conn.SetReadDeadline(time.Time{}) + conn.SetReadDeadline(time.Now().Add(20 * time.Second)) + // create request + req := &GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Hash: block.Hash(), + }, + Amount: 1, + } + // loop until BlockHeaders response contains desired block, confirming the + // node imported the block + for { + var ( + headers BlockHeaders + err error + ) + if isEth66 { + requestID := uint64(54) + headers, err = conn.headersRequest(req, s.chain, eth66, requestID) + } else { + headers, err = conn.headersRequest(req, s.chain, eth65, 0) + } + if err != nil { + return fmt.Errorf("GetBlockHeader request failed: %v", err) + } + // if headers response is empty, node hasn't imported block yet, try again + if len(headers) == 0 { + time.Sleep(100 * time.Millisecond) + continue + } + if !reflect.DeepEqual(block.Header(), headers[0]) { + return fmt.Errorf("wrong header returned: wanted %v, got %v", block.Header(), headers[0]) + } + return nil + } +} + +func (s *Suite) oldAnnounce(isEth66 bool) error { + sendConn, receiveConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return err + } + defer sendConn.Close() + defer receiveConn.Close() + if err := sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err := receiveConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // create old block announcement + oldBlockAnnounce := &NewBlock{ + Block: s.chain.blocks[len(s.chain.blocks)/2], + TD: s.chain.blocks[len(s.chain.blocks)/2].Difficulty(), + } + if err := sendConn.Write(oldBlockAnnounce); err != nil { + return fmt.Errorf("could not write to connection: %v", err) + } + // wait to see if the announcement is propagated + switch msg := receiveConn.readAndServe(s.chain, time.Second*8).(type) { + case *NewBlock: + block := *msg + if block.Block.Hash() == oldBlockAnnounce.Block.Hash() { + return fmt.Errorf("unexpected: block propagated: %s", pretty.Sdump(msg)) + } + case *NewBlockHashes: + hashes := *msg + for _, hash := range hashes { + if hash.Hash == oldBlockAnnounce.Block.Hash() { + return fmt.Errorf("unexpected: block announced: %s", pretty.Sdump(msg)) + } + } + case *Error: + errMsg := *msg + // check to make sure error is timeout (propagation didn't come through == test successful) + if !strings.Contains(errMsg.String(), "timeout") { + return fmt.Errorf("unexpected error: %v", pretty.Sdump(msg)) + } + default: + return 
fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + return nil +} + +func (s *Suite) maliciousHandshakes(t *utesting.T, isEth66 bool) error { + var ( + conn *Conn + err error + ) + if isEth66 { + conn, err = s.dial66() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } else { + conn, err = s.dial() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } + defer conn.Close() + // write hello to client + pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:] + handshakes := []*Hello{ + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 64}, + }, + ID: pub0, + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: append(pub0, byte(0)), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: append(pub0, pub0...), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: largeBuffer(2), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 64}, + }, + ID: largeBuffer(2), + }, + } + for i, handshake := range handshakes { + t.Logf("Testing malicious handshake %v\n", i) + if err := conn.Write(handshake); err != nil { + return fmt.Errorf("could not write to connection: %v", err) + } + // check that the peer disconnected + for i := 0; i < 2; i++ { + switch msg := conn.readAndServe(s.chain, 20*time.Second).(type) { + case *Disconnect: + case *Error: + case *Hello: + // Discard one hello as Hello's are sent concurrently + continue + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + } + // dial for the next round + if isEth66 { + conn, err = s.dial66() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } else { + conn, err = s.dial() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } + } + return nil +} + +func (s *Suite) maliciousStatus(conn *Conn) error { + if err := conn.handshake(); err != nil { + return fmt.Errorf("handshake failed: %v", err) + } + status := &Status{ + ProtocolVersion: uint32(conn.negotiatedProtoVersion), + NetworkID: s.chain.chainConfig.ChainID.Uint64(), + TD: largeNumber(2), + Head: s.chain.blocks[s.chain.Len()-1].Hash(), + Genesis: s.chain.blocks[0].Hash(), + ForkID: s.chain.ForkID(), + } + // get status + msg, err := conn.statusExchange(s.chain, status) + if err != nil { + return fmt.Errorf("status exchange failed: %v", err) + } + switch msg := msg.(type) { + case *Status: + default: + return fmt.Errorf("expected status, got: %#v ", msg) + } + // wait for disconnect + switch msg := conn.readAndServe(s.chain, timeout).(type) { + case *Disconnect: + return nil + case *Error: + return nil + default: + return fmt.Errorf("expected disconnect, got: %s", pretty.Sdump(msg)) + } +} + +func (s *Suite) hashAnnounce(isEth66 bool) error { + // create connections + sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return fmt.Errorf("failed to create connections: %v", err) + } + defer sendConn.Close() + defer recvConn.Close() + if err := sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err := recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // create NewBlockHashes announcement + type anno struct { + Hash common.Hash // Hash of one particular block being announced + Number uint64 // Number of one particular block being announced + } + nextBlock := 
s.fullChain.blocks[s.chain.Len()] + announcement := anno{Hash: nextBlock.Hash(), Number: nextBlock.Number().Uint64()} + newBlockHash := &NewBlockHashes{announcement} + if err := sendConn.Write(newBlockHash); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + // Announcement sent, now wait for a header request + var ( + id uint64 + msg Message + blockHeaderReq GetBlockHeaders + ) + if isEth66 { + id, msg = sendConn.Read66() + switch msg := msg.(type) { + case GetBlockHeaders: + blockHeaderReq = msg + default: + return fmt.Errorf("unexpected %s", pretty.Sdump(msg)) + } + if blockHeaderReq.Amount != 1 { + return fmt.Errorf("unexpected number of block headers requested: %v", blockHeaderReq.Amount) + } + if blockHeaderReq.Origin.Hash != announcement.Hash { + return fmt.Errorf("unexpected block header requested. Announced:\n %v\n Remote request:\n%v", + pretty.Sdump(announcement), + pretty.Sdump(blockHeaderReq)) + } + if err := sendConn.Write66(ð.BlockHeadersPacket66{ + RequestId: id, + BlockHeadersPacket: eth.BlockHeadersPacket{ + nextBlock.Header(), + }, + }, BlockHeaders{}.Code()); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + } else { + msg = sendConn.Read() + switch msg := msg.(type) { + case *GetBlockHeaders: + blockHeaderReq = *msg + default: + return fmt.Errorf("unexpected %s", pretty.Sdump(msg)) + } + if blockHeaderReq.Amount != 1 { + return fmt.Errorf("unexpected number of block headers requested: %v", blockHeaderReq.Amount) + } + if blockHeaderReq.Origin.Hash != announcement.Hash { + return fmt.Errorf("unexpected block header requested. Announced:\n %v\n Remote request:\n%v", + pretty.Sdump(announcement), + pretty.Sdump(blockHeaderReq)) + } + if err := sendConn.Write(&BlockHeaders{nextBlock.Header()}); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + } + // wait for block announcement + msg = recvConn.readAndServe(s.chain, timeout) + switch msg := msg.(type) { + case *NewBlockHashes: + hashes := *msg + if len(hashes) != 1 { + return fmt.Errorf("unexpected new block hash announcement: wanted 1 announcement, got %d", len(hashes)) + } + if nextBlock.Hash() != hashes[0].Hash { + return fmt.Errorf("unexpected block hash announcement, wanted %v, got %v", nextBlock.Hash(), + hashes[0].Hash) + } + case *NewBlock: + // node should only propagate NewBlock without having requested the body if the body is empty + nextBlockBody := nextBlock.Body() + if len(nextBlockBody.Transactions) != 0 || len(nextBlockBody.Uncles) != 0 { + return fmt.Errorf("unexpected non-empty new block propagated: %s", pretty.Sdump(msg)) + } + if msg.Block.Hash() != nextBlock.Hash() { + return fmt.Errorf("mismatched hash of propagated new block: wanted %v, got %v", + nextBlock.Hash(), msg.Block.Hash()) + } + // check to make sure header matches header that was sent to the node + if !reflect.DeepEqual(nextBlock.Header(), msg.Block.Header()) { + return fmt.Errorf("incorrect header received: wanted %v, got %v", nextBlock.Header(), msg.Block.Header()) + } + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + // confirm node imported block + if err := s.waitForBlockImport(recvConn, nextBlock, isEth66); err != nil { + return fmt.Errorf("error waiting for node to import new block: %v", err) + } + // update the chain + s.chain.blocks = append(s.chain.blocks, nextBlock) + return nil +} diff --git a/cmd/devp2p/internal/ethtest/large.go b/cmd/devp2p/internal/ethtest/large.go new file mode 100644 index 
000000000..22421355a --- /dev/null +++ b/cmd/devp2p/internal/ethtest/large.go @@ -0,0 +1,80 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "crypto/rand" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" +) + +// largeNumber returns a very large big.Int. +func largeNumber(megabytes int) *big.Int { + buf := make([]byte, megabytes*1024*1024) + rand.Read(buf) + bigint := new(big.Int) + bigint.SetBytes(buf) + return bigint +} + +// largeBuffer returns a very large buffer. +func largeBuffer(megabytes int) []byte { + buf := make([]byte, megabytes*1024*1024) + rand.Read(buf) + return buf +} + +// largeString returns a very large string. +func largeString(megabytes int) string { + buf := make([]byte, megabytes*1024*1024) + rand.Read(buf) + return hexutil.Encode(buf) +} + +func largeBlock() *types.Block { + return types.NewBlockWithHeader(largeHeader()) +} + +// Returns a random hash +func randHash() common.Hash { + var h common.Hash + rand.Read(h[:]) + return h +} + +func largeHeader() *types.Header { + return &types.Header{ + MixDigest: randHash(), + ReceiptHash: randHash(), + TxHash: randHash(), + Nonce: types.BlockNonce{}, + Extra: []byte{}, + Bloom: types.Bloom{}, + GasUsed: 0, + Coinbase: common.Address{}, + GasLimit: 0, + UncleHash: types.EmptyUncleHash, + Time: 1337, + ParentHash: randHash(), + Root: randHash(), + Number: largeNumber(2), + Difficulty: largeNumber(2), + } +} diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index f70bc43ef..bbc955cd7 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -17,18 +17,16 @@ package ethtest import ( - "fmt" - "net" + "time" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/rlpx" - "github.com/stretchr/testify/assert" ) -// Suite represents a structure used to test the eth -// protocol of a node(s). +// Suite represents a structure used to test a node's conformance +// to the eth protocol. type Suite struct { Dest *enode.Node @@ -39,81 +37,358 @@ type Suite struct { // NewSuite creates and returns a new eth-test suite that can // be used to test the given node against the given blockchain // data. 
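The helpers in large.go above feed the malicious-input tests; everything else in this package hangs off the Suite defined next. A rough sketch of how a caller inside the package might drive the eth66 tests against a running node, assuming the chain and genesis files from ./testdata (runSuiteAgainst is a hypothetical helper; RunTAP is the same runner used by suite_test.go further below, and fmt, os, utesting and enode are assumed imported):

// hypothetical helper inside package ethtest
func runSuiteAgainst(node *enode.Node) error {
	suite, err := NewSuite(node, "testdata/chain.rlp", "testdata/genesis.json")
	if err != nil {
		return fmt.Errorf("could not create suite: %v", err)
	}
	// RunTAP executes the tests, writes TAP output and returns per-test results.
	for _, result := range utesting.RunTAP(suite.Eth66Tests(), os.Stdout) {
		if result.Failed {
			return fmt.Errorf("eth66 suite failed")
		}
	}
	return nil
}

NewSuite itself, and the test lists it exposes, follow.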
-func NewSuite(dest *enode.Node, chainfile string, genesisfile string) *Suite { +func NewSuite(dest *enode.Node, chainfile string, genesisfile string) (*Suite, error) { chain, err := loadChain(chainfile, genesisfile) if err != nil { - panic(err) + return nil, err } return &Suite{ Dest: dest, chain: chain.Shorten(1000), fullChain: chain, + }, nil +} + +func (s *Suite) AllEthTests() []utesting.Test { + return []utesting.Test{ + // status + {Name: "TestStatus", Fn: s.TestStatus}, + {Name: "TestStatus66", Fn: s.TestStatus66}, + // get block headers + {Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders}, + {Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66}, + {Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66}, + {Name: "TestSameRequestID66", Fn: s.TestSameRequestID66}, + {Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66}, + // get block bodies + {Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies}, + {Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66}, + // broadcast + {Name: "TestBroadcast", Fn: s.TestBroadcast}, + {Name: "TestBroadcast66", Fn: s.TestBroadcast66}, + {Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce}, + {Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66}, + {Name: "TestOldAnnounce", Fn: s.TestOldAnnounce}, + {Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66}, + {Name: "TestBlockHashAnnounce", Fn: s.TestBlockHashAnnounce}, + {Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66}, + // malicious handshakes + status + {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, + {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, + {Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66}, + {Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66}, + // test transactions + {Name: "TestTransaction", Fn: s.TestTransaction}, + {Name: "TestTransaction66", Fn: s.TestTransaction66}, + {Name: "TestMaliciousTx", Fn: s.TestMaliciousTx}, + {Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66}, + {Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66}, + {Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66}, } } -func (s *Suite) AllTests() []utesting.Test { +func (s *Suite) EthTests() []utesting.Test { return []utesting.Test{ - {Name: "Status", Fn: s.TestStatus}, - {Name: "GetBlockHeaders", Fn: s.TestGetBlockHeaders}, - {Name: "Broadcast", Fn: s.TestBroadcast}, - {Name: "GetBlockBodies", Fn: s.TestGetBlockBodies}, + {Name: "TestStatus", Fn: s.TestStatus}, + {Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders}, + {Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies}, + {Name: "TestBroadcast", Fn: s.TestBroadcast}, + {Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce}, + {Name: "TestOldAnnounce", Fn: s.TestOldAnnounce}, + {Name: "TestBlockHashAnnounce", Fn: s.TestBlockHashAnnounce}, + {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, + {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, + {Name: "TestTransaction", Fn: s.TestTransaction}, + {Name: "TestMaliciousTx", Fn: s.TestMaliciousTx}, } } +func (s *Suite) Eth66Tests() []utesting.Test { + return []utesting.Test{ + // only proceed with eth66 test suite if node supports eth 66 protocol + {Name: "TestStatus66", Fn: s.TestStatus66}, + {Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66}, + {Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66}, + {Name: "TestSameRequestID66", Fn: s.TestSameRequestID66}, + {Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66}, + {Name: "TestGetBlockBodies66", Fn: 
s.TestGetBlockBodies66}, + {Name: "TestBroadcast66", Fn: s.TestBroadcast66}, + {Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66}, + {Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66}, + {Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66}, + {Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66}, + {Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66}, + {Name: "TestTransaction66", Fn: s.TestTransaction66}, + {Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66}, + {Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66}, + {Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66}, + } +} + +var ( + eth66 = true // indicates whether suite should negotiate eth66 connection + eth65 = false // indicates whether suite should negotiate eth65 connection or below. +) + // TestStatus attempts to connect to the given node and exchange -// a status message with it, and then check to make sure -// the chain head is correct. +// a status message with it. func (s *Suite) TestStatus(t *utesting.T) { conn, err := s.dial() if err != nil { - t.Fatalf("could not dial: %v", err) + t.Fatalf("dial failed: %v", err) } - // get protoHandshake - conn.handshake(t) - // get status - switch msg := conn.statusExchange(t, s.chain).(type) { - case *Status: - t.Logf("%+v\n", msg) - default: - t.Fatalf("unexpected: %#v", msg) + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } +} + +// TestStatus66 attempts to connect to the given node and exchange +// a status message with it on the eth66 protocol. +func (s *Suite) TestStatus66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) } } // TestGetBlockHeaders tests whether the given node can respond to -// a `GetBlockHeaders` request and that the response is accurate. +// a `GetBlockHeaders` request accurately. func (s *Suite) TestGetBlockHeaders(t *utesting.T) { conn, err := s.dial() if err != nil { - t.Fatalf("could not dial: %v", err) + t.Fatalf("dial failed: %v", err) } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("handshake(s) failed: %v", err) + } + // write request + req := &GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Hash: s.chain.blocks[1].Hash(), + }, + Amount: 2, + Skip: 1, + Reverse: false, + } + headers, err := conn.headersRequest(req, s.chain, eth65, 0) + if err != nil { + t.Fatalf("GetBlockHeaders request failed: %v", err) + } + // check for correct headers + expected, err := s.chain.GetHeaders(*req) + if err != nil { + t.Fatalf("failed to get headers for given request: %v", err) + } + if !headersMatch(expected, headers) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers) + } +} - conn.handshake(t) - conn.statusExchange(t, s.chain) - - // get block headers +// TestGetBlockHeaders66 tests whether the given node can respond to +// an eth66 `GetBlockHeaders` request and that the response is accurate. 
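Both the eth65 and the eth66 variant of this test issue the same query: an Origin pointing at block 1, Amount 2, Skip 1, Reverse false. In the eth wire protocol a skip of N leaves N blocks out between consecutive returned headers, so this particular request asks for the headers of blocks 1 and 3. A tiny sketch of that arithmetic for the forward (non-reverse) case:

package main

import "fmt"

func main() {
	// Each returned header is skip+1 block numbers after the previous one,
	// starting from the origin.
	origin, amount, skip := uint64(1), uint64(2), uint64(1)
	for i := uint64(0); i < amount; i++ {
		fmt.Println(origin + i*(skip+1)) // prints 1, then 3
	}
}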
+func (s *Suite) TestGetBlockHeaders66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request req := &GetBlockHeaders{ - Origin: hashOrNumber{ + Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), }, Amount: 2, Skip: 1, Reverse: false, } + headers, err := conn.headersRequest(req, s.chain, eth66, 33) + if err != nil { + t.Fatalf("could not get block headers: %v", err) + } + // check for correct headers + expected, err := s.chain.GetHeaders(*req) + if err != nil { + t.Fatalf("failed to get headers for given request: %v", err) + } + if !headersMatch(expected, headers) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers) + } +} - if err := conn.Write(req); err != nil { - t.Fatalf("could not write to connection: %v", err) +// TestSimultaneousRequests66 sends two simultaneous `GetBlockHeader` requests from +// the same connection with different request IDs and checks to make sure the node +// responds with the correct headers per request. +func (s *Suite) TestSimultaneousRequests66(t *utesting.T) { + // create a connection + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // create two requests + req1 := ð.GetBlockHeadersPacket66{ + RequestId: uint64(111), + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Hash: s.chain.blocks[1].Hash(), + }, + Amount: 2, + Skip: 1, + Reverse: false, + }, + } + req2 := ð.GetBlockHeadersPacket66{ + RequestId: uint64(222), + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Hash: s.chain.blocks[1].Hash(), + }, + Amount: 4, + Skip: 1, + Reverse: false, + }, + } + // write the first request + if err := conn.Write66(req1, GetBlockHeaders{}.Code()); err != nil { + t.Fatalf("failed to write to connection: %v", err) + } + // write the second request + if err := conn.Write66(req2, GetBlockHeaders{}.Code()); err != nil { + t.Fatalf("failed to write to connection: %v", err) + } + // wait for responses + msg := conn.waitForResponse(s.chain, timeout, req1.RequestId) + headers1, ok := msg.(BlockHeaders) + if !ok { + t.Fatalf("unexpected %s", pretty.Sdump(msg)) } + msg = conn.waitForResponse(s.chain, timeout, req2.RequestId) + headers2, ok := msg.(BlockHeaders) + if !ok { + t.Fatalf("unexpected %s", pretty.Sdump(msg)) + } + // check received headers for accuracy + expected1, err := s.chain.GetHeaders(GetBlockHeaders(*req1.GetBlockHeadersPacket)) + if err != nil { + t.Fatalf("failed to get expected headers for request 1: %v", err) + } + expected2, err := s.chain.GetHeaders(GetBlockHeaders(*req2.GetBlockHeadersPacket)) + if err != nil { + t.Fatalf("failed to get expected headers for request 2: %v", err) + } + if !headersMatch(expected1, headers1) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) + } + if !headersMatch(expected2, headers2) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) + } +} - switch msg := conn.ReadAndServe(s.chain).(type) { - case *BlockHeaders: - headers := msg - for _, header := range *headers { - num := header.Number.Uint64() - assert.Equal(t, s.chain.blocks[int(num)].Header(), header) - t.Logf("\nHEADER FOR BLOCK NUMBER %d: %+v\n", header.Number, header) - } - default: - t.Fatalf("unexpected: %#v", 
msg) +// TestSameRequestID66 sends two requests with the same request ID to a +// single node. +func (s *Suite) TestSameRequestID66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // create requests + reqID := uint64(1234) + request1 := ð.GetBlockHeadersPacket66{ + RequestId: reqID, + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Number: 1, + }, + Amount: 2, + }, + } + request2 := ð.GetBlockHeadersPacket66{ + RequestId: reqID, + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Number: 33, + }, + Amount: 2, + }, + } + // write the requests + if err = conn.Write66(request1, GetBlockHeaders{}.Code()); err != nil { + t.Fatalf("failed to write to connection: %v", err) + } + if err = conn.Write66(request2, GetBlockHeaders{}.Code()); err != nil { + t.Fatalf("failed to write to connection: %v", err) + } + // wait for responses + msg := conn.waitForResponse(s.chain, timeout, reqID) + headers1, ok := msg.(BlockHeaders) + if !ok { + t.Fatalf("unexpected %s", pretty.Sdump(msg)) + } + msg = conn.waitForResponse(s.chain, timeout, reqID) + headers2, ok := msg.(BlockHeaders) + if !ok { + t.Fatalf("unexpected %s", pretty.Sdump(msg)) + } + // check if headers match + expected1, err := s.chain.GetHeaders(GetBlockHeaders(*request1.GetBlockHeadersPacket)) + if err != nil { + t.Fatalf("failed to get expected block headers: %v", err) + } + expected2, err := s.chain.GetHeaders(GetBlockHeaders(*request2.GetBlockHeadersPacket)) + if err != nil { + t.Fatalf("failed to get expected block headers: %v", err) + } + if !headersMatch(expected1, headers1) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) + } + if !headersMatch(expected2, headers2) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) + } +} + +// TestZeroRequestID_66 checks that a message with a request ID of zero is still handled +// by the node. 
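The only thing the eth66 variants add to this request flow is the envelope: the GetBlockHeaders body is wrapped in a struct carrying a caller-chosen uint64 request ID, which the node must echo in its BlockHeaders response. Any value is legal, including zero, which is exactly what the next test checks. A small, self-contained sketch of building and RLP-encoding such a wrapped request, assuming the packet types from eth/protocols/eth:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// eth/66 wraps the eth/65 request body in a struct that adds an
	// arbitrary request ID; zero is as valid as any other value.
	req := &eth.GetBlockHeadersPacket66{
		RequestId: 0,
		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
			Origin: eth.HashOrNumber{Number: 1},
			Amount: 2,
			Skip:   1,
		},
	}
	enc, err := rlp.EncodeToBytes(req)
	if err != nil {
		panic(err)
	}
	fmt.Printf("GetBlockHeaders (eth/66) request encodes to %d bytes\n", len(enc))
}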
+func (s *Suite) TestZeroRequestID66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + req := &GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Number: 0, + }, + Amount: 2, + } + headers, err := conn.headersRequest(req, s.chain, eth66, 0) + if err != nil { + t.Fatalf("failed to get block headers: %v", err) + } + expected, err := s.chain.GetHeaders(*req) + if err != nil { + t.Fatalf("failed to get expected block headers: %v", err) + } + if !headersMatch(expected, headers) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers) } } @@ -122,95 +397,387 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) { func (s *Suite) TestGetBlockBodies(t *utesting.T) { conn, err := s.dial() if err != nil { - t.Fatalf("could not dial: %v", err) + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) } - - conn.handshake(t) - conn.statusExchange(t, s.chain) // create block bodies request - req := &GetBlockBodies{s.chain.blocks[54].Hash(), s.chain.blocks[75].Hash()} + req := &GetBlockBodies{ + s.chain.blocks[54].Hash(), + s.chain.blocks[75].Hash(), + } if err := conn.Write(req); err != nil { t.Fatalf("could not write to connection: %v", err) } - - switch msg := conn.ReadAndServe(s.chain).(type) { + // wait for response + switch msg := conn.readAndServe(s.chain, timeout).(type) { case *BlockBodies: - bodies := msg - for _, body := range *bodies { - t.Logf("\nBODY: %+v\n", body) + t.Logf("received %d block bodies", len(*msg)) + if len(*msg) != len(*req) { + t.Fatalf("wrong bodies in response: expected %d bodies, "+ + "got %d", len(*req), len(*msg)) } default: - t.Fatalf("unexpected: %#v", msg) + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } +} + +// TestGetBlockBodies66 tests whether the given node can respond to +// a `GetBlockBodies` request and that the response is accurate over +// the eth66 protocol. +func (s *Suite) TestGetBlockBodies66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // create block bodies request + req := ð.GetBlockBodiesPacket66{ + RequestId: uint64(55), + GetBlockBodiesPacket: eth.GetBlockBodiesPacket{ + s.chain.blocks[54].Hash(), + s.chain.blocks[75].Hash(), + }, + } + if err := conn.Write66(req, GetBlockBodies{}.Code()); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // wait for block bodies response + msg := conn.waitForResponse(s.chain, timeout, req.RequestId) + blockBodies, ok := msg.(BlockBodies) + if !ok { + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } + t.Logf("received %d block bodies", len(blockBodies)) + if len(blockBodies) != len(req.GetBlockBodiesPacket) { + t.Fatalf("wrong bodies in response: expected %d bodies, "+ + "got %d", len(req.GetBlockBodiesPacket), len(blockBodies)) } } // TestBroadcast tests whether a block announcement is correctly // propagated to the given node's peer(s). 
func (s *Suite) TestBroadcast(t *utesting.T) { - // create conn to send block announcement - sendConn, err := s.dial() - if err != nil { - t.Fatalf("could not dial: %v", err) + if err := s.sendNextBlock(eth65); err != nil { + t.Fatalf("block broadcast failed: %v", err) + } +} + +// TestBroadcast66 tests whether a block announcement is correctly +// propagated to the given node's peer(s) on the eth66 protocol. +func (s *Suite) TestBroadcast66(t *utesting.T) { + if err := s.sendNextBlock(eth66); err != nil { + t.Fatalf("block broadcast failed: %v", err) + } +} + +// TestLargeAnnounce tests the announcement mechanism with a large block. +func (s *Suite) TestLargeAnnounce(t *utesting.T) { + nextBlock := len(s.chain.blocks) + blocks := []*NewBlock{ + { + Block: largeBlock(), + TD: s.fullChain.TotalDifficultyAt(nextBlock), + }, + { + Block: s.fullChain.blocks[nextBlock], + TD: largeNumber(2), + }, + { + Block: largeBlock(), + TD: largeNumber(2), + }, + } + + for i, blockAnnouncement := range blocks { + t.Logf("Testing malicious announcement: %v\n", i) + conn, err := s.dial() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + if err = conn.Write(blockAnnouncement); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // Invalid announcement, check that peer disconnected + switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { + case *Disconnect: + case *Error: + break + default: + t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) + } + conn.Close() + } + // Test the last block as a valid block + if err := s.sendNextBlock(eth65); err != nil { + t.Fatalf("failed to broadcast next block: %v", err) + } +} + +// TestLargeAnnounce66 tests the announcement mechanism with a large +// block over the eth66 protocol. +func (s *Suite) TestLargeAnnounce66(t *utesting.T) { + nextBlock := len(s.chain.blocks) + blocks := []*NewBlock{ + { + Block: largeBlock(), + TD: s.fullChain.TotalDifficultyAt(nextBlock), + }, + { + Block: s.fullChain.blocks[nextBlock], + TD: largeNumber(2), + }, + { + Block: largeBlock(), + TD: largeNumber(2), + }, + } + + for i, blockAnnouncement := range blocks[0:3] { + t.Logf("Testing malicious announcement: %v\n", i) + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + if err := conn.Write(blockAnnouncement); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // Invalid announcement, check that peer disconnected + switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { + case *Disconnect: + case *Error: + break + default: + t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) + } + conn.Close() + } + // Test the last block as a valid block + if err := s.sendNextBlock(eth66); err != nil { + t.Fatalf("failed to broadcast next block: %v", err) + } +} + +// TestOldAnnounce tests the announcement mechanism with an old block. +func (s *Suite) TestOldAnnounce(t *utesting.T) { + if err := s.oldAnnounce(eth65); err != nil { + t.Fatal(err) + } +} + +// TestOldAnnounce66 tests the announcement mechanism with an old block, +// over the eth66 protocol. +func (s *Suite) TestOldAnnounce66(t *utesting.T) { + if err := s.oldAnnounce(eth66); err != nil { + t.Fatal(err) + } +} + +// TestBlockHashAnnounce sends a new block hash announcement and expects +// the node to perform a `GetBlockHeaders` request. 
+func (s *Suite) TestBlockHashAnnounce(t *utesting.T) { + if err := s.hashAnnounce(eth65); err != nil { + t.Fatalf("block hash announcement failed: %v", err) + } +} + +// TestBlockHashAnnounce66 sends a new block hash announcement and expects +// the node to perform a `GetBlockHeaders` request. +func (s *Suite) TestBlockHashAnnounce66(t *utesting.T) { + if err := s.hashAnnounce(eth66); err != nil { + t.Fatalf("block hash announcement failed: %v", err) + } +} + +// TestMaliciousHandshake tries to send malicious data during the handshake. +func (s *Suite) TestMaliciousHandshake(t *utesting.T) { + if err := s.maliciousHandshakes(t, eth65); err != nil { + t.Fatal(err) } - // create conn to receive block announcement - receiveConn, err := s.dial() +} + +// TestMaliciousHandshake66 tries to send malicious data during the handshake. +func (s *Suite) TestMaliciousHandshake66(t *utesting.T) { + if err := s.maliciousHandshakes(t, eth66); err != nil { + t.Fatal(err) + } +} + +// TestMaliciousStatus sends a status package with a large total difficulty. +func (s *Suite) TestMaliciousStatus(t *utesting.T) { + conn, err := s.dial() if err != nil { - t.Fatalf("could not dial: %v", err) + t.Fatalf("dial failed: %v", err) } + defer conn.Close() - sendConn.handshake(t) - receiveConn.handshake(t) + if err := s.maliciousStatus(conn); err != nil { + t.Fatal(err) + } +} - sendConn.statusExchange(t, s.chain) - receiveConn.statusExchange(t, s.chain) +// TestMaliciousStatus66 sends a status package with a large total +// difficulty over the eth66 protocol. +func (s *Suite) TestMaliciousStatus66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() - // sendConn sends the block announcement - blockAnnouncement := &NewBlock{ - Block: s.fullChain.blocks[1000], - TD: s.fullChain.TD(1001), + if err := s.maliciousStatus(conn); err != nil { + t.Fatal(err) } - if err := sendConn.Write(blockAnnouncement); err != nil { - t.Fatalf("could not write to connection: %v", err) +} + +// TestTransaction sends a valid transaction to the node and +// checks if the transaction gets propagated. +func (s *Suite) TestTransaction(t *utesting.T) { + if err := s.sendSuccessfulTxs(t, eth65); err != nil { + t.Fatal(err) } +} - switch msg := receiveConn.ReadAndServe(s.chain).(type) { - case *NewBlock: - assert.Equal(t, blockAnnouncement.Block.Header(), msg.Block.Header(), - "wrong block header in announcement") - assert.Equal(t, blockAnnouncement.TD, msg.TD, - "wrong TD in announcement") - case *NewBlockHashes: - hashes := *msg - assert.Equal(t, blockAnnouncement.Block.Hash(), hashes[0].Hash, - "wrong block hash in announcement") - default: - t.Fatalf("unexpected: %#v", msg) +// TestTransaction66 sends a valid transaction to the node and +// checks if the transaction gets propagated. +func (s *Suite) TestTransaction66(t *utesting.T) { + if err := s.sendSuccessfulTxs(t, eth66); err != nil { + t.Fatal(err) } - // update test suite chain - s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[1000]) - // wait for client to update its chain - if err := receiveConn.waitForBlock(s.chain.Head()); err != nil { +} + +// TestMaliciousTx sends several invalid transactions and tests whether +// the node will propagate them. +func (s *Suite) TestMaliciousTx(t *utesting.T) { + if err := s.sendMaliciousTxs(t, eth65); err != nil { t.Fatal(err) } } -// dial attempts to dial the given node and perform a handshake, -// returning the created Conn if successful. 
-func (s *Suite) dial() (*Conn, error) { - var conn Conn +// TestMaliciousTx66 sends several invalid transactions and tests whether +// the node will propagate them. +func (s *Suite) TestMaliciousTx66(t *utesting.T) { + if err := s.sendMaliciousTxs(t, eth66); err != nil { + t.Fatal(err) + } +} - fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP())) +// TestLargeTxRequest66 tests whether a node can fulfill a large GetPooledTransactions +// request. +func (s *Suite) TestLargeTxRequest66(t *utesting.T) { + // send the next block to ensure the node is no longer syncing and + // is able to accept txs + if err := s.sendNextBlock(eth66); err != nil { + t.Fatalf("failed to send next block: %v", err) + } + // send 2000 transactions to the node + hashMap, txs, err := generateTxs(s, 2000) if err != nil { - return nil, err + t.Fatalf("failed to generate transactions: %v", err) + } + if err = sendMultipleSuccessfulTxs(t, s, txs); err != nil { + t.Fatalf("failed to send multiple txs: %v", err) + } + // set up connection to receive to ensure node is peered with the receiving connection + // before tx request is sent + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // create and send pooled tx request + hashes := make([]common.Hash, 0) + for _, hash := range hashMap { + hashes = append(hashes, hash) } - conn.Conn = rlpx.NewConn(fd, s.Dest.Pubkey()) + getTxReq := ð.GetPooledTransactionsPacket66{ + RequestId: 1234, + GetPooledTransactionsPacket: hashes, + } + if err = conn.Write66(getTxReq, GetPooledTransactions{}.Code()); err != nil { + t.Fatalf("could not write to conn: %v", err) + } + // check that all received transactions match those that were sent to node + switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) { + case PooledTransactions: + for _, gotTx := range msg { + if _, exists := hashMap[gotTx.Hash()]; !exists { + t.Fatalf("unexpected tx received: %v", gotTx.Hash()) + } + } + default: + t.Fatalf("unexpected %s", pretty.Sdump(msg)) + } +} - // do encHandshake - conn.ourKey, _ = crypto.GenerateKey() - _, err = conn.Handshake(conn.ourKey) +// TestNewPooledTxs_66 tests whether a node will do a GetPooledTransactions +// request upon receiving a NewPooledTransactionHashes announcement. 
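At the wire level, the exchange the pooled-transaction tests exercise is a two-step one: the suite announces a list of transaction hashes, and a well-behaved node fetches the ones it is missing with a GetPooledTransactions request. A rough sketch of both messages, assuming the eth/66 packet definitions in eth/protocols/eth and hypothetical hashes in place of real signed transactions:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Hypothetical hashes; the suite derives real ones from transactions
	// it signs with the faucet key.
	hashes := []common.Hash{{0x01}, {0x02}}

	// The announcement is just a list of hashes...
	ann := eth.NewPooledTransactionHashesPacket(hashes)
	encAnn, _ := rlp.EncodeToBytes(ann)

	// ...and the node is expected to answer with an eth/66
	// GetPooledTransactions request carrying its own request ID.
	req := &eth.GetPooledTransactionsPacket66{
		RequestId:                   1234,
		GetPooledTransactionsPacket: hashes,
	}
	encReq, _ := rlp.EncodeToBytes(req)

	fmt.Printf("announcement: %d bytes, fetch request: %d bytes\n", len(encAnn), len(encReq))
}

TestNewPooledTxs66, below, runs the real round trip against the node and expects the request to cover all of the announced hashes.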
+func (s *Suite) TestNewPooledTxs66(t *utesting.T) { + // send the next block to ensure the node is no longer syncing and + // is able to accept txs + if err := s.sendNextBlock(eth66); err != nil { + t.Fatalf("failed to send next block: %v", err) + } + + // generate 50 txs + hashMap, _, err := generateTxs(s, 50) if err != nil { - return nil, err + t.Fatalf("failed to generate transactions: %v", err) + } + + // create new pooled tx hashes announcement + hashes := make([]common.Hash, 0) + for _, hash := range hashMap { + hashes = append(hashes, hash) + } + announce := NewPooledTransactionHashes(hashes) + + // send announcement + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + if err = conn.Write(announce); err != nil { + t.Fatalf("failed to write to connection: %v", err) } - return &conn, nil + // wait for GetPooledTxs request + for { + _, msg := conn.readAndServe66(s.chain, timeout) + switch msg := msg.(type) { + case GetPooledTransactions: + if len(msg) != len(hashes) { + t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg)) + } + return + // ignore propagated txs from previous tests + case *NewPooledTransactionHashes: + continue + // ignore block announcements from previous tests + case *NewBlockHashes: + continue + case *NewBlock: + continue + default: + t.Fatalf("unexpected %s", pretty.Sdump(msg)) + } + } } diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go new file mode 100644 index 000000000..6e3217151 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/suite_test.go @@ -0,0 +1,107 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package ethtest + +import ( + "os" + "testing" + "time" + + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" +) + +var ( + genesisFile = "./testdata/genesis.json" + halfchainFile = "./testdata/halfchain.rlp" + fullchainFile = "./testdata/chain.rlp" +) + +func TestEthSuite(t *testing.T) { + geth, err := runGeth() + if err != nil { + t.Fatalf("could not run geth: %v", err) + } + defer geth.Close() + + suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile) + if err != nil { + t.Fatalf("could not create new test suite: %v", err) + } + for _, test := range suite.AllEthTests() { + t.Run(test.Name, func(t *testing.T) { + result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout) + if result[0].Failed { + t.Fatal() + } + }) + } +} + +// runGeth creates and starts a geth node +func runGeth() (*node.Node, error) { + stack, err := node.New(&node.Config{ + P2P: p2p.Config{ + ListenAddr: "127.0.0.1:0", + NoDiscovery: true, + MaxPeers: 10, // in case a test requires multiple connections, can be changed in the future + NoDial: true, + }, + }) + if err != nil { + return nil, err + } + + err = setupGeth(stack) + if err != nil { + stack.Close() + return nil, err + } + if err = stack.Start(); err != nil { + stack.Close() + return nil, err + } + return stack, nil +} + +func setupGeth(stack *node.Node) error { + chain, err := loadChain(halfchainFile, genesisFile) + if err != nil { + return err + } + + backend, err := eth.New(stack, ðconfig.Config{ + Genesis: &chain.genesis, + NetworkId: chain.genesis.Config.ChainID.Uint64(), // 19763 + DatabaseCache: 10, + TrieCleanCache: 10, + TrieCleanCacheJournal: "", + TrieCleanCacheRejournal: 60 * time.Minute, + TrieDirtyCache: 16, + TrieTimeout: 60 * time.Minute, + SnapshotCache: 10, + }) + if err != nil { + return err + } + + _, err = backend.BlockChain().InsertChain(chain.blocks[1:]) + return err +} diff --git a/cmd/devp2p/internal/ethtest/testdata/chain.rlp b/cmd/devp2p/internal/ethtest/testdata/chain.rlp new file mode 100644 index 000000000..5ebc2f3bb Binary files /dev/null and b/cmd/devp2p/internal/ethtest/testdata/chain.rlp differ diff --git a/cmd/devp2p/internal/ethtest/testdata/chain.rlp.gz b/cmd/devp2p/internal/ethtest/testdata/chain.rlp.gz deleted file mode 100755 index bdd6290ce..000000000 Binary files a/cmd/devp2p/internal/ethtest/testdata/chain.rlp.gz and /dev/null differ diff --git a/cmd/devp2p/internal/ethtest/testdata/genesis.json b/cmd/devp2p/internal/ethtest/testdata/genesis.json index ea5e2725b..d8b5d2250 100644 --- a/cmd/devp2p/internal/ethtest/testdata/genesis.json +++ b/cmd/devp2p/internal/ethtest/testdata/genesis.json @@ -1,6 +1,6 @@ { "config": { - "chainId": 1, + "chainId": 19763, "homesteadBlock": 0, "eip150Block": 0, "eip155Block": 0, @@ -11,16 +11,16 @@ "nonce": "0xdeadbeefdeadbeef", "timestamp": "0x0", "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x8000000", - "difficulty": "0x10", + "gasLimit": "0x80000000", + "difficulty": "0x20000", "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "coinbase": "0x0000000000000000000000000000000000000000", "alloc": { "71562b71999873db5b286df957af199ec94617f7": { - "balance": "0xf4240" + "balance": "0xffffffffffffffffffffffffff" } }, "number": "0x0", "gasUsed": "0x0", "parentHash": 
"0x0000000000000000000000000000000000000000000000000000000000000000" -} \ No newline at end of file +} diff --git a/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp b/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp new file mode 100644 index 000000000..1a820734e Binary files /dev/null and b/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp differ diff --git a/cmd/devp2p/internal/ethtest/transaction.go b/cmd/devp2p/internal/ethtest/transaction.go new file mode 100644 index 000000000..d2dbe0a7d --- /dev/null +++ b/cmd/devp2p/internal/ethtest/transaction.go @@ -0,0 +1,419 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "fmt" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/params" +) + +//var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") +var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + +func (s *Suite) sendSuccessfulTxs(t *utesting.T, isEth66 bool) error { + tests := []*types.Transaction{ + getNextTxFromChain(s), + unknownTx(s), + } + for i, tx := range tests { + if tx == nil { + return fmt.Errorf("could not find tx to send") + } + t.Logf("Testing tx propagation %d: sending tx %v %v %v\n", i, tx.Hash().String(), tx.GasPrice(), tx.Gas()) + // get previous tx if exists for reference in case of old tx propagation + var prevTx *types.Transaction + if i != 0 { + prevTx = tests[i-1] + } + // write tx to connection + if err := sendSuccessfulTx(s, tx, prevTx, isEth66); err != nil { + return fmt.Errorf("send successful tx test failed: %v", err) + } + } + return nil +} + +func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction, isEth66 bool) error { + sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return err + } + defer sendConn.Close() + defer recvConn.Close() + if err = sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // Send the transaction + if err = sendConn.Write(&Transactions{tx}); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + // peer receiving connection to node + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // update last nonce seen + nonce = tx.Nonce() + // Wait for the transaction announcement + for { + switch msg := recvConn.readAndServe(s.chain, timeout).(type) { + case *Transactions: + recTxs := *msg + // if you receive an old tx propagation, read from connection again + if len(recTxs) == 1 && prevTx != nil { + if recTxs[0] == prevTx { + 
continue + } + } + for _, gotTx := range recTxs { + if gotTx.Hash() == tx.Hash() { + // Ok + return nil + } + } + return fmt.Errorf("missing transaction: got %v missing %v", recTxs, tx.Hash()) + case *NewPooledTransactionHashes: + txHashes := *msg + // if you receive an old tx propagation, read from connection again + if len(txHashes) == 1 && prevTx != nil { + if txHashes[0] == prevTx.Hash() { + continue + } + } + for _, gotHash := range txHashes { + if gotHash == tx.Hash() { + // Ok + return nil + } + } + return fmt.Errorf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash()) + default: + return fmt.Errorf("unexpected message in sendSuccessfulTx: %s", pretty.Sdump(msg)) + } + } +} + +func (s *Suite) sendMaliciousTxs(t *utesting.T, isEth66 bool) error { + badTxs := []*types.Transaction{ + getOldTxFromChain(s), + invalidNonceTx(s), + hugeAmount(s), + hugeGasPrice(s), + hugeData(s), + } + // setup receiving connection before sending malicious txs + var ( + recvConn *Conn + err error + ) + if isEth66 { + recvConn, err = s.dial66() + } else { + recvConn, err = s.dial() + } + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + defer recvConn.Close() + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + for i, tx := range badTxs { + t.Logf("Testing malicious tx propagation: %v\n", i) + if err = sendMaliciousTx(s, tx, isEth66); err != nil { + return fmt.Errorf("malicious tx test failed:\ntx: %v\nerror: %v", tx, err) + } + } + // check to make sure bad txs aren't propagated + return checkMaliciousTxPropagation(s, badTxs, recvConn) +} + +func sendMaliciousTx(s *Suite, tx *types.Transaction, isEth66 bool) error { + // setup connection + var ( + conn *Conn + err error + ) + if isEth66 { + conn, err = s.dial66() + } else { + conn, err = s.dial() + } + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // write malicious tx + if err = conn.Write(&Transactions{tx}); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + return nil +} + +var nonce = uint64(99) + +// sendMultipleSuccessfulTxs sends the given transactions to the node and +// expects the node to accept and propagate them. +func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction) error { + txMsg := Transactions(txs) + t.Logf("sending %d txs\n", len(txs)) + + sendConn, recvConn, err := s.createSendAndRecvConns(true) + if err != nil { + return err + } + defer sendConn.Close() + defer recvConn.Close() + if err = sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // Send the transactions + if err = sendConn.Write(&txMsg); err != nil { + return fmt.Errorf("failed to write message to connection: %v", err) + } + // update nonce + nonce = txs[len(txs)-1].Nonce() + // Wait for the transaction announcement(s) and make sure all sent txs are being propagated + recvHashes := make([]common.Hash, 0) + // all txs should be announced within 3 announcements + for i := 0; i < 3; i++ { + switch msg := recvConn.readAndServe(s.chain, timeout).(type) { + case *Transactions: + for _, tx := range *msg { + recvHashes = append(recvHashes, tx.Hash()) + } + case *NewPooledTransactionHashes: + recvHashes = append(recvHashes, *msg...) 
+ default: + if !strings.Contains(pretty.Sdump(msg), "i/o timeout") { + return fmt.Errorf("unexpected message while waiting to receive txs: %s", pretty.Sdump(msg)) + } + } + // break once all 2000 txs have been received + if len(recvHashes) == 2000 { + break + } + if len(recvHashes) > 0 { + _, missingTxs := compareReceivedTxs(recvHashes, txs) + if len(missingTxs) > 0 { + continue + } else { + t.Logf("successfully received all %d txs", len(txs)) + return nil + } + } + } + _, missingTxs := compareReceivedTxs(recvHashes, txs) + if len(missingTxs) > 0 { + for _, missing := range missingTxs { + t.Logf("missing tx: %v", missing.Hash()) + } + return fmt.Errorf("missing %d txs", len(missingTxs)) + } + return nil +} + +// checkMaliciousTxPropagation checks whether the given malicious transactions were +// propagated by the node. +func checkMaliciousTxPropagation(s *Suite, txs []*types.Transaction, conn *Conn) error { + switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { + case *Transactions: + // check to see if any of the failing txs were in the announcement + recvTxs := make([]common.Hash, len(*msg)) + for i, recvTx := range *msg { + recvTxs[i] = recvTx.Hash() + } + badTxs, _ := compareReceivedTxs(recvTxs, txs) + if len(badTxs) > 0 { + return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs) + } + case *NewPooledTransactionHashes: + badTxs, _ := compareReceivedTxs(*msg, txs) + if len(badTxs) > 0 { + return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs) + } + case *Error: + // Transaction should not be announced -> wait for timeout + return nil + default: + return fmt.Errorf("unexpected message in sendFailingTx: %s", pretty.Sdump(msg)) + } + return nil +} + +// compareReceivedTxs compares the received set of txs against the given set of txs, +// returning both the set received txs that were present within the given txs, and +// the set of txs that were missing from the set of received txs +func compareReceivedTxs(recvTxs []common.Hash, txs []*types.Transaction) (present []*types.Transaction, missing []*types.Transaction) { + // create a map of the hashes received from node + recvHashes := make(map[common.Hash]common.Hash) + for _, hash := range recvTxs { + recvHashes[hash] = hash + } + + // collect present txs and missing txs separately + present = make([]*types.Transaction, 0) + missing = make([]*types.Transaction, 0) + for _, tx := range txs { + if _, exists := recvHashes[tx.Hash()]; exists { + present = append(present, tx) + } else { + missing = append(missing, tx) + } + } + return present, missing +} + +func unknownTx(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce()+1, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data()) + return signWithFaucet(s.chain.chainConfig, txNew) +} + +func getNextTxFromChain(s *Suite) *types.Transaction { + // Get a new transaction + for _, blocks := range s.fullChain.blocks[s.chain.Len():] { + txs := blocks.Transactions() + if txs.Len() != 0 { + return txs[0] + } + } + return nil +} + +func generateTxs(s *Suite, numTxs int) (map[common.Hash]common.Hash, []*types.Transaction, error) { + txHashMap := make(map[common.Hash]common.Hash, numTxs) + txs := make([]*types.Transaction, numTxs) + + nextTx := getNextTxFromChain(s) + if nextTx == nil { + return nil, nil, fmt.Errorf("failed to get the next transaction") + } + gas := nextTx.Gas() + + nonce = nonce + 1 + // generate txs + for i 
:= 0; i < numTxs; i++ { + tx := generateTx(s.chain.chainConfig, nonce, gas) + if tx == nil { + return nil, nil, fmt.Errorf("failed to get the next transaction") + } + txHashMap[tx.Hash()] = tx.Hash() + txs[i] = tx + nonce = nonce + 1 + } + return txHashMap, txs, nil +} + +func generateTx(chainConfig *params.ChainConfig, nonce uint64, gas uint64) *types.Transaction { + var to common.Address + tx := types.NewTransaction(nonce, to, big.NewInt(1), gas, big.NewInt(1), []byte{}) + return signWithFaucet(chainConfig, tx) +} + +func getOldTxFromChain(s *Suite) *types.Transaction { + for _, blocks := range s.fullChain.blocks[:s.chain.Len()-1] { + txs := blocks.Transactions() + if txs.Len() != 0 { + return txs[0] + } + } + return nil +} + +func invalidNonceTx(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce()-2, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data()) + return signWithFaucet(s.chain.chainConfig, txNew) +} + +func hugeAmount(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } + amount := largeNumber(2) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce(), to, amount, tx.Gas(), tx.GasPrice(), tx.Data()) + return signWithFaucet(s.chain.chainConfig, txNew) +} + +func hugeGasPrice(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } + gasPrice := largeNumber(2) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), gasPrice, tx.Data()) + return signWithFaucet(s.chain.chainConfig, txNew) +} + +func hugeData(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), tx.GasPrice(), largeBuffer(2)) + return signWithFaucet(s.chain.chainConfig, txNew) +} + +func signWithFaucet(chainConfig *params.ChainConfig, tx *types.Transaction) *types.Transaction { + signer := types.LatestSigner(chainConfig) + signedTx, err := types.SignTx(tx, signer, faucetKey) + if err != nil { + return nil + } + return signedTx +} diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go index b6298e808..e49ea284e 100644 --- a/cmd/devp2p/internal/ethtest/types.go +++ b/cmd/devp2p/internal/ethtest/types.go @@ -19,16 +19,8 @@ package ethtest import ( "crypto/ecdsa" "fmt" - "io" - "math/big" - "reflect" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/forkid" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/internal/utesting" + + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/rlpx" "github.com/ethereum/go-ethereum/rlp" @@ -42,10 +34,14 @@ type Error struct { err error } -func (e *Error) Unwrap() error { return e.err } -func (e *Error) Error() string { return e.err.Error() } -func (e *Error) Code() int { return -1 } -func (e *Error) GoString() string { return e.Error() } +func (e *Error) Unwrap() error { return e.err } +func (e *Error) Error() string { return e.err.Error() } +func (e *Error) Code() int { return -1 } +func (e *Error) String() string { return 
e.Error() } + +func errorf(format string, args ...interface{}) *Error { + return &Error{fmt.Errorf(format, args...)} +} // Hello is the RLP structure of the protocol handshake. type Hello struct { @@ -77,104 +73,70 @@ type Pong struct{} func (p Pong) Code() int { return 0x03 } // Status is the network packet for the status message for eth/64 and later. -type Status struct { - ProtocolVersion uint32 - NetworkID uint64 - TD *big.Int - Head common.Hash - Genesis common.Hash - ForkID forkid.ID -} +type Status eth.StatusPacket func (s Status) Code() int { return 16 } // NewBlockHashes is the network packet for the block announcements. -type NewBlockHashes []struct { - Hash common.Hash // Hash of one particular block being announced - Number uint64 // Number of one particular block being announced -} +type NewBlockHashes eth.NewBlockHashesPacket func (nbh NewBlockHashes) Code() int { return 17 } -// NewBlock is the network packet for the block propagation message. -type NewBlock struct { - Block *types.Block - TD *big.Int -} +type Transactions eth.TransactionsPacket -func (nb NewBlock) Code() int { return 23 } +func (t Transactions) Code() int { return 18 } // GetBlockHeaders represents a block header query. -type GetBlockHeaders struct { - Origin hashOrNumber // Block from which to retrieve headers - Amount uint64 // Maximum number of headers to retrieve - Skip uint64 // Blocks to skip between consecutive headers - Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) -} +type GetBlockHeaders eth.GetBlockHeadersPacket func (g GetBlockHeaders) Code() int { return 19 } -type BlockHeaders []*types.Header +type BlockHeaders eth.BlockHeadersPacket func (bh BlockHeaders) Code() int { return 20 } -// HashOrNumber is a combined field for specifying an origin block. -type hashOrNumber struct { - Hash common.Hash // Block hash from which to retrieve headers (excludes Number) - Number uint64 // Block hash from which to retrieve headers (excludes Hash) -} - -// EncodeRLP is a specialized encoder for hashOrNumber to encode only one of the -// two contained union fields. -func (hn *hashOrNumber) EncodeRLP(w io.Writer) error { - if hn.Hash == (common.Hash{}) { - return rlp.Encode(w, hn.Number) - } - if hn.Number != 0 { - return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number) - } - return rlp.Encode(w, hn.Hash) -} - -// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents -// into either a block hash or a block number. -func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error { - _, size, _ := s.Kind() - origin, err := s.Raw() - if err == nil { - switch { - case size == 32: - err = rlp.DecodeBytes(origin, &hn.Hash) - case size <= 8: - err = rlp.DecodeBytes(origin, &hn.Number) - default: - err = fmt.Errorf("invalid input size %d for origin", size) - } - } - return err -} - // GetBlockBodies represents a GetBlockBodies request -type GetBlockBodies []common.Hash +type GetBlockBodies eth.GetBlockBodiesPacket func (gbb GetBlockBodies) Code() int { return 21 } // BlockBodies is the network packet for block content distribution. -type BlockBodies []*types.Body +type BlockBodies eth.BlockBodiesPacket func (bb BlockBodies) Code() int { return 22 } +// NewBlock is the network packet for the block propagation message. +type NewBlock eth.NewBlockPacket + +func (nb NewBlock) Code() int { return 23 } + +// NewPooledTransactionHashes is the network packet for the tx hash propagation message. 
+type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket + +func (nb NewPooledTransactionHashes) Code() int { return 24 } + +type GetPooledTransactions eth.GetPooledTransactionsPacket + +func (gpt GetPooledTransactions) Code() int { return 25 } + +type PooledTransactions eth.PooledTransactionsPacket + +func (pt PooledTransactions) Code() int { return 26 } + // Conn represents an individual connection with a peer type Conn struct { *rlpx.Conn - ourKey *ecdsa.PrivateKey - ethProtocolVersion uint + ourKey *ecdsa.PrivateKey + negotiatedProtoVersion uint + ourHighestProtoVersion uint + caps []p2p.Cap } +// Read reads an eth packet from the connection. func (c *Conn) Read() Message { code, rawData, _, err := c.Conn.Read() if err != nil { - return &Error{fmt.Errorf("could not read from connection: %v", err)} + return errorf("could not read from connection: %v", err) } var msg Message @@ -201,166 +163,121 @@ func (c *Conn) Read() Message { msg = new(NewBlock) case (NewBlockHashes{}).Code(): msg = new(NewBlockHashes) + case (Transactions{}).Code(): + msg = new(Transactions) + case (NewPooledTransactionHashes{}).Code(): + msg = new(NewPooledTransactionHashes) + case (GetPooledTransactions{}.Code()): + msg = new(GetPooledTransactions) + case (PooledTransactions{}.Code()): + msg = new(PooledTransactions) default: - return &Error{fmt.Errorf("invalid message code: %d", code)} + return errorf("invalid message code: %d", code) } - + // if message is devp2p, decode here if err := rlp.DecodeBytes(rawData, msg); err != nil { - return &Error{fmt.Errorf("could not rlp decode message: %v", err)} + return errorf("could not rlp decode message: %v", err) } - return msg } -// ReadAndServe serves GetBlockHeaders requests while waiting -// on another message from the node. -func (c *Conn) ReadAndServe(chain *Chain) Message { - for { - switch msg := c.Read().(type) { - case *Ping: - c.Write(&Pong{}) - case *GetBlockHeaders: - req := *msg - headers, err := chain.GetHeaders(req) - if err != nil { - return &Error{fmt.Errorf("could not get headers for inbound header request: %v", err)} - } - - if err := c.Write(headers); err != nil { - return &Error{fmt.Errorf("could not write to connection: %v", err)} - } - default: - return msg - } - } -} - -func (c *Conn) Write(msg Message) error { - payload, err := rlp.EncodeToBytes(msg) +// Read66 reads an eth66 packet from the connection. +func (c *Conn) Read66() (uint64, Message) { + code, rawData, _, err := c.Conn.Read() if err != nil { - return err + return 0, errorf("could not read from connection: %v", err) } - _, err = c.Conn.Write(uint64(msg.Code()), payload) - return err -} - -// handshake checks to make sure a `HELLO` is received. 
-func (c *Conn) handshake(t *utesting.T) Message { - // write protoHandshake to client - pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:] - ourHandshake := &Hello{ - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - }, - ID: pub0, - } - if err := c.Write(ourHandshake); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - // read protoHandshake from client - switch msg := c.Read().(type) { - case *Hello: - // set snappy if version is at least 5 - if msg.Version >= 5 { - c.SetSnappy(true) + var msg Message + switch int(code) { + case (Hello{}).Code(): + msg = new(Hello) + case (Ping{}).Code(): + msg = new(Ping) + case (Pong{}).Code(): + msg = new(Pong) + case (Disconnect{}).Code(): + msg = new(Disconnect) + case (Status{}).Code(): + msg = new(Status) + case (GetBlockHeaders{}).Code(): + ethMsg := new(eth.GetBlockHeadersPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) } - - c.negotiateEthProtocol(msg.Caps) - if c.ethProtocolVersion == 0 { - t.Fatalf("unexpected eth protocol version") + return ethMsg.RequestId, GetBlockHeaders(*ethMsg.GetBlockHeadersPacket) + case (BlockHeaders{}).Code(): + ethMsg := new(eth.BlockHeadersPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, BlockHeaders(ethMsg.BlockHeadersPacket) + case (GetBlockBodies{}).Code(): + ethMsg := new(eth.GetBlockBodiesPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, GetBlockBodies(ethMsg.GetBlockBodiesPacket) + case (BlockBodies{}).Code(): + ethMsg := new(eth.BlockBodiesPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, BlockBodies(ethMsg.BlockBodiesPacket) + case (NewBlock{}).Code(): + msg = new(NewBlock) + case (NewBlockHashes{}).Code(): + msg = new(NewBlockHashes) + case (Transactions{}).Code(): + msg = new(Transactions) + case (NewPooledTransactionHashes{}).Code(): + msg = new(NewPooledTransactionHashes) + case (GetPooledTransactions{}.Code()): + ethMsg := new(eth.GetPooledTransactionsPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) } - return msg + return ethMsg.RequestId, GetPooledTransactions(ethMsg.GetPooledTransactionsPacket) + case (PooledTransactions{}.Code()): + ethMsg := new(eth.PooledTransactionsPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, PooledTransactions(ethMsg.PooledTransactionsPacket) default: - t.Fatalf("bad handshake: %#v", msg) - return nil + msg = errorf("invalid message code: %d", code) } -} -// negotiateEthProtocol sets the Conn's eth protocol version -// to highest advertised capability from peer -func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) { - var highestEthVersion uint - for _, capability := range caps { - if capability.Name != "eth" { - continue - } - if capability.Version > highestEthVersion && capability.Version <= 65 { - highestEthVersion = capability.Version + if msg != nil { + if err := rlp.DecodeBytes(rawData, msg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) } + return 0, msg } - c.ethProtocolVersion = 
highestEthVersion + return 0, errorf("invalid message: %s", string(rawData)) } -// statusExchange performs a `Status` message exchange with the given -// node. -func (c *Conn) statusExchange(t *utesting.T, chain *Chain) Message { - // read status message from client - var message Message - -loop: - for { - switch msg := c.Read().(type) { - case *Status: - if msg.Head != chain.blocks[chain.Len()-1].Hash() { - t.Fatalf("wrong head in status: %v", msg.Head) - } - if msg.TD.Cmp(chain.TD(chain.Len())) != 0 { - t.Fatalf("wrong TD in status: %v", msg.TD) - } - if !reflect.DeepEqual(msg.ForkID, chain.ForkID()) { - t.Fatalf("wrong fork ID in status: %v", msg.ForkID) - } - message = msg - break loop - case *Disconnect: - t.Fatalf("disconnect received: %v", msg.Reason) - case *Ping: - c.Write(&Pong{}) // TODO (renaynay): in the future, this should be an error - // (PINGs should not be a response upon fresh connection) - default: - t.Fatalf("bad status message: %#v", msg) - } - } - // make sure eth protocol version is set for negotiation - if c.ethProtocolVersion == 0 { - t.Fatalf("eth protocol version must be set in Conn") - } - // write status message to client - status := Status{ - ProtocolVersion: uint32(c.ethProtocolVersion), - NetworkID: 1, - TD: chain.TD(chain.Len()), - Head: chain.blocks[chain.Len()-1].Hash(), - Genesis: chain.blocks[0].Hash(), - ForkID: chain.ForkID(), - } - if err := c.Write(status); err != nil { - t.Fatalf("could not write to connection: %v", err) +// Write writes a eth packet to the connection. +func (c *Conn) Write(msg Message) error { + // check if message is eth protocol message + var ( + payload []byte + err error + ) + payload, err = rlp.EncodeToBytes(msg) + if err != nil { + return err } - - return message + _, err = c.Conn.Write(uint64(msg.Code()), payload) + return err } -// waitForBlock waits for confirmation from the client that it has -// imported the given block. -func (c *Conn) waitForBlock(block *types.Block) error { - for { - req := &GetBlockHeaders{Origin: hashOrNumber{Hash: block.Hash()}, Amount: 1} - if err := c.Write(req); err != nil { - return err - } - - switch msg := c.Read().(type) { - case *BlockHeaders: - if len(*msg) > 0 { - return nil - } - time.Sleep(100 * time.Millisecond) - default: - return fmt.Errorf("invalid message: %v", msg) - } +// Write66 writes an eth66 packet to the connection. +func (c *Conn) Write66(req eth.Packet, code int) error { + payload, err := rlp.EncodeToBytes(req) + if err != nil { + return err } + _, err = c.Conn.Write(uint64(code), payload) + return err } diff --git a/cmd/devp2p/internal/v4test/discv4tests.go b/cmd/devp2p/internal/v4test/discv4tests.go index 140b96bfa..04ad67637 100644 --- a/cmd/devp2p/internal/v4test/discv4tests.go +++ b/cmd/devp2p/internal/v4test/discv4tests.go @@ -21,7 +21,6 @@ import ( "crypto/rand" "fmt" "net" - "reflect" "time" "github.com/ethereum/go-ethereum/crypto" @@ -80,25 +79,57 @@ func BasicPing(t *utesting.T) { To: te.remoteEndpoint(), Expiration: futureExpiration(), }) - - reply, _, _ := te.read(te.l1) - if err := te.checkPong(reply, pingHash); err != nil { + if err := te.checkPingPong(pingHash); err != nil { t.Fatal(err) } } -// checkPong verifies that reply is a valid PONG matching the given ping hash. +// checkPingPong verifies that the remote side sends both a PONG with the +// correct hash, and a PING. +// The two packets do not have to be in any particular order. 
+func (te *testenv) checkPingPong(pingHash []byte) error { + var ( + pings int + pongs int + ) + for i := 0; i < 2; i++ { + reply, _, err := te.read(te.l1) + if err != nil { + return err + } + switch reply.Kind() { + case v4wire.PongPacket: + if err := te.checkPong(reply, pingHash); err != nil { + return err + } + pongs++ + case v4wire.PingPacket: + pings++ + default: + return fmt.Errorf("expected PING or PONG, got %v %v", reply.Name(), reply) + } + } + if pongs == 1 && pings == 1 { + return nil + } + return fmt.Errorf("expected 1 PING (got %d) and 1 PONG (got %d)", pings, pongs) +} + +// checkPong verifies that reply is a valid PONG matching the given ping hash, +// and a PING. The two packets do not have to be in any particular order. func (te *testenv) checkPong(reply v4wire.Packet, pingHash []byte) error { - if reply == nil || reply.Kind() != v4wire.PongPacket { - return fmt.Errorf("expected PONG reply, got %v", reply) + if reply == nil { + return fmt.Errorf("expected PONG reply, got nil") + } + if reply.Kind() != v4wire.PongPacket { + return fmt.Errorf("expected PONG reply, got %v %v", reply.Name(), reply) } pong := reply.(*v4wire.Pong) if !bytes.Equal(pong.ReplyTok, pingHash) { return fmt.Errorf("PONG reply token mismatch: got %x, want %x", pong.ReplyTok, pingHash) } - wantEndpoint := te.localEndpoint(te.l1) - if !reflect.DeepEqual(pong.To, wantEndpoint) { - return fmt.Errorf("PONG 'to' endpoint mismatch: got %+v, want %+v", pong.To, wantEndpoint) + if want := te.localEndpoint(te.l1); !want.IP.Equal(pong.To.IP) || want.UDP != pong.To.UDP { + return fmt.Errorf("PONG 'to' endpoint mismatch: got %+v, want %+v", pong.To, want) } if v4wire.Expired(pong.Expiration) { return fmt.Errorf("PONG is expired (%v)", pong.Expiration) @@ -118,9 +149,7 @@ func PingWrongTo(t *utesting.T) { To: wrongEndpoint, Expiration: futureExpiration(), }) - - reply, _, _ := te.read(te.l1) - if err := te.checkPong(reply, pingHash); err != nil { + if err := te.checkPingPong(pingHash); err != nil { t.Fatal(err) } } @@ -138,8 +167,7 @@ func PingWrongFrom(t *utesting.T) { Expiration: futureExpiration(), }) - reply, _, _ := te.read(te.l1) - if err := te.checkPong(reply, pingHash); err != nil { + if err := te.checkPingPong(pingHash); err != nil { t.Fatal(err) } } @@ -160,8 +188,7 @@ func PingExtraData(t *utesting.T) { JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}, }) - reply, _, _ := te.read(te.l1) - if err := te.checkPong(reply, pingHash); err != nil { + if err := te.checkPingPong(pingHash); err != nil { t.Fatal(err) } } @@ -182,8 +209,7 @@ func PingExtraDataWrongFrom(t *utesting.T) { JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}, } pingHash := te.send(te.l1, &req) - reply, _, _ := te.read(te.l1) - if err := te.checkPong(reply, pingHash); err != nil { + if err := te.checkPingPong(pingHash); err != nil { t.Fatal(err) } } @@ -239,9 +265,9 @@ func BondThenPingWithWrongFrom(t *utesting.T) { To: te.remoteEndpoint(), Expiration: futureExpiration(), }) - - reply, _, _ := te.read(te.l1) - if err := te.checkPong(reply, pingHash); err != nil { + if reply, _, err := te.read(te.l1); err != nil { + t.Fatal(err) + } else if err := te.checkPong(reply, pingHash); err != nil { t.Fatal(err) } } diff --git a/cmd/devp2p/nodeset.go b/cmd/devp2p/nodeset.go index 2d86c3f65..1d78e34c7 100644 --- a/cmd/devp2p/nodeset.go +++ b/cmd/devp2p/nodeset.go @@ -71,6 +71,7 @@ func writeNodesJSON(file string, nodes nodeSet) { } } +// nodes returns the node records contained in the set. 
func (ns nodeSet) nodes() []*enode.Node { result := make([]*enode.Node, 0, len(ns)) for _, n := range ns { @@ -83,12 +84,37 @@ func (ns nodeSet) nodes() []*enode.Node { return result } +// add ensures the given nodes are present in the set. func (ns nodeSet) add(nodes ...*enode.Node) { for _, n := range nodes { - ns[n.ID()] = nodeJSON{Seq: n.Seq(), N: n} + v := ns[n.ID()] + v.N = n + v.Seq = n.Seq() + ns[n.ID()] = v } } +// topN returns the top n nodes by score as a new set. +func (ns nodeSet) topN(n int) nodeSet { + if n >= len(ns) { + return ns + } + + byscore := make([]nodeJSON, 0, len(ns)) + for _, v := range ns { + byscore = append(byscore, v) + } + sort.Slice(byscore, func(i, j int) bool { + return byscore[i].Score >= byscore[j].Score + }) + result := make(nodeSet, n) + for _, v := range byscore[:n] { + result[v.N.ID()] = v + } + return result +} + +// verify performs integrity checks on the node set. func (ns nodeSet) verify() error { for id, n := range ns { if n.N.ID() != id { diff --git a/cmd/devp2p/nodesetcmd.go b/cmd/devp2p/nodesetcmd.go index bb807b0d8..8ebe51a81 100644 --- a/cmd/devp2p/nodesetcmd.go +++ b/cmd/devp2p/nodesetcmd.go @@ -17,8 +17,12 @@ package main import ( + "errors" "fmt" "net" + "sort" + "strconv" + "strings" "time" "github.com/ethereum/go-ethereum/core/forkid" @@ -60,25 +64,64 @@ func nodesetInfo(ctx *cli.Context) error { ns := loadNodesJSON(ctx.Args().First()) fmt.Printf("Set contains %d nodes.\n", len(ns)) + showAttributeCounts(ns) return nil } +// showAttributeCounts prints the distribution of ENR attributes in a node set. +func showAttributeCounts(ns nodeSet) { + attrcount := make(map[string]int) + var attrlist []interface{} + for _, n := range ns { + r := n.N.Record() + attrlist = r.AppendElements(attrlist[:0])[1:] + for i := 0; i < len(attrlist); i += 2 { + key := attrlist[i].(string) + attrcount[key]++ + } + } + + var keys []string + var maxlength int + for key := range attrcount { + keys = append(keys, key) + if len(key) > maxlength { + maxlength = len(key) + } + } + sort.Strings(keys) + fmt.Println("ENR attribute counts:") + for _, key := range keys { + fmt.Printf("%s%s: %d\n", strings.Repeat(" ", maxlength-len(key)+1), key, attrcount[key]) + } +} + func nodesetFilter(ctx *cli.Context) error { if ctx.NArg() < 1 { return fmt.Errorf("need nodes file as argument") } - ns := loadNodesJSON(ctx.Args().First()) + // Parse -limit. + limit, err := parseFilterLimit(ctx.Args().Tail()) + if err != nil { + return err + } + // Parse the filters. filter, err := andFilter(ctx.Args().Tail()) if err != nil { return err } + // Load nodes and apply filters. + ns := loadNodesJSON(ctx.Args().First()) result := make(nodeSet) for id, n := range ns { if filter(n) { result[id] = n } } + if limit >= 0 { + result = result.topN(limit) + } writeNodesJSON("-", result) return nil } @@ -91,12 +134,15 @@ type nodeFilterC struct { } var filterFlags = map[string]nodeFilterC{ + "-limit": {1, trueFilter}, // needed to skip over -limit "-ip": {1, ipFilter}, "-min-age": {1, minAgeFilter}, "-eth-network": {1, ethFilter}, "-les-server": {0, lesFilter}, + "-snap": {0, snapFilter}, } +// parseFilters parses nodeFilters from args. 
func parseFilters(args []string) ([]nodeFilter, error) { var filters []nodeFilter for len(args) > 0 { @@ -104,19 +150,39 @@ func parseFilters(args []string) ([]nodeFilter, error) { if !ok { return nil, fmt.Errorf("invalid filter %q", args[0]) } - if len(args) < fc.narg { - return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args)) + if len(args)-1 < fc.narg { + return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args)-1) } - filter, err := fc.fn(args[1:]) + filter, err := fc.fn(args[1 : 1+fc.narg]) if err != nil { return nil, fmt.Errorf("%s: %v", args[0], err) } filters = append(filters, filter) - args = args[fc.narg+1:] + args = args[1+fc.narg:] } return filters, nil } +// parseFilterLimit parses the -limit option in args. It returns -1 if there is no limit. +func parseFilterLimit(args []string) (int, error) { + limit := -1 + for i, arg := range args { + if arg == "-limit" { + if i == len(args)-1 { + return -1, errors.New("-limit requires an argument") + } + n, err := strconv.Atoi(args[i+1]) + if err != nil { + return -1, fmt.Errorf("invalid -limit %q", args[i+1]) + } + limit = n + } + } + return limit, nil +} + +// andFilter parses node filters in args and and returns a single filter that requires all +// of them to match. func andFilter(args []string) (nodeFilter, error) { checks, err := parseFilters(args) if err != nil { @@ -133,6 +199,10 @@ func andFilter(args []string) (nodeFilter, error) { return f, nil } +func trueFilter(args []string) (nodeFilter, error) { + return func(n nodeJSON) bool { return true }, nil +} + func ipFilter(args []string) (nodeFilter, error) { _, cidr, err := net.ParseCIDR(args[0]) if err != nil { @@ -168,7 +238,7 @@ func ethFilter(args []string) (nodeFilter, error) { f := func(n nodeJSON) bool { var eth struct { ForkID forkid.ID - _ []rlp.RawValue `rlp:"tail"` + Tail []rlp.RawValue `rlp:"tail"` } if n.N.Load(enr.WithEntry("eth", ð)) != nil { return false @@ -181,9 +251,19 @@ func ethFilter(args []string) (nodeFilter, error) { func lesFilter(args []string) (nodeFilter, error) { f := func(n nodeJSON) bool { var les struct { - _ []rlp.RawValue `rlp:"tail"` + Tail []rlp.RawValue `rlp:"tail"` } return n.N.Load(enr.WithEntry("les", &les)) == nil } return f, nil } + +func snapFilter(args []string) (nodeFilter, error) { + f := func(n nodeJSON) bool { + var snap struct { + Tail []rlp.RawValue `rlp:"tail"` + } + return n.N.Load(enr.WithEntry("snap", &snap)) == nil + } + return f, nil +} diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go index 17019aee0..24a16f0b3 100644 --- a/cmd/devp2p/rlpxcmd.go +++ b/cmd/devp2p/rlpxcmd.go @@ -19,7 +19,6 @@ package main import ( "fmt" "net" - "os" "github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest" "github.com/ethereum/go-ethereum/crypto" @@ -47,9 +46,12 @@ var ( rlpxEthTestCommand = cli.Command{ Name: "eth-test", Usage: "Runs tests against a node", - ArgsUsage: " ", + ArgsUsage: " ", Action: rlpxEthTest, - Flags: []cli.Flag{testPatternFlag}, + Flags: []cli.Flag{ + testPatternFlag, + testTAPFlag, + }, } ) @@ -88,22 +90,19 @@ func rlpxPing(ctx *cli.Context) error { return nil } +// rlpxEthTest runs the eth protocol test suite. func rlpxEthTest(ctx *cli.Context) error { if ctx.NArg() < 3 { exit("missing path to chain.rlp as command-line argument") } - - suite := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2]) - - // Filter and run test cases. 
- tests := suite.AllTests() - if ctx.IsSet(testPatternFlag.Name) { - tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name)) + suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2]) + if err != nil { + exit(err) } - results := utesting.RunTests(tests, os.Stdout) - if fails := utesting.CountFailures(results); fails > 0 { - return fmt.Errorf("%v of %v tests passed.", len(tests)-fails, len(tests)) + // check if given node supports eth66, and if so, run eth66 protocol tests as well + is66Failed, _ := utesting.Run(utesting.Test{Name: "Is_66", Fn: suite.Is_66}) + if is66Failed { + return runTests(ctx, suite.EthTests()) } - fmt.Printf("all tests passed\n") - return nil + return runTests(ctx, suite.AllEthTests()) } diff --git a/cmd/devp2p/runtest.go b/cmd/devp2p/runtest.go new file mode 100644 index 000000000..4168f8555 --- /dev/null +++ b/cmd/devp2p/runtest.go @@ -0,0 +1,69 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "os" + + "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/log" + "gopkg.in/urfave/cli.v1" +) + +var ( + testPatternFlag = cli.StringFlag{ + Name: "run", + Usage: "Pattern of test suite(s) to run", + } + testTAPFlag = cli.BoolFlag{ + Name: "tap", + Usage: "Output TAP", + } + // These two are specific to the discovery tests. + testListen1Flag = cli.StringFlag{ + Name: "listen1", + Usage: "IP address of the first tester", + Value: v4test.Listen1, + } + testListen2Flag = cli.StringFlag{ + Name: "listen2", + Usage: "IP address of the second tester", + Value: v4test.Listen2, + } +) + +func runTests(ctx *cli.Context, tests []utesting.Test) error { + // Filter test cases. + if ctx.IsSet(testPatternFlag.Name) { + tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name)) + } + // Disable logging unless explicitly enabled. + if !ctx.GlobalIsSet("verbosity") && !ctx.GlobalIsSet("vmodule") { + log.Root().SetHandler(log.DiscardHandler()) + } + // Run the tests. + var run = utesting.RunTests + if ctx.Bool(testTAPFlag.Name) { + run = utesting.RunTAP + } + results := run(tests, os.Stdout) + if utesting.CountFailures(results) > 0 { + os.Exit(1) + } + return nil +} diff --git a/cmd/ethkey/generate.go b/cmd/ethkey/generate.go index c2aa1c6fb..629d23da5 100644 --- a/cmd/ethkey/generate.go +++ b/cmd/ethkey/generate.go @@ -26,7 +26,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" + "github.com/google/uuid" "gopkg.in/urfave/cli.v1" ) @@ -86,9 +86,12 @@ If you want to encrypt an existing private key, it can be specified by setting } // Create the keyfile object with a random UUID. 
- id := uuid.NewRandom() + UUID, err := uuid.NewRandom() + if err != nil { + utils.Fatalf("Failed to generate random uuid: %v", err) + } key := &keystore.Key{ - Id: id, + Id: UUID, Address: crypto.PubkeyToAddress(privateKey.PublicKey), PrivateKey: privateKey, } diff --git a/cmd/evm/README.md b/cmd/evm/README.md index 8f0848bde..d5257069f 100644 --- a/cmd/evm/README.md +++ b/cmd/evm/README.md @@ -4,15 +4,15 @@ The `evm t8n` tool is a stateless state transition utility. It is a utility which can 1. Take a prestate, including - - Accounts, - - Block context information, - - Previous blockshashes (*optional) +- Accounts, +- Block context information, +- Previous blockshashes (*optional) 2. Apply a set of transactions, 3. Apply a mining-reward (*optional), 4. And generate a post-state, including - - State root, transaction root, receipt root, - - Information about rejected transactions, - - Optionally: a full or partial post-state dump +- State root, transaction root, receipt root, +- Information about rejected transactions, +- Optionally: a full or partial post-state dump ## Specification @@ -30,13 +30,15 @@ Command line params that has to be supported are --trace.nomemory Disable full memory dump in traces --trace.nostack Disable stack output in traces --trace.noreturndata Disable return data output in traces - --output.basedir value Specifies where output files are placed. Will be created if it does not exist. (default: ".") + --output.basedir value Specifies where output files are placed. Will be created if it does not exist. --output.alloc alloc Determines where to put the alloc of the post-state. `stdout` - into the stdout output `stderr` - into the stderr output --output.result result Determines where to put the result (stateroot, txroot etc) of the post-state. `stdout` - into the stdout output `stderr` - into the stderr output + --output.body value If set, the RLP of the transactions (block body) will be written to this file. + --input.txs stdin stdin or file name of where to find the transactions to apply. If the file prefix is '.rlp', then the data is interpreted as an RLP list of signed transactions.The '.rlp' format is identical to the output.body format. (default: "txs.json") --state.fork value Name of ruleset to use. --state.chainid value ChainID to use (default: 1) --state.reward value Mining reward. Set to -1 to disable (default: 0) @@ -110,7 +112,10 @@ Two resulting files: } ], "rejected": [ - 1 + { + "index": 1, + "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" + } ] } ``` @@ -156,7 +161,10 @@ Output: } ], "rejected": [ - 1 + { + "index": 1, + "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" + } ] } } @@ -168,9 +176,9 @@ Mining rewards and ommer rewards might need to be added. This is how those are a - `block_reward` is the block mining reward for the miner (`0xaa`), of a block at height `N`. 
- For each ommer (mined by `0xbb`), with blocknumber `N-delta` - - (where `delta` is the difference between the current block and the ommer) - - The account `0xbb` (ommer miner) is awarded `(8-delta)/ 8 * block_reward` - - The account `0xaa` (block miner) is awarded `block_reward / 32` + - (where `delta` is the difference between the current block and the ommer) + - The account `0xbb` (ommer miner) is awarded `(8-delta)/ 8 * block_reward` + - The account `0xaa` (block miner) is awarded `block_reward / 32` To make `state_t8n` apply these, the following inputs are required: @@ -220,7 +228,7 @@ Output: ### Future EIPS It is also possible to experiment with future eips that are not yet defined in a hard fork. -Example, putting EIP-1344 into Frontier: +Example, putting EIP-1344 into Frontier: ``` ./evm t8n --state.fork=Frontier+1344 --input.pre=./testdata/1/pre.json --input.txs=./testdata/1/txs.json --input.env=/testdata/1/env.json ``` @@ -229,42 +237,102 @@ Example, putting EIP-1344 into Frontier: The `BLOCKHASH` opcode requires blockhashes to be provided by the caller, inside the `env`. If a required blockhash is not provided, the exit code should be `4`: -Example where blockhashes are provided: +Example where blockhashes are provided: ``` -./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace +./evm --verbosity=1 t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace +INFO [07-27|11:53:40.960] Trie dumping started root=b7341d..857ea1 +INFO [07-27|11:53:40.960] Trie dumping complete accounts=3 elapsed="103.298µs" +INFO [07-27|11:53:40.960] Wrote file file=alloc.json +INFO [07-27|11:53:40.960] Wrote file file=result.json + ``` + ``` cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2 ``` ``` -{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"PUSH1","error":""} -{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"BLOCKHASH","error":""} -{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"STOP","error":""} -{"output":"","gasUsed":"0x17","time":112885} +{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnData":"0x","depth":1,"refund":0,"opName":"PUSH1","error":""} +{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnData":"0x","depth":1,"refund":0,"opName":"BLOCKHASH","error":""} +{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnData":"0x","depth":1,"refund":0,"opName":"STOP","error":""} +{"output":"","gasUsed":"0x17","time":156276} ``` In this example, the caller has not provided the required blockhash: ``` ./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace -``` -``` ERROR(4): getHash(3) invoked, blockhash for that block not provided ``` Error code: 4 + ### Chaining Another thing that can be done, is to chain invocations: ``` ./evm t8n 
--input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json -INFO [08-03|15:25:15.168] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" -INFO [08-03|15:25:15.169] rejected tx index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" -INFO [08-03|15:25:15.169] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" +INFO [07-27|11:53:41.049] rejected tx index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" +INFO [07-27|11:53:41.050] Trie dumping started root=84208a..ae4e13 +INFO [07-27|11:53:41.050] Trie dumping complete accounts=3 elapsed="59.412µs" +INFO [07-27|11:53:41.050] Wrote file file=result.json +INFO [07-27|11:53:41.051] rejected tx index=0 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" +INFO [07-27|11:53:41.051] rejected tx index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" +INFO [07-27|11:53:41.052] Trie dumping started root=84208a..ae4e13 +INFO [07-27|11:53:41.052] Trie dumping complete accounts=3 elapsed="45.734µs" +INFO [07-27|11:53:41.052] Wrote file file=alloc.json +INFO [07-27|11:53:41.052] Wrote file file=result.json ``` -What happened here, is that we first applied two identical transactions, so the second one was rejected. +What happened here, is that we first applied two identical transactions, so the second one was rejected. Then, taking the poststate alloc as the input for the next state, we tried again to include the same two transactions: this time, both failed due to too low nonce. In order to meaningfully chain invocations, one would need to provide meaningful new `env`, otherwise the actual blocknumber (exposed to the EVM) would not increase. +### Transactions in RLP form + +It is possible to provide already-signed transactions as input to, using an `input.txs` which ends with the `rlp` suffix. +The input format for RLP-form transactions is _identical_ to the _output_ format for block bodies. Therefore, it's fully possible +to use the evm to go from `json` input to `rlp` input. + +The following command takes **json** the transactions in `./testdata/13/txs.json` and signs them. 
After execution, they are output to `signed_txs.rlp`.: +``` +./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./testdata/13/txs.json --input.env=./testdata/13/env.json --output.result=alloc_jsontx.json --output.body=signed_txs.rlp +INFO [07-27|11:53:41.124] Trie dumping started root=e4b924..6aef61 +INFO [07-27|11:53:41.124] Trie dumping complete accounts=3 elapsed="94.284µs" +INFO [07-27|11:53:41.125] Wrote file file=alloc.json +INFO [07-27|11:53:41.125] Wrote file file=alloc_jsontx.json +INFO [07-27|11:53:41.125] Wrote file file=signed_txs.rlp + +``` + +The `output.body` is the rlp-list of transactions, encoded in hex and placed in a string a'la `json` encoding rules: +``` +cat signed_txs.rlp +"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" +``` + +We can use `rlpdump` to check what the contents are: +``` +rlpdump -hex $(cat signed_txs.rlp | jq -r ) +[ + 02f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904, + 02f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9, +] +``` +Now, we can now use those (or any other already signed transactions), as input, like so: +``` +./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./signed_txs.rlp --input.env=./testdata/13/env.json --output.result=alloc_rlptx.json +INFO [07-27|11:53:41.253] Trie dumping started root=e4b924..6aef61 +INFO [07-27|11:53:41.253] Trie dumping complete accounts=3 elapsed="128.445µs" +INFO [07-27|11:53:41.253] Wrote file file=alloc.json +INFO [07-27|11:53:41.255] Wrote file file=alloc_rlptx.json + +``` + +You might have noticed that the results from these two invocations were stored in two separate files. +And we can now finally check that they match. 
+``` +cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot +"0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61" +"0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61" +``` diff --git a/cmd/evm/compiler.go b/cmd/evm/compiler.go index c019a2fe7..40ad9313c 100644 --- a/cmd/evm/compiler.go +++ b/cmd/evm/compiler.go @@ -23,7 +23,7 @@ import ( "github.com/ethereum/go-ethereum/cmd/evm/internal/compiler" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var compileCommand = cli.Command{ diff --git a/cmd/evm/disasm.go b/cmd/evm/disasm.go index 5bc743aa8..68a09cbf5 100644 --- a/cmd/evm/disasm.go +++ b/cmd/evm/disasm.go @@ -23,7 +23,7 @@ import ( "strings" "github.com/ethereum/go-ethereum/core/asm" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var disasmCommand = cli.Command{ diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 75586d588..1ab2f001e 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -52,7 +52,7 @@ type ExecutionResult struct { LogsHash common.Hash `json:"logsHash"` Bloom types.Bloom `json:"logsBloom" gencodec:"required"` Receipts types.Receipts `json:"receipts"` - Rejected []int `json:"rejected,omitempty"` + Rejected []*rejectedTx `json:"rejected,omitempty"` } type ommer struct { @@ -69,6 +69,7 @@ type stEnv struct { Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` Ommers []ommer `json:"ommers,omitempty"` + BaseFee *big.Int `json:"currentBaseFee,omitempty"` } type stEnvMarshaling struct { @@ -77,6 +78,12 @@ type stEnvMarshaling struct { GasLimit math.HexOrDecimal64 Number math.HexOrDecimal64 Timestamp math.HexOrDecimal64 + BaseFee *math.HexOrDecimal256 +} + +type rejectedTx struct { + Index int `json:"index"` + Err string `json:"error"` } // Apply applies a set of transactions to a pre-state @@ -103,14 +110,14 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, signer = types.MakeSigner(chainConfig, new(big.Int).SetUint64(pre.Env.Number)) gaspool = new(core.GasPool) blockHash = common.Hash{0x13, 0x37} - rejectedTxs []int + rejectedTxs []*rejectedTx includedTxs types.Transactions gasUsed = uint64(0) receipts = make(types.Receipts, 0) txIndex = 0 ) gaspool.AddGas(pre.Env.GasLimit) - vmContext := vm.Context{ + vmContext := vm.BlockContext{ CanTransfer: core.CanTransfer, Transfer: core.Transfer, Coinbase: pre.Env.Coinbase, @@ -119,7 +126,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, Difficulty: pre.Env.Difficulty, GasLimit: pre.Env.GasLimit, GetHash: getHash, - // GasPrice and Origin needs to be set per transaction + } + // If currentBaseFee is defined, add it to the vmContext. + if pre.Env.BaseFee != nil { + vmContext.BaseFee = new(big.Int).Set(pre.Env.BaseFee) } // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's // done in StateProcessor.Process(block, ...), right before transactions are applied. 
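As the `rejectedTx` struct above suggests, the `rejected` field of the result is now a list of objects carrying both the transaction index and the rejection reason, rather than bare indices. Below is a minimal, self-contained Go sketch of the JSON shape this marshals to; it uses a local copy of the struct purely for illustration, and the error string is taken from the README example earlier in this patch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the rejectedTx shape, for illustration only.
type rejectedTx struct {
	Index int    `json:"index"`
	Err   string `json:"error"`
}

func main() {
	rejected := []*rejectedTx{
		{Index: 1, Err: "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"},
	}
	out, _ := json.MarshalIndent(rejected, "", "  ")
	fmt.Println(string(out)) // prints a list of {"index": ..., "error": ...} objects
}
```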
@@ -130,10 +140,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } for i, tx := range txs { - msg, err := tx.AsMessage(signer) + msg, err := tx.AsMessage(signer, pre.Env.BaseFee) if err != nil { - log.Info("rejected tx", "index", i, "hash", tx.Hash(), "error", err) - rejectedTxs = append(rejectedTxs, i) + log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err) + rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) continue } tracer, err := getTracerFn(txIndex, tx.Hash()) @@ -142,18 +152,17 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } vmConfig.Tracer = tracer vmConfig.Debug = (tracer != nil) - statedb.Prepare(tx.Hash(), blockHash, txIndex) - vmContext.GasPrice = msg.GasPrice() - vmContext.Origin = msg.From() - - evm := vm.NewEVM(vmContext, statedb, chainConfig, vmConfig) + statedb.Prepare(tx.Hash(), txIndex) + txContext := core.NewEVMTxContext(msg) snapshot := statedb.Snapshot() + evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig) + // (ret []byte, usedGas uint64, failed bool, err error) msgResult, err := core.ApplyMessage(evm, msg, gaspool) if err != nil { statedb.RevertToSnapshot(snapshot) log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From(), "error", err) - rejectedTxs = append(rejectedTxs, i) + rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) continue } includedTxs = append(includedTxs, tx) @@ -161,7 +170,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, return nil, nil, NewError(ErrorMissingBlockhash, hashError) } gasUsed += msgResult.UsedGas - // Create a new receipt for the transaction, storing the intermediate root and gas used by the tx + + // Receipt: { var root []byte if chainConfig.IsByzantium(vmContext.BlockNumber) { @@ -170,22 +180,32 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, root = statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)).Bytes() } - receipt := types.NewReceipt(root, msgResult.Failed(), gasUsed) + // Create a new receipt for the transaction, storing the intermediate root and + // gas used by the tx. + receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: gasUsed} + if msgResult.Failed() { + receipt.Status = types.ReceiptStatusFailed + } else { + receipt.Status = types.ReceiptStatusSuccessful + } receipt.TxHash = tx.Hash() receipt.GasUsed = msgResult.UsedGas - // if the transaction created a contract, store the creation address in the receipt. + + // If the transaction created a contract, store the creation address in the receipt. if msg.To() == nil { - receipt.ContractAddress = crypto.CreateAddress(evm.Context.Origin, tx.Nonce()) + receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) } - // Set the receipt logs and create a bloom for filtering - receipt.Logs = statedb.GetLogs(tx.Hash()) + + // Set the receipt logs and create the bloom filter. 
+ receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash) receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) - // These three are non-consensus fields + // These three are non-consensus fields: //receipt.BlockHash - //receipt.BlockNumber = + //receipt.BlockNumber receipt.TransactionIndex = uint(txIndex) receipts = append(receipts, receipt) } + txIndex++ } statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)) @@ -221,8 +241,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } execRs := &ExecutionResult{ StateRoot: root, - TxRoot: types.DeriveSha(includedTxs, new(trie.Trie)), - ReceiptRoot: types.DeriveSha(receipts, new(trie.Trie)), + TxRoot: types.DeriveSha(includedTxs, trie.NewStackTrie(nil)), + ReceiptRoot: types.DeriveSha(receipts, trie.NewStackTrie(nil)), Bloom: types.CreateBloom(receipts), LogsHash: rlpHash(statedb.Logs()), Receipts: receipts, diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go index 424156ba8..626f974a0 100644 --- a/cmd/evm/internal/t8ntool/flags.go +++ b/cmd/evm/internal/t8ntool/flags.go @@ -47,6 +47,11 @@ var ( Usage: "Specifies where output files are placed. Will be created if it does not exist.", Value: "", } + OutputBodyFlag = cli.StringFlag{ + Name: "output.body", + Usage: "If set, the RLP of the transactions (block body) will be written to this file.", + Value: "", + } OutputAllocFlag = cli.StringFlag{ Name: "output.alloc", Usage: "Determines where to put the `alloc` of the post-state.\n" + @@ -74,8 +79,10 @@ var ( Value: "env.json", } InputTxsFlag = cli.StringFlag{ - Name: "input.txs", - Usage: "`stdin` or file name of where to find the transactions to apply.", + Name: "input.txs", + Usage: "`stdin` or file name of where to find the transactions to apply. " + + "If the file prefix is '.rlp', then the data is interpreted as an RLP list of signed transactions." 
+ + "The '.rlp' format is identical to the output.body format.", Value: "txs.json", } RewardFlag = cli.Int64Flag{ diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index ab5951534..c7f079c02 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -23,6 +23,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) { Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` } var enc stEnv enc.Coinbase = common.UnprefixedAddress(s.Coinbase) @@ -32,6 +33,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) { enc.Timestamp = math.HexOrDecimal64(s.Timestamp) enc.BlockHashes = s.BlockHashes enc.Ommers = s.Ommers + enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee) return json.Marshal(&enc) } @@ -45,6 +47,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` } var dec stEnv if err := json.Unmarshal(input, &dec); err != nil { @@ -76,5 +79,8 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { if dec.Ommers != nil { s.Ommers = dec.Ommers } + if dec.BaseFee != nil { + s.BaseFee = (*big.Int)(dec.BaseFee) + } return nil } diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 5119ed5fb..8334aa01d 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -17,20 +17,26 @@ package t8ntool import ( + "crypto/ecdsa" "encoding/json" + "errors" "fmt" "io/ioutil" "math/big" "os" "path" + "strings" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/tests" "gopkg.in/urfave/cli.v1" ) @@ -64,9 +70,10 @@ func (n *NumberedError) Code() int { } type input struct { - Alloc core.GenesisAlloc `json:"alloc,omitempty"` - Env *stEnv `json:"env,omitempty"` - Txs types.Transactions `json:"txs,omitempty"` + Alloc core.GenesisAlloc `json:"alloc,omitempty"` + Env *stEnv `json:"env,omitempty"` + Txs []*txWithKey `json:"txs,omitempty"` + TxRlp string `json:"txsRlp,omitempty"` } func Main(ctx *cli.Context) error { @@ -135,10 +142,12 @@ func Main(ctx *cli.Context) error { txStr = ctx.String(InputTxsFlag.Name) inputData = &input{} ) - + // Figure out the prestate alloc if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector { decoder := json.NewDecoder(os.Stdin) - decoder.Decode(inputData) + if err := decoder.Decode(inputData); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) + } } if allocStr != stdinSelector { inFile, err := os.Open(allocStr) @@ -148,10 +157,12 @@ func Main(ctx *cli.Context) error { defer inFile.Close() decoder := json.NewDecoder(inFile) if err := decoder.Decode(&inputData.Alloc); err != nil { - return NewError(ErrorJson, 
fmt.Errorf("Failed unmarshaling alloc-file: %v", err)) + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling alloc-file: %v", err)) } } + prestate.Pre = inputData.Alloc + // Set the block environment if envStr != stdinSelector { inFile, err := os.Open(envStr) if err != nil { @@ -161,30 +172,12 @@ func Main(ctx *cli.Context) error { decoder := json.NewDecoder(inFile) var env stEnv if err := decoder.Decode(&env); err != nil { - return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling env-file: %v", err)) + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling env-file: %v", err)) } inputData.Env = &env } - - if txStr != stdinSelector { - inFile, err := os.Open(txStr) - if err != nil { - return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err)) - } - defer inFile.Close() - decoder := json.NewDecoder(inFile) - var txs types.Transactions - if err := decoder.Decode(&txs); err != nil { - return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling txs-file: %v", err)) - } - inputData.Txs = txs - } - - prestate.Pre = inputData.Alloc prestate.Env = *inputData.Env - txs = inputData.Txs - // Iterate over all the tests, run them and aggregate the results vmConfig := vm.Config{ Tracer: tracer, Debug: (tracer != nil), @@ -192,7 +185,7 @@ func Main(ctx *cli.Context) error { // Construct the chainconfig var chainConfig *params.ChainConfig if cConf, extraEips, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil { - return NewError(ErrorVMConfig, fmt.Errorf("Failed constructing chain configuration: %v", err)) + return NewError(ErrorVMConfig, fmt.Errorf("failed constructing chain configuration: %v", err)) } else { chainConfig = cConf vmConfig.ExtraEips = extraEips @@ -200,17 +193,140 @@ func Main(ctx *cli.Context) error { // Set the chain id chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name)) + var txsWithKeys []*txWithKey + if txStr != stdinSelector { + inFile, err := os.Open(txStr) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err)) + } + defer inFile.Close() + decoder := json.NewDecoder(inFile) + if strings.HasSuffix(txStr, ".rlp") { + var body hexutil.Bytes + if err := decoder.Decode(&body); err != nil { + return err + } + var txs types.Transactions + if err := rlp.DecodeBytes(body, &txs); err != nil { + return err + } + for _, tx := range txs { + txsWithKeys = append(txsWithKeys, &txWithKey{ + key: nil, + tx: tx, + }) + } + } else { + if err := decoder.Decode(&txsWithKeys); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err)) + } + } + } else { + if len(inputData.TxRlp) > 0 { + // Decode the body of already signed transactions + body := common.FromHex(inputData.TxRlp) + var txs types.Transactions + if err := rlp.DecodeBytes(body, &txs); err != nil { + return err + } + for _, tx := range txs { + txsWithKeys = append(txsWithKeys, &txWithKey{ + key: nil, + tx: tx, + }) + } + } else { + // JSON encoded transactions + txsWithKeys = inputData.Txs + } + } + // We may have to sign the transactions. 
+ signer := types.MakeSigner(chainConfig, big.NewInt(int64(prestate.Env.Number))) + + if txs, err = signUnsignedTransactions(txsWithKeys, signer); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed signing transactions: %v", err)) + } + // Sanity check, to not `panic` in state_transition + if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) { + if prestate.Env.BaseFee == nil { + return NewError(ErrorVMConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section")) + } + } // Run the test and aggregate the result - state, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) + s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) if err != nil { return err } + body, _ := rlp.EncodeToBytes(txs) // Dump the excution result - //postAlloc := state.DumpGenesisFormat(false, false, false) collector := make(Alloc) - state.DumpToCollector(collector, false, false, false, nil, -1) - return dispatchOutput(ctx, baseDir, result, collector) + s.DumpToCollector(collector, nil) + return dispatchOutput(ctx, baseDir, result, collector, body) +} + +// txWithKey is a helper-struct, to allow us to use the types.Transaction along with +// a `secretKey`-field, for input +type txWithKey struct { + key *ecdsa.PrivateKey + tx *types.Transaction +} + +func (t *txWithKey) UnmarshalJSON(input []byte) error { + // Read the secretKey, if present + type sKey struct { + Key *common.Hash `json:"secretKey"` + } + var key sKey + if err := json.Unmarshal(input, &key); err != nil { + return err + } + if key.Key != nil { + k := key.Key.Hex()[2:] + if ecdsaKey, err := crypto.HexToECDSA(k); err != nil { + return err + } else { + t.key = ecdsaKey + } + } + // Now, read the transaction itself + var tx types.Transaction + if err := json.Unmarshal(input, &tx); err != nil { + return err + } + t.tx = &tx + return nil +} +// signUnsignedTransactions converts the input txs to canonical transactions. +// +// The transactions can have two forms, either +// 1. unsigned or +// 2. signed +// For (1), r, s, v, need so be zero, and the `secretKey` needs to be set. +// If so, we sign it here and now, with the given `secretKey` +// If the condition above is not met, then it's considered a signed transaction. 
+// +// To manage this, we read the transactions twice, first trying to read the secretKeys, +// and secondly to read them with the standard tx json format +func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Transactions, error) { + var signedTxs []*types.Transaction + for i, txWithKey := range txs { + tx := txWithKey.tx + key := txWithKey.key + v, r, s := tx.RawSignatureValues() + if key != nil && v.BitLen()+r.BitLen()+s.BitLen() == 0 { + // This transaction needs to be signed + signed, err := types.SignTx(tx, signer, key) + if err != nil { + return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err)) + } + signedTxs = append(signedTxs, signed) + } else { + // Already signed + signedTxs = append(signedTxs, tx) + } + } + return signedTxs, nil } type Alloc map[common.Address]core.GenesisAccount @@ -227,7 +343,7 @@ func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) { } } genesisAccount := core.GenesisAccount{ - Code: common.FromHex(dumpAccount.Code), + Code: dumpAccount.Code, Storage: storage, Balance: balance, Nonce: dumpAccount.Nonce, @@ -241,15 +357,17 @@ func saveFile(baseDir, filename string, data interface{}) error { if err != nil { return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) } - if err = ioutil.WriteFile(path.Join(baseDir, filename), b, 0644); err != nil { + location := path.Join(baseDir, filename) + if err = ioutil.WriteFile(location, b, 0644); err != nil { return NewError(ErrorIO, fmt.Errorf("failed writing output: %v", err)) } + log.Info("Wrote file", "file", location) return nil } // dispatchOutput writes the output data to either stderr or stdout, or to the specified // files -func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc) error { +func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes) error { stdOutObject := make(map[string]interface{}) stdErrObject := make(map[string]interface{}) dispatch := func(baseDir, fName, name string, obj interface{}) error { @@ -258,6 +376,8 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a stdOutObject[name] = obj case "stderr": stdErrObject[name] = obj + case "": + // don't save default: // save to file if err := saveFile(baseDir, fName, obj); err != nil { return err @@ -271,12 +391,16 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a if err := dispatch(baseDir, ctx.String(OutputResultFlag.Name), "result", result); err != nil { return err } + if err := dispatch(baseDir, ctx.String(OutputBodyFlag.Name), "body", body); err != nil { + return err + } if len(stdOutObject) > 0 { b, err := json.MarshalIndent(stdOutObject, "", " ") if err != nil { return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) } os.Stdout.Write(b) + os.Stdout.Write([]byte("\n")) } if len(stdErrObject) > 0 { b, err := json.MarshalIndent(stdErrObject, "", " ") @@ -284,6 +408,7 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) } os.Stderr.Write(b) + os.Stderr.Write([]byte("\n")) } return nil } diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 35c672142..b9c0d17f3 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -129,11 +129,6 @@ var ( Name: "noreturndata", Usage: "disable return data output", } - EVMInterpreterFlag = cli.StringFlag{ - Name: "vm.evm", - Usage: "External EVM 
configuration (default = built-in interpreter)", - Value: "", - } ) var stateTransitionCommand = cli.Command{ @@ -149,6 +144,7 @@ var stateTransitionCommand = cli.Command{ t8ntool.OutputBasedir, t8ntool.OutputAllocFlag, t8ntool.OutputResultFlag, + t8ntool.OutputBodyFlag, t8ntool.InputAllocFlag, t8ntool.InputEnvFlag, t8ntool.InputTxsFlag, @@ -184,7 +180,6 @@ func init() { DisableStackFlag, DisableStorageFlag, DisableReturnDataFlag, - EVMInterpreterFlag, } app.Commands = []cli.Command{ compileCommand, diff --git a/cmd/evm/poststate.json b/cmd/evm/poststate.json deleted file mode 100644 index 9ee17f18d..000000000 --- a/cmd/evm/poststate.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "root": "f4157bb27bcb1d1a63001434a249a80948f2e9fe1f53d551244c1dae826b5b23", - "accounts": { - "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { - "balance": "4276951709", - "nonce": 1, - "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "6916764286133345652", - "nonce": 172, - "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - }, - "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "42500", - "nonce": 0, - "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - } - } -} \ No newline at end of file diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index d0be6ca1e..e409d2692 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -38,7 +38,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm/runtime" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var runCommand = cli.Command{ @@ -211,9 +211,8 @@ func runCmd(ctx *cli.Context) error { Coinbase: genesisConfig.Coinbase, BlockNumber: new(big.Int).SetUint64(genesisConfig.Number), EVMConfig: vm.Config{ - Tracer: tracer, - Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name), - EVMInterpreter: ctx.GlobalString(EVMInterpreterFlag.Name), + Tracer: tracer, + Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name), }, } @@ -270,7 +269,7 @@ func runCmd(ctx *cli.Context) error { if ctx.GlobalBool(DumpFlag.Name) { statedb.Commit(true) statedb.IntermediateRoot(true) - fmt.Println(string(statedb.Dump(false, false, true))) + fmt.Println(string(statedb.Dump(nil))) } if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" { diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index f9a6b06b8..d8bc4eae8 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -28,7 +28,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/tests" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var stateTestCommand = cli.Command{ @@ -98,16 +98,16 @@ func stateTestCmd(ctx *cli.Context) error { for _, st := range test.Subtests() { // Run the test and aggregate the result result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} - _, state, err := test.Run(st, cfg, false) + _, s, err := test.Run(st, cfg, false) // print state root for evmlab tracing - if ctx.GlobalBool(MachineFlag.Name) && state != nil { - fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false)) + if 
ctx.GlobalBool(MachineFlag.Name) && s != nil { + fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", s.IntermediateRoot(false)) } if err != nil { // Test failed, mark as so and dump any state to aid debugging result.Pass, result.Error = false, err.Error() - if ctx.GlobalBool(DumpFlag.Name) && state != nil { - dump := state.RawDump(false, false, true) + if ctx.GlobalBool(DumpFlag.Name) && s != nil { + dump := s.RawDump(nil) result.State = &dump } } diff --git a/cmd/evm/testdata/10/alloc.json b/cmd/evm/testdata/10/alloc.json new file mode 100644 index 000000000..6e98e7513 --- /dev/null +++ b/cmd/evm/testdata/10/alloc.json @@ -0,0 +1,23 @@ +{ + "0x1111111111111111111111111111111111111111" : { + "balance" : "0x010000000000", + "code" : "0xfe", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x010000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363" : { + "balance" : "0x01000000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/10/env.json b/cmd/evm/testdata/10/env.json new file mode 100644 index 000000000..3a82d46a7 --- /dev/null +++ b/cmd/evm/testdata/10/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x079e", + "previousHash" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f", + "currentGasLimit" : "0x40000000", + "currentBaseFee" : "0x036b", + "blockHashes" : { + "0" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/10/readme.md b/cmd/evm/testdata/10/readme.md new file mode 100644 index 000000000..c34be80bb --- /dev/null +++ b/cmd/evm/testdata/10/readme.md @@ -0,0 +1,79 @@ +## EIP-1559 testing + +This test contains testcases for EIP-1559, which were reported by Ori as misbehaving. 
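The rejection in the run below is purely a block-gas-limit effect: `env.json` sets `currentGasLimit` to `0x40000000`, while the four transactions in `txs.json` ask for `0x10000001 + 3 * 0x10000000 = 0x40000001` gas in total, so the last one no longer fits. A minimal sketch of that bookkeeping (illustrative only, not the geth code; each included transaction here happens to burn its full allowance because the callee is the invalid opcode `0xfe`):

```go
package main

import "fmt"

func main() {
	blockGasLimit := uint64(0x40000000) // currentGasLimit from env.json
	// gas fields of the four transactions in txs.json
	txGasLimits := []uint64{0x10000001, 0x10000000, 0x10000000, 0x10000000}

	var used uint64
	for i, g := range txGasLimits {
		if used+g > blockGasLimit {
			fmt.Printf("rejected tx index=%d error=%q\n", i, "gas limit reached")
			continue
		}
		used += g
		fmt.Printf("included tx index=%d cumulativeGasUsed=%#x\n", i, used)
	}
}
```

This reproduces the cumulative gas values `0x10000001`, `0x20000001`, `0x30000001` seen in the receipts and the rejection of transaction index 3.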
+ +``` +[user@work evm]$ dir=./testdata/10 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1 +INFO [05-09|22:11:59.436] rejected tx index=3 hash=db07bf..ede1e8 from=0xd02d72E067e77158444ef2020Ff2d325f929B363 error="gas limit reached" +``` +Output: +```json +{ + "alloc": { + "0x1111111111111111111111111111111111111111": { + "code": "0xfe", + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363": { + "balance": "0xff5beffffc95", + "nonce": "0x4" + } + }, + "result": { + "stateRoot": "0xf91a7ec08e4bfea88719aab34deabb000c86902360532b52afa9599d41f2bb8b", + "txRoot": "0xda925f2306a52fa24c15d5cd212d736ee016415fd8dd0c45fd368de7917d64bb", + "receiptRoot": "0x439a25f7fc424c10fb1f89800e4aa1df74156b137239d9ac3eaa7c911c353cd5", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x10000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x88980f6efcc5358d9c359663e7b9414722d430497637340ea056b076bc206701", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000001", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x20000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xd7bf3886f4e2aef74d525ae072c680f3846f550254401b67cbfda4a233757582", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x1" + }, + { + "type": "0x2", + "root": "0x", + 
"status": "0x0", + "cumulativeGasUsed": "0x30000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x50308296760f01f1eeec7500e9e73cad67469249b1f59e9a9f55e6625a4923db", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x2" + } + ], + "rejected": [ + 3 + ] + } +} +``` diff --git a/cmd/evm/testdata/10/txs.json b/cmd/evm/testdata/10/txs.json new file mode 100644 index 000000000..f7c9baa26 --- /dev/null +++ b/cmd/evm/testdata/10/txs.json @@ -0,0 +1,70 @@ +[ + { + "input" : "0x", + "gas" : "0x10000001", + "nonce" : "0x1", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x7a45f00bcde9036b026cdf1628b023cd8a31a95c62b5e4dbbee2fa7debe668fb", + "s" : "0x3cc9d6f2cd00a045b0263f2d6dad7d60938d5d13d061af4969f95928aa934d4a", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x2", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x4c564b94b0281a8210eeec2dd1fe2e16ff1c1903a8c3a1078d735d7f8208b2af", + "s" : "0x56432b2593e6de95db1cb997b7385217aca03f1615327e231734446b39f266d", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x3", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x2ed2ef52f924f59d4a21e1f2a50d3b1109303ce5e32334a7ece9b46f4fbc2a57", + "s" : "0x2980257129cbd3da987226f323d50ba3975a834d165e0681f991b75615605c44", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x4", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x5df7d7f8f8e15b36fc9f189cacb625040fad10398d08fc90812595922a2c49b2", + "s" : "0x565fc1803f77a84d754ffe3c5363ab54a8d93a06ea1bb9d4c73c73a282b35917", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/11/alloc.json b/cmd/evm/testdata/11/alloc.json new file mode 100644 index 000000000..86938230f --- /dev/null +++ b/cmd/evm/testdata/11/alloc.json @@ -0,0 +1,25 @@ +{ + "0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6" : { + "balance" : "0x0de0b6b3a7640000", + 
"code" : "0x61ffff5060046000f3", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + "0x00" : "0x00" + } + }, + "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x00", + "code" : "0x6001600055", + "nonce" : "0x00", + "storage" : { + } + } +} + diff --git a/cmd/evm/testdata/11/env.json b/cmd/evm/testdata/11/env.json new file mode 100644 index 000000000..37dedf094 --- /dev/null +++ b/cmd/evm/testdata/11/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8", + "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", + "currentGasLimit" : "0x0f4240", + "blockHashes" : { + "0" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2" + } +} + diff --git a/cmd/evm/testdata/11/readme.md b/cmd/evm/testdata/11/readme.md new file mode 100644 index 000000000..d499f8e99 --- /dev/null +++ b/cmd/evm/testdata/11/readme.md @@ -0,0 +1,13 @@ +## Test missing basefee + +In this test, the `currentBaseFee` is missing from the env portion. +On a live blockchain, the basefee is present in the header, and verified as part of header validation. + +In `evm t8n`, we don't have blocks, so it needs to be added in the `env`instead. + +When it's missing, an error is expected. + +``` +dir=./testdata/11 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1>/dev/null +ERROR(3): EIP-1559 config but missing 'currentBaseFee' in env section +``` \ No newline at end of file diff --git a/cmd/evm/testdata/11/txs.json b/cmd/evm/testdata/11/txs.json new file mode 100644 index 000000000..c54b0a1f5 --- /dev/null +++ b/cmd/evm/testdata/11/txs.json @@ -0,0 +1,14 @@ +[ + { + "input" : "0x38600060013960015160005560006000f3", + "gas" : "0x61a80", + "gasPrice" : "0x1", + "nonce" : "0x0", + "value" : "0x186a0", + "v" : "0x1c", + "r" : "0x2e1391fd903387f1cc2b51df083805fb4bbb0d4710a2cdf4a044d191ff7be63e", + "s" : "0x7f10a933c42ab74927db02b1db009e923d9d2ab24ac24d63c399f2fe5d9c9b22", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] + diff --git a/cmd/evm/testdata/12/alloc.json b/cmd/evm/testdata/12/alloc.json new file mode 100644 index 000000000..3ed96894f --- /dev/null +++ b/cmd/evm/testdata/12/alloc.json @@ -0,0 +1,11 @@ +{ + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "84000000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + "0x00" : "0x00" + } + } +} + diff --git a/cmd/evm/testdata/12/env.json b/cmd/evm/testdata/12/env.json new file mode 100644 index 000000000..8ae546536 --- /dev/null +++ b/cmd/evm/testdata/12/env.json @@ -0,0 +1,10 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8", + "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", + "currentGasLimit" : "0x0f4240", + "currentBaseFee" : "0x20" +} + diff --git a/cmd/evm/testdata/12/readme.md b/cmd/evm/testdata/12/readme.md new file mode 100644 index 000000000..b0177ecc2 --- /dev/null +++ b/cmd/evm/testdata/12/readme.md @@ -0,0 +1,40 @@ +## Test 1559 balance + gasCap + +This test contains an EIP-1559 consensus issue which happened on 
Ropsten, where +`geth` did not properly account for the value transfer while doing the check on `max_fee_per_gas * gas_limit`. + +Before the issue was fixed, this invocation allowed the transaction to pass into a block: +``` +dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout +``` + +With the fix applied, the result is: +``` +dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout +INFO [07-21|19:03:50.276] rejected tx index=0 hash=ccc996..d83435 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" +INFO [07-21|19:03:50.276] Trie dumping started root=e05f81..6597a5 +INFO [07-21|19:03:50.276] Trie dumping complete accounts=1 elapsed="39.549µs" +{ + "alloc": { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x501bd00" + } + }, + "result": { + "stateRoot": "0xe05f81f8244a76503ceec6f88abfcd03047a612a1001217f37d30984536597a5", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "rejected": [ + { + "index": 0, + "error": "insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" + } + ] + } +} +``` + +The transaction is rejected. 
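For reference, the arithmetic behind the rejection is small: with `gas = 0x5208` (21000), `maxFeePerGas = 0xfa0` (4000) and `value = 0x20` (32) from `txs.json`, the sender must hold `21000 * 4000 + 32 = 84000032` wei, while `alloc.json` only funds it with `84000000`. A minimal sketch of the corrected pre-check (illustrative, not the exact geth code):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Values taken from testdata/12: alloc.json funds the sender with 84000000 wei,
	// txs.json sets gas=0x5208 (21000), maxFeePerGas=0xfa0 (4000), value=0x20 (32).
	balance := big.NewInt(84000000)
	gasLimit := big.NewInt(0x5208)
	maxFeePerGas := big.NewInt(0xfa0)
	value := big.NewInt(0x20)

	// Post-fix check: the sender must cover gasLimit*maxFeePerGas plus the value transfer.
	want := new(big.Int).Mul(gasLimit, maxFeePerGas)
	want.Add(want, value)

	if balance.Cmp(want) < 0 {
		fmt.Printf("insufficient funds for gas * price + value: have %v want %v\n", balance, want)
	}
}
```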
\ No newline at end of file diff --git a/cmd/evm/testdata/12/txs.json b/cmd/evm/testdata/12/txs.json new file mode 100644 index 000000000..cd683f271 --- /dev/null +++ b/cmd/evm/testdata/12/txs.json @@ -0,0 +1,20 @@ +[ + { + "input" : "0x", + "gas" : "0x5208", + "nonce" : "0x0", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x20", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x20", + "accessList" : [ + ] + } +] + diff --git a/cmd/evm/testdata/13/alloc.json b/cmd/evm/testdata/13/alloc.json new file mode 100644 index 000000000..6e98e7513 --- /dev/null +++ b/cmd/evm/testdata/13/alloc.json @@ -0,0 +1,23 @@ +{ + "0x1111111111111111111111111111111111111111" : { + "balance" : "0x010000000000", + "code" : "0xfe", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x010000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363" : { + "balance" : "0x01000000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/13/env.json b/cmd/evm/testdata/13/env.json new file mode 100644 index 000000000..3a82d46a7 --- /dev/null +++ b/cmd/evm/testdata/13/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x079e", + "previousHash" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f", + "currentGasLimit" : "0x40000000", + "currentBaseFee" : "0x036b", + "blockHashes" : { + "0" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/13/readme.md b/cmd/evm/testdata/13/readme.md new file mode 100644 index 000000000..64f52fc9a --- /dev/null +++ b/cmd/evm/testdata/13/readme.md @@ -0,0 +1,4 @@ +## Input transactions in RLP form + +This testdata folder is used to examplify how transaction input can be provided in rlp form. +Please see the README in `evm` folder for how this is performed. 
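For convenience, the round trip exercised by `transition-test.sh` later in this change looks like this (the flag values are taken verbatim from that script):

```
./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./testdata/13/txs.json --input.env=./testdata/13/env.json --output.result=alloc_jsontx.json --output.body=signed_txs.rlp
./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./signed_txs.rlp --input.env=./testdata/13/env.json --output.result=alloc_rlptx.json
```

The first invocation signs the JSON transactions and writes the block body to `signed_txs.rlp`; the second consumes that RLP directly, and `jq .stateRoot` on the two result files should agree.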
\ No newline at end of file diff --git a/cmd/evm/testdata/13/txs.json b/cmd/evm/testdata/13/txs.json new file mode 100644 index 000000000..c45ef1e13 --- /dev/null +++ b/cmd/evm/testdata/13/txs.json @@ -0,0 +1,34 @@ +[ + { + "input" : "0x", + "gas" : "0x84d0", + "nonce" : "0x1", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [] + }, + { + "input" : "0x", + "gas" : "0x84d0", + "nonce" : "0x2", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [] + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/8/alloc.json b/cmd/evm/testdata/8/alloc.json new file mode 100644 index 000000000..1d1b5f86c --- /dev/null +++ b/cmd/evm/testdata/8/alloc.json @@ -0,0 +1,11 @@ +{ + "0x000000000000000000000000000000000000aaaa": { + "balance": "0x03", + "code": "0x5854505854", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x100000", + "nonce": "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/8/env.json b/cmd/evm/testdata/8/env.json new file mode 100644 index 000000000..8b9193472 --- /dev/null +++ b/cmd/evm/testdata/8/env.json @@ -0,0 +1,7 @@ +{ + "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty": "0x20000", + "currentGasLimit": "0x1000000000", + "currentNumber": "0x1000000", + "currentTimestamp": "0x04" +} \ No newline at end of file diff --git a/cmd/evm/testdata/8/readme.md b/cmd/evm/testdata/8/readme.md new file mode 100644 index 000000000..e021cd7e2 --- /dev/null +++ b/cmd/evm/testdata/8/readme.md @@ -0,0 +1,63 @@ +## EIP-2930 testing + +This test contains testcases for EIP-2930, which uses transactions with access lists. + +### Prestate + +The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the +following code: `0x5854505854`: `PC ;SLOAD; POP; PC; SLOAD`. + +Essentialy, this contract does `SLOAD(0)` and `SLOAD(3)`. + +The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`. + +## Transactions + +There are three transactions, each invokes the contract above. + +1. ACL-transaction, which contains some non-used slots +2. Regular transaction +3. 
ACL-transaction, which contains the slots `1` and `3` in `0x000000000000000000000000000000000000aaaa` + +## Execution + +Running it yields: +``` +dir=./testdata/8 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --trace && cat trace-* | grep SLOAD +{"pc":1,"op":84,"gas":"0x484be","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x47c86","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":1,"op":84,"gas":"0x49cf6","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x494be","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":1,"op":84,"gas":"0x484be","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x48456","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} + +``` + +Simlarly, we can provide the input transactions via `stdin` instead of as file: + +``` +dir=./testdata/8 \ + && cat $dir/txs.json | jq "{txs: .}" \ + | ./evm t8n --state.fork=Berlin \ + --input.alloc=$dir/alloc.json \ + --input.txs=stdin \ + --input.env=$dir/env.json \ + --trace \ + && cat trace-* | grep SLOAD + +{"pc":1,"op":84,"gas":"0x484be","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x47c86","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":1,"op":84,"gas":"0x49cf6","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x494be","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":1,"op":84,"gas":"0x484be","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x48456","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +``` + +If we try to execute it on older rules: +``` +dir=./testdata/8 && ./evm t8n --state.fork=Istanbul --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json +INFO [01-21|23:21:51.265] rejected tx index=0 hash=d2818d..6ab3da error="tx type not supported" +INFO [01-21|23:21:51.265] rejected tx index=1 hash=26ea00..81c01b from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="nonce too high: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B, tx: 1 state: 0" +INFO [01-21|23:21:51.265] rejected tx index=2 hash=698d01..369cee error="tx type not supported" +``` +Number `1` and `3` are not applicable, and therefore number `2` has wrong nonce. 
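The gas numbers in the traces above are the EIP-2929 storage-access costs: `0x834` (2100) for a cold `SLOAD` and `0x64` (100) for a warm one. The third transaction's access list names storage keys `0x…00` and `0x…03` of the contract (see `txs.json` below), matching the two `SLOAD`s, so both are already warm. A toy model of that accounting, just to make the numbers concrete (not the geth implementation):

```go
package main

import "fmt"

// Toy model of the EIP-2929 storage-access pricing visible in the traces above:
// 0x834 (2100) gas for a cold SLOAD, 0x64 (100) for a warm one. Slots named in an
// EIP-2930 access list start the transaction already warm.
const (
	coldSloadCost = 2100
	warmSloadCost = 100
)

func sloadCost(warm map[uint64]bool, slot uint64) uint64 {
	if warm[slot] {
		return warmSloadCost
	}
	warm[slot] = true // first touch warms the slot for the rest of the transaction
	return coldSloadCost
}

func main() {
	// Tx 3 pre-warms slots 0 and 3 of the contract via its access list.
	warm := map[uint64]bool{0: true, 3: true}
	fmt.Println(sloadCost(warm, 0), sloadCost(warm, 3)) // 100 100

	// Txs 1 and 2 touch the same slots without matching access-list entries.
	fmt.Println(sloadCost(map[uint64]bool{}, 0), sloadCost(map[uint64]bool{}, 3)) // 2100 2100
}
```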
\ No newline at end of file diff --git a/cmd/evm/testdata/8/txs.json b/cmd/evm/testdata/8/txs.json new file mode 100644 index 000000000..35142ba23 --- /dev/null +++ b/cmd/evm/testdata/8/txs.json @@ -0,0 +1,58 @@ +[ + { + "gas": "0x4ef00", + "gasPrice": "0x1", + "chainId": "0x1", + "input": "0x", + "nonce": "0x0", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x1", + "type" : "0x1", + "accessList": [ + {"address": "0x0000000000000000000000000000000000000000", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + ], + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + { + "gas": "0x4ef00", + "gasPrice": "0x1", + "input": "0x", + "nonce": "0x1", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x2", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + { + "gas": "0x4ef00", + "gasPrice": "0x1", + "chainId": "0x1", + "input": "0x", + "nonce": "0x2", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x1", + "type" : "0x1", + "accessList": [ + {"address": "0x000000000000000000000000000000000000aaaa", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000003" + ] + } + ], + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] diff --git a/cmd/evm/testdata/9/alloc.json b/cmd/evm/testdata/9/alloc.json new file mode 100644 index 000000000..c14e38e84 --- /dev/null +++ b/cmd/evm/testdata/9/alloc.json @@ -0,0 +1,11 @@ +{ + "0x000000000000000000000000000000000000aaaa": { + "balance": "0x03", + "code": "0x58585454", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x100000000000000", + "nonce": "0x00" + } +} diff --git a/cmd/evm/testdata/9/env.json b/cmd/evm/testdata/9/env.json new file mode 100644 index 000000000..ec5164b99 --- /dev/null +++ b/cmd/evm/testdata/9/env.json @@ -0,0 +1,8 @@ +{ + "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty": "0x20000", + "currentGasTarget": "0x1000000000", + "currentBaseFee": "0x3B9ACA00", + "currentNumber": "0x1000000", + "currentTimestamp": "0x04" +} diff --git a/cmd/evm/testdata/9/readme.md b/cmd/evm/testdata/9/readme.md new file mode 100644 index 000000000..88f0f12aa --- /dev/null +++ b/cmd/evm/testdata/9/readme.md @@ -0,0 +1,75 @@ +## EIP-1559 testing + +This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter. + +### Prestate + +The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the +following code: `0x58585454`: `PC; PC; SLOAD; SLOAD`. + +Essentialy, this contract does `SLOAD(0)` and `SLOAD(1)`. + +The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`. + +## Transactions + +There are two transactions, each invokes the contract above. + +1. EIP-1559 ACL-transaction, which contains the `0x0` slot for `0xaaaa` +2. 
Legacy transaction + +## Execution + +Running it yields: +``` +$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --trace && cat trace-* | grep SLOAD +{"pc":2,"op":84,"gas":"0x48c28","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":3,"op":84,"gas":"0x483f4","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnDa +ta":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":2,"op":84,"gas":"0x49cf4","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":3,"op":84,"gas":"0x494c0","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +``` + +We can also get the post-alloc: +``` +$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +{ + "alloc": { + "0x000000000000000000000000000000000000aaaa": { + "code": "0x58585454", + "balance": "0x3", + "nonce": "0x1" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0xbfc02677a000" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xff104fcfea7800", + "nonce": "0x2" + } + } +} +``` + +If we try to execute it on older rules: +``` +dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +ERROR(10): Failed signing transactions: ERROR(10): Tx 0: failed to sign tx: transaction type not supported +``` + +It fails, due to the `evm t8n` cannot sign them in with the given signer. We can bypass that, however, +by feeding it presigned transactions, located in `txs_signed.json`. + +``` +dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs_signed.json --input.env=$dir/env.json +INFO [05-07|12:28:42.072] rejected tx index=0 hash=b4821e..536819 error="transaction type not supported" +INFO [05-07|12:28:42.072] rejected tx index=1 hash=a9c6c6..fa4036 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="nonce too high: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B, tx: 1 state: 0" +INFO [05-07|12:28:42.073] Wrote file file=alloc.json +INFO [05-07|12:28:42.073] Wrote file file=result.json +``` + +Number `0` is not applicable, and therefore number `1` has wrong nonce, and both are rejected. 
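The per-gas accounting that produces the coinbase balance above follows the usual EIP-1559 rule: the sender pays `baseFee + min(maxPriorityFeePerGas, maxFeePerGas - baseFee)` per gas, and only the part above the base fee is credited to the coinbase. A small, self-contained sketch with this testcase's numbers (illustrative, not the geth implementation):

```go
package main

import (
	"fmt"
	"math/big"
)

// Effective gas price under EIP-1559, using the numbers from this testcase:
// currentBaseFee = 0x3B9ACA00 (1 gwei) in env.json, and the dynamic-fee tx in
// txs.json sets maxFeePerGas = 0x12A05F200 (5 gwei), maxPriorityFeePerGas = 0x2.
func effectiveGasPrice(baseFee, feeCap, tipCap *big.Int) *big.Int {
	tip := new(big.Int).Sub(feeCap, baseFee)
	if tipCap.Cmp(tip) < 0 {
		tip = tipCap
	}
	return new(big.Int).Add(baseFee, tip)
}

func main() {
	baseFee := big.NewInt(0x3B9ACA00)
	feeCap := big.NewInt(0x12A05F200)
	tipCap := big.NewInt(0x2)

	// Dynamic-fee tx: effective price is baseFee + 2; the miner keeps only the 2 wei/gas tip.
	fmt.Println(effectiveGasPrice(baseFee, feeCap, tipCap)) // 1000000002

	// Legacy tx with gasPrice 0x12A05F200: fee cap and tip cap are both the gas price,
	// so the miner keeps gasPrice - baseFee = 4 gwei per gas.
	fmt.Println(effectiveGasPrice(baseFee, feeCap, feeCap)) // 5000000000
}
```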
+ diff --git a/cmd/evm/testdata/9/txs.json b/cmd/evm/testdata/9/txs.json new file mode 100644 index 000000000..740abce07 --- /dev/null +++ b/cmd/evm/testdata/9/txs.json @@ -0,0 +1,37 @@ +[ + { + "gas": "0x4ef00", + "maxPriorityFeePerGas": "0x2", + "maxFeePerGas": "0x12A05F200", + "chainId": "0x1", + "input": "0x", + "nonce": "0x0", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x0", + "type" : "0x2", + "accessList": [ + {"address": "0x000000000000000000000000000000000000aaaa", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + ], + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + { + "gas": "0x4ef00", + "gasPrice": "0x12A05F200", + "chainId": "0x1", + "input": "0x", + "nonce": "0x1", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x0", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] diff --git a/cmd/evm/transition-test.sh b/cmd/evm/transition-test.sh index 34c924985..250238d16 100644 --- a/cmd/evm/transition-test.sh +++ b/cmd/evm/transition-test.sh @@ -11,6 +11,8 @@ function showjson(){ function demo(){ echo "$ticks" echo "$1" + $1 + echo "" echo "$ticks" echo "" } @@ -152,9 +154,7 @@ echo "" echo "The \`BLOCKHASH\` opcode requires blockhashes to be provided by the caller, inside the \`env\`." echo "If a required blockhash is not provided, the exit code should be \`4\`:" echo "Example where blockhashes are provided: " -cmd="./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace" -tick && echo $cmd && tick -$cmd 2>&1 >/dev/null +demo "./evm --verbosity=1 t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace" cmd="cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2" tick && echo $cmd && tick echo "$ticks" @@ -164,13 +164,11 @@ echo "" echo "In this example, the caller has not provided the required blockhash:" cmd="./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace" -tick && echo $cmd && tick -tick -$cmd +tick && echo $cmd && $cmd errc=$? tick echo "Error code: $errc" - +echo "" echo "### Chaining" echo "" @@ -189,3 +187,28 @@ echo "" echo "In order to meaningfully chain invocations, one would need to provide meaningful new \`env\`, otherwise the" echo "actual blocknumber (exposed to the EVM) would not increase." echo "" + +echo "### Transactions in RLP form" +echo "" +echo "It is possible to provide already-signed transactions as input to, using an \`input.txs\` which ends with the \`rlp\` suffix." +echo "The input format for RLP-form transactions is _identical_ to the _output_ format for block bodies. Therefore, it's fully possible" +echo "to use the evm to go from \`json\` input to \`rlp\` input." +echo "" +echo "The following command takes **json** the transactions in \`./testdata/13/txs.json\` and signs them. 
After execution, they are output to \`signed_txs.rlp\`.:" +demo "./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./testdata/13/txs.json --input.env=./testdata/13/env.json --output.result=alloc_jsontx.json --output.body=signed_txs.rlp" +echo "The \`output.body\` is the rlp-list of transactions, encoded in hex and placed in a string a'la \`json\` encoding rules:" +demo "cat signed_txs.rlp" +echo "We can use \`rlpdump\` to check what the contents are: " +echo "$ticks" +echo "rlpdump -hex \$(cat signed_txs.rlp | jq -r )" +rlpdump -hex $(cat signed_txs.rlp | jq -r ) +echo "$ticks" +echo "Now, we can now use those (or any other already signed transactions), as input, like so: " +demo "./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./signed_txs.rlp --input.env=./testdata/13/env.json --output.result=alloc_rlptx.json" + +echo "You might have noticed that the results from these two invocations were stored in two separate files. " +echo "And we can now finally check that they match." +echo "$ticks" +echo "cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot" +cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot +echo "$ticks" diff --git a/cmd/faucet/README.md b/cmd/faucet/README.md new file mode 100644 index 000000000..364689a78 --- /dev/null +++ b/cmd/faucet/README.md @@ -0,0 +1,50 @@ +# Faucet + +The `faucet` is a simplistic web application with the goal of distributing small amounts of Ether in private and test networks. + +Users need to post their Ethereum addresses to fund in a Twitter status update or public Facebook post and share the link to the faucet. The faucet will in turn deduplicate user requests and send the Ether. After a funding round, the faucet prevents the same user requesting again for a pre-configured amount of time, proportional to the amount of Ether requested. + +## Operation + +The `faucet` is a single binary app (everything included) with all configurations set via command line flags and a few files. + +First thing's first, the `faucet` needs to connect to an Ethereum network, for which it needs the necessary genesis and network infos. Each of the following flags must be set: + +- `--genesis` is a path to a file containin the network `genesis.json` +- `--network` is the devp2p network id used during connection +- `--bootnodes` is a list of `enode://` ids to join the network through + +The `faucet` will use the `les` protocol to join the configured Ethereum network and will store its data in `$HOME/.faucet` (currently not configurable). + +## Funding + +To be able to distribute funds, the `faucet` needs access to an already funded Ethereum account. This can be configured via: + +- `--account.json` is a path to the Ethereum account's JSON key file +- `--account.pass` is a path to a text file with the decryption passphrase + +The faucet is able to distribute various amounts of Ether in exchange for various timeouts. These can be configured via: + +- `--faucet.amount` is the number of Ethers to send by default +- `--faucet.minutes` is the time to wait before allowing a rerequest +- `--faucet.tiers` is the funding tiers to support (x3 time, x2.5 funds) + +## Sybil protection + +To prevent the same user from exhausting funds in a loop, the `faucet` ties requests to social networks and captcha resolvers. + +Captcha protection uses Google's invisible ReCaptcha, thus the `faucet` needs to run on a live domain. 
The domain needs to be registered in Google's systems to retrieve the captcha API token and secrets. After doing so, captcha protection may be enabled via: + +- `--captcha.token` is the API token for ReCaptcha +- `--captcha.secret` is the API secret for ReCaptcha + +Sybil protection via Twitter requires an API key as of 15th December, 2020. To obtain it, a Twitter user must be upgraded to developer status and a new Twitter App deployed with it. The app's `Bearer` token is required by the faucet to retrieve tweet data: + +- `--twitter.token` is the Bearer token for `v2` API access +- `--twitter.token.v1` is the Bearer token for `v1` API access + +Sybil protection via Facebook uses the website to directly download post data thus does not currently require an API configuration. + +## Miscellaneous + +Beside the above - mostly essential - CLI flags, there are a number that can be used to fine tune the `faucet`'s operation. Please see `faucet --help` for a full list. \ No newline at end of file diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go index 346c412ac..c502f67bc 100644 --- a/cmd/faucet/faucet.go +++ b/cmd/faucet/faucet.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with go-ethereum. If not, see . -// faucet is a Ether faucet backed by a light client. +// faucet is an Ether faucet backed by a light client. package main //go:generate go-bindata -nometadata -o website.go faucet.html @@ -47,15 +47,14 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethstats" "github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/params" @@ -83,6 +82,12 @@ var ( noauthFlag = flag.Bool("noauth", false, "Enables funding requests without authentication") logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet") + + twitterTokenFlag = flag.String("twitter.token", "", "Bearer token to authenticate with the v2 Twitter API") + twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API") + + goerliFlag = flag.Bool("goerli", false, "Initializes the faucet with Görli network config") + rinkebyFlag = flag.Bool("rinkeby", false, "Initializes the faucet with Rinkeby network config") ) var ( @@ -142,25 +147,22 @@ func main() { log.Crit("Failed to render the faucet template", "err", err) } // Load and parse the genesis block requested by the user - blob, err := ioutil.ReadFile(*genesisFlag) + genesis, err := getGenesis(genesisFlag, *goerliFlag, *rinkebyFlag) if err != nil { - log.Crit("Failed to read genesis block contents", "genesis", *genesisFlag, "err", err) - } - genesis := new(core.Genesis) - if err = json.Unmarshal(blob, genesis); err != nil { - log.Crit("Failed to parse genesis block json", "err", err) + log.Crit("Failed to parse genesis config", "err", err) } // Convert the bootnodes to internal enode representations - var enodes []*discv5.Node + var enodes []*enode.Node for _, boot := range strings.Split(*bootFlag, ",") { 
- if url, err := discv5.ParseNode(boot); err == nil { + if url, err := enode.Parse(enode.ValidSchemes, boot); err == nil { enodes = append(enodes, url) } else { log.Error("Failed to parse bootnode URL", "url", boot, "err", err) } } // Load up the account key and decrypt its password - if blob, err = ioutil.ReadFile(*accPassFlag); err != nil { + blob, err := ioutil.ReadFile(*accPassFlag) + if err != nil { log.Crit("Failed to read account password contents", "file", *accPassFlag, "err", err) } pass := strings.TrimSuffix(string(blob), "\n") @@ -210,7 +212,7 @@ type faucet struct { nonce uint64 // Current pending nonce of the faucet price *big.Int // Current gas price to issue funds with - conns []*websocket.Conn // Currently live websocket connections + conns []*wsConn // Currently live websocket connections timeouts map[string]time.Time // History of users and their funding timeouts reqs []*request // Currently pending funding requests update chan struct{} // Channel to signal request updates @@ -218,7 +220,14 @@ type faucet struct { lock sync.RWMutex // Lock protecting the faucet's internals } -func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) { +// wsConn wraps a websocket connection with a write mutex as the underlying +// websocket library does not synchronize access to the stream. +type wsConn struct { + conn *websocket.Conn + wlock sync.Mutex +} + +func newFaucet(genesis *core.Genesis, port int, enodes []*enode.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) { // Assemble the raw devp2p protocol stack stack, err := node.New(&node.Config{ Name: "geth", @@ -238,11 +247,12 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u } // Assemble the Ethereum light client protocol - cfg := eth.DefaultConfig + cfg := ethconfig.Defaults cfg.SyncMode = downloader.LightSync cfg.NetworkId = network cfg.Genesis = genesis utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock(nil).Hash()) + lesBackend, err := les.New(stack, &cfg) if err != nil { return nil, fmt.Errorf("Failed to register the Ethereum service: %w", err) @@ -317,13 +327,14 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { defer conn.Close() f.lock.Lock() - f.conns = append(f.conns, conn) + wsconn := &wsConn{conn: conn} + f.conns = append(f.conns, wsconn) f.lock.Unlock() defer func() { f.lock.Lock() for i, c := range f.conns { - if c == conn { + if c.conn == conn { f.conns = append(f.conns[:i], f.conns[i+1:]...) 
break } @@ -351,7 +362,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { if head == nil || balance == nil { // Report the faucet offline until initial stats are ready //lint:ignore ST1005 This error is to be displayed in the browser - if err = sendError(conn, errors.New("Faucet offline")); err != nil { + if err = sendError(wsconn, errors.New("Faucet offline")); err != nil { log.Warn("Failed to send faucet error to client", "err", err) return } @@ -362,7 +373,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { f.lock.RLock() reqs := f.reqs f.lock.RUnlock() - if err = send(conn, map[string]interface{}{ + if err = send(wsconn, map[string]interface{}{ "funds": new(big.Int).Div(balance, ether), "funded": nonce, "peers": f.stack.Server().PeerCount(), @@ -371,7 +382,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { log.Warn("Failed to send initial stats to client", "err", err) return } - if err = send(conn, head, 3*time.Second); err != nil { + if err = send(wsconn, head, 3*time.Second); err != nil { log.Warn("Failed to send initial header to client", "err", err) return } @@ -386,9 +397,8 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { if err = conn.ReadJSON(&msg); err != nil { return } - if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://gist.github.com/") && !strings.HasPrefix(msg.URL, "https://twitter.com/") && - !strings.HasPrefix(msg.URL, "https://plus.google.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") { - if err = sendError(conn, errors.New("URL doesn't link to supported services")); err != nil { + if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://twitter.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") { + if err = sendError(wsconn, errors.New("URL doesn't link to supported services")); err != nil { log.Warn("Failed to send URL error to client", "err", err) return } @@ -396,7 +406,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { } if msg.Tier >= uint(*tiersFlag) { //lint:ignore ST1005 This error is to be displayed in the browser - if err = sendError(conn, errors.New("Invalid funding tier requested")); err != nil { + if err = sendError(wsconn, errors.New("Invalid funding tier requested")); err != nil { log.Warn("Failed to send tier error to client", "err", err) return } @@ -412,7 +422,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form) if err != nil { - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send captcha post error to client", "err", err) return } @@ -425,7 +435,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { err = json.NewDecoder(res.Body).Decode(&result) res.Body.Close() if err != nil { - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send captcha decode error to client", "err", err) return } @@ -434,7 +444,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { if !result.Success { log.Warn("Captcha verification failed", "err", string(result.Errors)) //lint:ignore ST1005 it's funny and the robot won't mind - if err = sendError(conn, errors.New("Beep-bop, you're a robot!")); err != nil { + if err = sendError(wsconn, errors.New("Beep-bop, you're a robot!")); err != nil { log.Warn("Failed to send captcha failure to client", "err", 
err) return } @@ -443,36 +453,26 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { } // Retrieve the Ethereum address to fund, the requesting user and a profile picture var ( + id string username string avatar string address common.Address ) switch { - case strings.HasPrefix(msg.URL, "https://gist.github.com/"): - if err = sendError(conn, errors.New("GitHub authentication discontinued at the official request of GitHub")); err != nil { - log.Warn("Failed to send GitHub deprecation to client", "err", err) - return - } - continue - case strings.HasPrefix(msg.URL, "https://plus.google.com/"): - //lint:ignore ST1005 Google is a company name and should be capitalized. - if err = sendError(conn, errors.New("Google+ authentication discontinued as the service was sunset")); err != nil { - log.Warn("Failed to send Google+ deprecation to client", "err", err) - return - } - continue case strings.HasPrefix(msg.URL, "https://twitter.com/"): - username, avatar, address, err = authTwitter(msg.URL) + id, username, avatar, address, err = authTwitter(msg.URL, *twitterTokenV1Flag, *twitterTokenFlag) case strings.HasPrefix(msg.URL, "https://www.facebook.com/"): username, avatar, address, err = authFacebook(msg.URL) + id = username case *noauthFlag: username, avatar, address, err = authNoAuth(msg.URL) + id = username default: //lint:ignore ST1005 This error is to be displayed in the browser err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues") } if err != nil { - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send prefix error to client", "err", err) return } @@ -486,7 +486,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { fund bool timeout time.Time ) - if timeout = f.timeouts[username]; time.Now().After(timeout) { + if timeout = f.timeouts[id]; time.Now().After(timeout) { // User wasn't funded recently, create the funding transaction amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether) amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil)) @@ -496,7 +496,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID) if err != nil { f.lock.Unlock() - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send transaction creation error to client", "err", err) return } @@ -505,7 +505,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { // Submit the transaction and mark as funded if successful if err := f.client.SendTransaction(context.Background(), signed); err != nil { f.lock.Unlock() - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send transaction transmission error to client", "err", err) return } @@ -520,20 +520,20 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { timeout := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute grace := timeout / 288 // 24h timeout => 5m grace - f.timeouts[username] = time.Now().Add(timeout - grace) + f.timeouts[id] = time.Now().Add(timeout - grace) fund = true } f.lock.Unlock() // Send an error if too frequent funding, othewise a success if !fund { - if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { 
// nolint: gosimple + if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple log.Warn("Failed to send funding error to client", "err", err) return } continue } - if err = sendSuccess(conn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil { + if err = sendSuccess(wsconn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil { log.Warn("Failed to send funding success to client", "err", err) return } @@ -626,12 +626,12 @@ func (f *faucet) loop() { "requests": f.reqs, }, time.Second); err != nil { log.Warn("Failed to send stats to client", "err", err) - conn.Close() + conn.conn.Close() continue } if err := send(conn, head, time.Second); err != nil { log.Warn("Failed to send header to client", "err", err) - conn.Close() + conn.conn.Close() } } f.lock.RUnlock() @@ -653,7 +653,7 @@ func (f *faucet) loop() { for _, conn := range f.conns { if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil { log.Warn("Failed to send requests to client", "err", err) - conn.Close() + conn.conn.Close() } } f.lock.RUnlock() @@ -663,44 +663,63 @@ func (f *faucet) loop() { // sends transmits a data packet to the remote end of the websocket, but also // setting a write deadline to prevent waiting forever on the node. -func send(conn *websocket.Conn, value interface{}, timeout time.Duration) error { +func send(conn *wsConn, value interface{}, timeout time.Duration) error { if timeout == 0 { timeout = 60 * time.Second } - conn.SetWriteDeadline(time.Now().Add(timeout)) - return conn.WriteJSON(value) + conn.wlock.Lock() + defer conn.wlock.Unlock() + conn.conn.SetWriteDeadline(time.Now().Add(timeout)) + return conn.conn.WriteJSON(value) } // sendError transmits an error to the remote end of the websocket, also setting // the write deadline to 1 second to prevent waiting forever. -func sendError(conn *websocket.Conn, err error) error { +func sendError(conn *wsConn, err error) error { return send(conn, map[string]string{"error": err.Error()}, time.Second) } // sendSuccess transmits a success message to the remote end of the websocket, also // setting the write deadline to 1 second to prevent waiting forever. -func sendSuccess(conn *websocket.Conn, msg string) error { +func sendSuccess(conn *wsConn, msg string) error { return send(conn, map[string]string{"success": msg}, time.Second) } // authTwitter tries to authenticate a faucet request using Twitter posts, returning -// the username, avatar URL and Ethereum address to fund on success. -func authTwitter(url string) (string, string, common.Address, error) { +// the uniqueness identifier (user id/username), username, avatar URL and Ethereum address to fund on success. +func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, common.Address, error) { // Ensure the user specified a meaningful URL, no fancy nonsense parts := strings.Split(url, "/") if len(parts) < 4 || parts[len(parts)-2] != "status" { //lint:ignore ST1005 This error is to be displayed in the browser - return "", "", common.Address{}, errors.New("Invalid Twitter status URL") - } - // Twitter's API isn't really friendly with direct links. 
Still, we don't - // want to do ask read permissions from users, so just load the public posts + return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL") + } + // Strip any query parameters from the tweet id and ensure it's numeric + tweetID := strings.Split(parts[len(parts)-1], "?")[0] + if !regexp.MustCompile("^[0-9]+$").MatchString(tweetID) { + return "", "", "", common.Address{}, errors.New("Invalid Tweet URL") + } + // Twitter's API isn't really friendly with direct links. + // It is restricted to 300 queries / 15 minute with an app api key. + // Anything more will require read only authorization from the users and that we want to avoid. + + // If Twitter bearer token is provided, use the API, selecting the version + // the user would prefer (currently there's a limit of 1 v2 app / developer + // but unlimited v1.1 apps). + switch { + case tokenV1 != "": + return authTwitterWithTokenV1(tweetID, tokenV1) + case tokenV2 != "": + return authTwitterWithTokenV2(tweetID, tokenV2) + } + // Twiter API token isn't provided so we just load the public posts // and scrape it for the Ethereum address and profile URL. We need to load // the mobile page though since the main page loads tweet contents via JS. url = strings.Replace(url, "https://twitter.com/", "https://mobile.twitter.com/", 1) res, err := http.Get(url) if err != nil { - return "", "", common.Address{}, err + return "", "", "", common.Address{}, err } defer res.Body.Close() @@ -708,31 +727,115 @@ func authTwitter(url string) (string, string, common.Address, error) { parts = strings.Split(res.Request.URL.String(), "/") if len(parts) < 4 || parts[len(parts)-2] != "status" { //lint:ignore ST1005 This error is to be displayed in the browser - return "", "", common.Address{}, errors.New("Invalid Twitter status URL") + return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL") } username := parts[len(parts)-3] body, err := ioutil.ReadAll(res.Body) if err != nil { - return "", "", common.Address{}, err + return "", "", "", common.Address{}, err } address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body))) if address == (common.Address{}) { //lint:ignore ST1005 This error is to be displayed in the browser - return "", "", common.Address{}, errors.New("No Ethereum address found to fund") + return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund") } var avatar string if parts = regexp.MustCompile("src=\"([^\"]+twimg.com/profile_images[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 { avatar = parts[1] } - return username + "@twitter", avatar, address, nil + return username + "@twitter", username, avatar, address, nil +} + +// authTwitterWithTokenV1 tries to authenticate a faucet request using Twitter's v1 +// API, returning the user id, username, avatar URL and Ethereum address to fund on +// success. 
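For readers skimming the hunk above: before the faucet decides between the v1.1 API, the v2 API, or plain scraping, it normalises the tweet ID taken from the submitted URL. A compact, self-contained restatement of that step, illustrative only and not part of the diff (tweetIDFromStatusURL is a made-up helper name):

```go
package twitterauth

import (
	"errors"
	"regexp"
	"strings"
)

var numericID = regexp.MustCompile(`^[0-9]+$`)

// tweetIDFromStatusURL extracts the numeric status ID the same way the code
// above does: the ID is the last path element with any ?query suffix stripped,
// and it must be purely numeric before it is ever sent to the Twitter API.
func tweetIDFromStatusURL(url string) (string, error) {
	parts := strings.Split(url, "/")
	if len(parts) < 4 || parts[len(parts)-2] != "status" {
		return "", errors.New("invalid Twitter status URL")
	}
	id := strings.Split(parts[len(parts)-1], "?")[0]
	if !numericID.MatchString(id) {
		return "", errors.New("invalid tweet ID")
	}
	return id, nil
}
```

For example, https://twitter.com/someuser/status/1234567890123?s=20 yields "1234567890123".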
+func authTwitterWithTokenV1(tweetID string, token string) (string, string, string, common.Address, error) { + // Query the tweet details from Twitter + url := fmt.Sprintf("https://api.twitter.com/1.1/statuses/show.json?id=%s", tweetID) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", "", "", common.Address{}, err + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", "", "", common.Address{}, err + } + defer res.Body.Close() + + var result struct { + Text string `json:"text"` + User struct { + ID string `json:"id_str"` + Username string `json:"screen_name"` + Avatar string `json:"profile_image_url"` + } `json:"user"` + } + err = json.NewDecoder(res.Body).Decode(&result) + if err != nil { + return "", "", "", common.Address{}, err + } + address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Text)) + if address == (common.Address{}) { + //lint:ignore ST1005 This error is to be displayed in the browser + return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund") + } + return result.User.ID + "@twitter", result.User.Username, result.User.Avatar, address, nil +} + +// authTwitterWithTokenV2 tries to authenticate a faucet request using Twitter's v2 +// API, returning the user id, username, avatar URL and Ethereum address to fund on +// success. +func authTwitterWithTokenV2(tweetID string, token string) (string, string, string, common.Address, error) { + // Query the tweet details from Twitter + url := fmt.Sprintf("https://api.twitter.com/2/tweets/%s?expansions=author_id&user.fields=profile_image_url", tweetID) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", "", "", common.Address{}, err + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", "", "", common.Address{}, err + } + defer res.Body.Close() + + var result struct { + Data struct { + AuthorID string `json:"author_id"` + Text string `json:"text"` + } `json:"data"` + Includes struct { + Users []struct { + ID string `json:"id"` + Username string `json:"username"` + Avatar string `json:"profile_image_url"` + } `json:"users"` + } `json:"includes"` + } + + err = json.NewDecoder(res.Body).Decode(&result) + if err != nil { + return "", "", "", common.Address{}, err + } + + address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Data.Text)) + if address == (common.Address{}) { + //lint:ignore ST1005 This error is to be displayed in the browser + return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund") + } + return result.Data.AuthorID + "@twitter", result.Includes.Users[0].Username, result.Includes.Users[0].Avatar, address, nil } // authFacebook tries to authenticate a faucet request using Facebook posts, // returning the username, avatar URL and Ethereum address to fund on success. 
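Both token-based helpers above, the scraping fallback, and the Facebook path below all locate the funding address with the same scan over the post text; a minimal standalone sketch of that shared step, illustrative only and not part of the diff (findFundingAddress is a made-up name):

```go
package addrscan

import (
	"errors"
	"regexp"

	"github.com/ethereum/go-ethereum/common"
)

var addrPattern = regexp.MustCompile("0x[0-9a-fA-F]{40}")

// findFundingAddress returns the first 0x-prefixed, 40-hex-character address in
// arbitrary post text, mirroring the regexp used by the auth helpers above.
func findFundingAddress(text string) (common.Address, error) {
	match := addrPattern.FindString(text)
	if match == "" {
		return common.Address{}, errors.New("no Ethereum address found to fund")
	}
	return common.HexToAddress(match), nil
}
```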
func authFacebook(url string) (string, string, common.Address, error) { // Ensure the user specified a meaningful URL, no fancy nonsense - parts := strings.Split(url, "/") + parts := strings.Split(strings.Split(url, "?")[0], "/") + if parts[len(parts)-1] == "" { + parts = parts[0 : len(parts)-1] + } if len(parts) < 4 || parts[len(parts)-2] != "posts" { //lint:ignore ST1005 This error is to be displayed in the browser return "", "", common.Address{}, errors.New("Invalid Facebook post URL") @@ -742,7 +845,13 @@ func authFacebook(url string) (string, string, common.Address, error) { // Facebook's Graph API isn't really friendly with direct links. Still, we don't // want to do ask read permissions from users, so just load the public posts and // scrape it for the Ethereum address and profile URL. - res, err := http.Get(url) + // + // Facebook recently changed their desktop webpage to use AJAX for loading post + // content, so switch over to the mobile site for now. Will probably end up having + // to use the API eventually. + crawl := strings.Replace(url, "www.facebook.com", "m.facebook.com", 1) + + res, err := http.Get(crawl) if err != nil { return "", "", common.Address{}, err } @@ -775,3 +884,15 @@ func authNoAuth(url string) (string, string, common.Address, error) { } return address.Hex() + "@noauth", "", address, nil } + +// getGenesis returns a genesis based on input args +func getGenesis(genesisFlag *string, goerliFlag bool, rinkebyFlag bool) (*core.Genesis, error) { + switch { + case genesisFlag != nil: + var genesis core.Genesis + err := common.LoadJSON(*genesisFlag, &genesis) + return &genesis, err + default: + return nil, fmt.Errorf("no genesis flag provided") + } +} diff --git a/cmd/faucet/faucet.html b/cmd/faucet/faucet.html index ba1433318..dad5ad84f 100644 --- a/cmd/faucet/faucet.html +++ b/cmd/faucet/faucet.html @@ -177,7 +177,7 @@

How does this work?

} // Iterate over our entire local collection and re-render the funding table var content = ""; - for (var i=0; i= 0; i--) { var done = requests[i].time == ""; var elapsed = moment().unix()-moment(requests[i].time).unix(); diff --git a/cmd/faucet/faucet_test.go b/cmd/faucet/faucet_test.go new file mode 100644 index 000000000..58a1f22b5 --- /dev/null +++ b/cmd/faucet/faucet_test.go @@ -0,0 +1,45 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" +) + +func TestFacebook(t *testing.T) { + // TODO: Remove facebook auth or implement facebook api, which seems to require an API key + t.Skipf("The facebook access is flaky, needs to be reimplemented or removed") + for _, tt := range []struct { + url string + want common.Address + }{ + { + "https://www.facebook.com/fooz.gazonk/posts/2837228539847129", + common.HexToAddress("0xDeadDeaDDeaDbEefbEeFbEEfBeeFBeefBeeFbEEF"), + }, + } { + _, _, gotAddress, err := authFacebook(tt.url) + if err != nil { + t.Fatal(err) + } + if gotAddress != tt.want { + t.Fatalf("address wrong, have %v want %v", gotAddress, tt.want) + } + } +} diff --git a/cmd/faucet/website.go b/cmd/faucet/website.go index a091d2491..aed067893 100644 --- a/cmd/faucet/website.go +++ b/cmd/faucet/website.go @@ -1,6 +1,6 @@ // Code generated by go-bindata. DO NOT EDIT. 
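Reviewer context for the cmd/faucet/website.go hunk that follows: the file is go-bindata output, so the only substantive change is the embedded, gzip-compressed copy of faucet.html (hence the 11.27kB to 11.276kB size note and the new digest), and the hex blob itself is not meant to be reviewed line by line. The file is rebuilt with go generate; the directives shown below are an assumption reproduced from memory of cmd/faucet/faucet.go rather than quoted from this diff, so verify the flags against the source before relying on them:

```go
// Fragment of cmd/faucet/faucet.go (assumed, not quoted from this diff).
// Running `go generate ./cmd/faucet` regenerates website.go from faucet.html.

//go:generate go-bindata -nometadata -o website.go faucet.html
//go:generate gofmt -w -s website.go
```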
// sources: -// faucet.html (11.27kB) +// faucet.html (11.276kB) package main @@ -69,7 +69,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _faucetHtml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x5a\x6d\x93\xdb\x36\x92\xfe\x3c\xfe\x15\x1d\x9e\xbd\x92\xce\x43\x52\x33\x63\x7b\x7d\x12\xa9\x94\xd7\x9b\xdd\xf3\xd5\x5d\x92\x4a\x9c\xba\xdb\xca\xa6\xae\x40\xb2\x25\xc2\x03\x02\x0c\x00\x4a\xa3\x4c\xe9\xbf\x5f\x35\x40\x52\xd4\xcb\x4c\xec\xb5\xaf\x6a\xfd\x61\x4c\x02\x8d\x46\xa3\xfb\x69\xf4\x0b\x95\x7c\xf5\xe7\xef\xde\xbe\xff\xdb\xf7\xdf\x40\x69\x2b\xb1\x78\x92\xd0\x7f\x20\x98\x5c\xa5\x01\xca\x60\xf1\xe4\x22\x29\x91\x15\x8b\x27\x17\x17\x49\x85\x96\x41\x5e\x32\x6d\xd0\xa6\x41\x63\x97\xe1\xeb\x60\x3f\x51\x5a\x5b\x87\xf8\x6b\xc3\xd7\x69\xf0\x3f\xe1\x4f\x6f\xc2\xb7\xaa\xaa\x99\xe5\x99\xc0\x00\x72\x25\x2d\x4a\x9b\x06\xef\xbe\x49\xb1\x58\xe1\x60\x9d\x64\x15\xa6\xc1\x9a\xe3\xa6\x56\xda\x0e\x48\x37\xbc\xb0\x65\x5a\xe0\x9a\xe7\x18\xba\x97\x4b\xe0\x92\x5b\xce\x44\x68\x72\x26\x30\xbd\x0a\x16\x4f\x88\x8f\xe5\x56\xe0\xe2\xfe\x3e\xfa\x16\xed\x46\xe9\xdb\xdd\x6e\x06\x6f\x1a\x5b\xa2\xb4\x3c\x67\x16\x0b\xf8\x0b\x6b\x72\xb4\x49\xec\x29\xdd\x22\xc1\xe5\x2d\x94\x1a\x97\x69\x40\xa2\x9b\x59\x1c\xe7\x85\xfc\x60\xa2\x5c\xa8\xa6\x58\x0a\xa6\x31\xca\x55\x15\xb3\x0f\xec\x2e\x16\x3c\x33\xb1\xdd\x70\x6b\x51\x87\x99\x52\xd6\x58\xcd\xea\xf8\x26\xba\x89\xfe\x18\xe7\xc6\xc4\xfd\x58\x54\x71\x19\xe5\xc6\x04\xa0\x51\xa4\x81\xb1\x5b\x81\xa6\x44\xb4\x01\xc4\x8b\x7f\x6c\xdf\xa5\x92\x36\x64\x1b\x34\xaa\xc2\xf8\x45\xf4\xc7\x68\xea\xb6\x1c\x0e\x3f\xbe\x2b\x6d\x6b\x72\xcd\x6b\x0b\x46\xe7\x1f\xbd\xef\x87\x5f\x1b\xd4\xdb\xf8\x26\xba\x8a\xae\xda\x17\xb7\xcf\x07\x13\x2c\x92\xd8\x33\x5c\x7c\x16\xef\x50\x2a\xbb\x8d\xaf\xa3\x17\xd1\x55\x5c\xb3\xfc\x96\xad\xb0\xe8\x76\xa2\xa9\xa8\x1b\xfc\x62\xfb\x3e\x64\xc3\x0f\xc7\x26\xfc\x12\x9b\x55\xaa\x42\x69\xa3\x0f\x26\xbe\x8e\xae\x5e\x47\xd3\x6e\xe0\x94\xbf\xdb\x80\x8c\x46\x5b\x5d\x44\x6b\xd4\x84\x5c\x11\xe6\x28\x2d\x6a\xb8\xa7\xd1\x8b\x8a\xcb\xb0\x44\xbe\x2a\xed\x0c\xae\xa6\xd3\x67\xf3\x73\xa3\xeb\xd2\x0f\x17\xdc\xd4\x82\x6d\x67\xb0\x14\x78\xe7\x87\x98\xe0\x2b\x19\x72\x8b\x95\x99\x81\xe7\xec\x26\x76\x6e\xcf\x5a\xab\x95\x46\x63\xda\xcd\x6a\x65\xb8\xe5\x4a\xce\x08\x51\xcc\xf2\x35\x9e\xa3\x35\x35\x93\x27\x0b\x58\x66\x94\x68\x2c\x1e\x09\x92\x09\x95\xdf\xfa\x31\xe7\xcd\xc3\x43\xe4\x4a\x28\x3d\x83\x4d\xc9\xdb\x65\xe0\x36\x82\x5a\x63\xcb\x1e\x6a\x56\x14\x5c\xae\x66\xf0\xaa\x6e\xcf\x03\x15\xd3\x2b\x2e\x67\x30\xdd\x2f\x49\xe2\x4e\x8d\x49\xec\x2f\xae\x27\x17\x49\xa6\x8a\xad\xb3\x61\xc1\xd7\x90\x0b\x66\x4c\x1a\x1c\xa9\xd8\x5d\x48\x07\x04\x74\x0f\x31\x2e\xbb\xa9\x83\x39\xad\x36\x01\xb8\x8d\xd2\xc0\x0b\x11\x66\xca\x5a\x55\xcd\xe0\x8a\xc4\x6b\x97\x1c\xf1\x13\xa1\x58\x85\x57\xd7\xdd\xe4\x45\x52\x5e\x75\x4c\x2c\xde\xd9\xd0\xd9\xa7\xb7\x4c\xb0\x48\x78\xb7\x76\xc9\x60\xc9\xc2\x8c\xd9\x32\x00\xa6\x39\x0b\x4b\x5e\x14\x28\xd3\xc0\xea\x06\x09\x47\x7c\x01\xc3\xeb\xef\x81\xdb\xaf\xbc\xea\xe4\x8a\x0b\xbe\x6e\x8f\x35\x78\x3c\x3a\xe1\xc3\x87\x78\x0d\xed\x83\x5a\x2e\x0d\xda\x70\x70\xa6\x01\x31\x97\x75\x63\xc3\x95\x56\x4d\xdd\xcf\x5f\x24\x6e\x14\x78\x91\x06\x8d\x16\x41\x7b\xfd\xbb\x47\xbb\xad\x5b\x55\x04\xfd\xc1\x95\xae\x42\xb2\x84\x56\x22\x80\x5a\xb0\x1c\x4b\x25\x0a\xd4\x69\xf0\xa3\xca\x39\x13\x20\xfd\x99\xe1\xa7\x1f\xfe\x13\x5a\x93\x71\xb9\x82\xad\x6a\x34\x7c\x63\x4b\xd4\xd8\x54\xc0\x8a\x82\xe0\x1a\x45\xd1\x40\x10\x87\xdd\x53\x51\xc3\xcc\xca\x3d\xd5\x45\x92\x35\xd6\xaa\x9e\x30\xb3\x12\x32\x2b\xc3\x02\x97\xac\x11\x16\x0a\xad\xea\x42\x6d\x64\x68\xd5\x6a\x45\x91\xce\x1f\xc2\x2f\x0a\xa0\x60\x96\xb5\x53\x69\xd0\
xd1\x76\x36\x64\xa6\x56\x75\x53\xb7\x56\xf4\x83\x78\x57\x33\x59\x60\x41\x36\x17\x06\x83\xc5\x5f\xf9\x1a\xa1\x42\x7f\x96\x8b\x63\x48\xe4\x4c\xa3\x0d\x87\x4c\x4f\x80\x91\xc4\x5e\x18\x7f\x24\x68\xff\x25\x8d\xe8\x38\xf5\x47\xa8\x50\x36\x70\xf0\x16\x6a\xba\x57\x82\xc5\xfd\xbd\x66\x72\x85\xf0\x94\x17\x77\x97\xf0\x94\x55\xaa\x91\x16\x66\x29\x44\x6f\xdc\xa3\xd9\xed\x0e\xb8\x03\x24\x82\x2f\x12\xf6\x18\xbc\x41\xc9\x5c\xf0\xfc\x36\x0d\x2c\x47\x9d\xde\xdf\x13\xf3\xdd\x6e\x0e\xf7\xf7\x7c\x09\x4f\xa3\x1f\x30\x67\xb5\xcd\x4b\xb6\xdb\xad\x74\xf7\x1c\xe1\x1d\xe6\x8d\xc5\xf1\xe4\xfe\x1e\x85\xc1\xdd\xce\x34\x59\xc5\xed\xb8\x5b\x4e\xe3\xb2\xd8\xed\x48\xe6\x56\xce\xdd\x0e\x62\x62\x2a\x0b\xbc\x83\xa7\xd1\xf7\xa8\xb9\x2a\x0c\x78\xfa\x24\x66\x8b\x24\x16\x7c\xd1\xae\x3b\x54\x52\xdc\x88\x3d\x5e\x62\x02\x4c\x8f\x73\xe7\x36\x4e\xd4\xa1\xa4\x67\xbc\x60\x15\xf6\xd2\xb7\x78\x30\xdc\xe2\x2d\x6e\xd3\xe0\xfe\x7e\xb8\xb6\x9d\xcd\x99\x10\x19\x23\xbd\xf8\xa3\xf5\x8b\x7e\x43\xc2\xe9\x9a\x1b\x97\x52\x2d\x3a\x09\xf6\x62\x7f\xa4\x5b\x1f\x5d\x5c\x56\xd5\x33\xb8\xb9\x1e\xdc\x5a\xe7\x3c\xfe\xd5\x91\xc7\xdf\x9c\x25\xae\x99\x44\x01\xee\x6f\x68\x2a\x26\xba\xe7\xd6\x5b\x06\xce\x77\xbc\x28\xa4\x3b\xba\x17\xad\xbf\xeb\xa7\x73\x50\x6b\xd4\x4b\xa1\x36\x33\x60\x8d\x55\x73\xa8\xd8\x5d\x1f\xef\x6e\xa6\xd3\xa1\xdc\x94\x0a\xb2\x4c\xa0\xbb\x5d\x34\xfe\xda\xa0\xb1\xa6\xbf\x4b\xfc\x94\xfb\x4b\x57\x4a\x81\xd2\x60\x71\xa4\x0d\xda\x91\x54\xeb\xa8\x06\xa6\xef\x95\x79\x56\xf6\xa5\x52\x7d\x08\x19\x8a\xd1\xb2\x1e\x44\xbb\x60\x91\x58\xbd\xa7\xbb\x48\x6c\xf1\x49\x21\x40\x53\x8a\xf7\x50\x04\xf0\x37\x1a\x9d\xbd\x46\xd4\x3e\xbf\x20\xc8\x82\x7b\x4d\x62\x5b\x7c\xc6\xce\x04\xc2\x8c\x19\xfc\x98\xed\x5d\xa4\xdf\x6f\xef\x5e\x3f\x77\xff\x12\x99\xb6\x19\x32\xfb\x31\x02\x2c\x1b\x59\x0c\xce\xef\xee\xce\xcf\x15\xa0\x91\x7c\x8d\xda\x70\xbb\xfd\x58\x09\xb0\xd8\x8b\xe0\xdf\x0f\x45\x48\x62\xab\x1f\xc7\xda\xf0\xe5\x0b\x39\xf7\xef\xa5\x24\x37\x8b\x7f\x57\x1b\x28\x14\x1a\xb0\x25\x37\x40\xc1\xf5\xeb\x24\x2e\x6f\x7a\x92\x7a\xf1\x9e\x26\x9c\x52\x61\xe9\x52\x0b\xe0\x06\x74\x23\x5d\xe4\x55\x12\x6c\x89\x87\xe9\x48\x1b\xa4\x23\x78\xaf\x28\xa5\x5b\xa3\xb4\x50\x31\xc1\x73\xae\x1a\x03\x2c\xb7\x4a\x1b\x58\x6a\x55\x01\xde\x95\xac\x31\x96\x18\xd1\xf5\xc1\xd6\x8c\x0b\xe7\x4b\xce\xa4\xa0\x34\xb0\x3c\x6f\xaa\x86\x52\x52\xb9\x02\x94\xaa\x59\x95\xad\x2c\x56\x81\x0f\x4c\x42\xc9\x55\x2f\x8f\xa9\x59\x05\xcc\x5a\x96\xdf\x9a\x4b\xe8\x6e\x05\x60\x1a\xc1\x72\x2c\x68\x55\xae\xaa\x4a\x49\xb8\xd1\x05\xd4\x4c\xdb\x2d\x98\xc3\xdc\x82\xe5\xb9\x8b\x72\x11\xbc\x91\x5b\x25\x11\x4a\xb6\x76\x12\xc2\x7b\x5f\x4e\x90\x5c\x7f\x61\x39\x66\x4a\xf5\xd4\x50\xb1\x6d\xb7\x5d\x2b\xfd\x86\xdb\x92\x7b\xf5\xd4\xa8\x2b\x5a\x5a\x80\xe0\x15\xb7\x26\x4a\xe2\x7a\x7f\xa3\xee\x63\xb3\x08\x4b\xa5\xf9\x6f\x94\xd8\x88\xe1\xf5\x69\x8f\x2e\x97\xee\x6e\x74\x56\x17\xb8\xb4\x33\x78\xe1\xef\xc6\x63\x1c\xb7\x15\xd0\x39\x10\x77\x3c\x5d\x65\x49\x01\x67\x06\x37\x3e\x9d\xf5\x89\x44\x61\x07\x12\x14\x47\x50\xf3\x9b\xbe\x7e\x5d\xdf\xf5\x72\xf4\x39\xf1\xb4\x67\x42\x08\x38\x54\xca\x9a\xf7\x6a\xbc\x84\x8a\xdd\x22\x30\x48\xd8\x51\x85\xdc\x0a\xed\xea\x2b\xee\xfa\x03\xb1\xdd\x20\xda\xaf\xc9\x75\xd3\x1f\x3c\x43\x2e\x57\xcf\xae\xa7\x1e\x91\xf4\x40\xec\x9f\x5d\x4f\xb9\xb4\xea\xd9\xf5\x74\x7a\x37\xfd\xc8\x7f\xcf\xae\xa7\x4a\x3e\xbb\x9e\xda\x12\x9f\x5d\x4f\x9f\x5d\xdf\x0c\xb1\xec\x47\xba\xcc\x92\xa8\xd0\xd0\x6e\x1d\xc4\x03\xb0\x4c\xaf\xd0\xa6\xc1\xff\xb2\x4c\x35\x76\x96\x09\x26\x6f\x83\x85\x13\x97\xb2\x0d\x87\x82\xf3\xf9\x29\xd4\xcc\x10\x24\x48\x62\x87\x92\xb6\x17\x62\x60\x6c\x1a\xad\x55\x23\x29\x2a\x02\x9d\xd9\x79\xa8\x1c\x11\xca\x48\x31\x93\x28\xc9\x74\xbc\x78
\xab\xea\x6d\xe8\x98\xb8\xe5\x27\x6a\x34\x4d\x5d\x2b\x6d\xa3\xa1\x3a\x19\xd5\x41\x02\x4d\xfc\x7a\xfa\xf2\xf5\xab\x47\xc5\x37\x94\x65\xbb\x33\xf4\x12\xb2\x4c\xad\x11\x7c\x4e\x9f\xa9\x3b\x60\xb2\x80\x25\xd7\x08\x6c\xc3\xb6\x5f\x25\x71\xe1\x2a\xb0\xcf\x47\xed\xb2\xf5\xae\x7f\x2a\xd8\x76\x2e\x7f\x09\x75\x93\x09\x6e\x4a\x60\x20\x71\x03\x89\xb1\x5a\xc9\xd5\xc2\x8d\xe6\x54\x92\xba\x57\xa8\x95\xb1\x8f\x99\x1f\xab\x0c\x8b\xe2\x0c\x00\xbe\x94\xfd\x37\x9b\x4d\xd4\x69\xd2\x19\xbf\x44\x51\xc7\x74\xfd\x35\x92\xdb\x6d\xec\xdd\x48\xc9\xf8\x6b\x5e\xa4\xd7\xaf\xaf\x5f\xbd\xba\x7e\xf1\x6f\xaf\x5f\xbe\xbc\x7e\xfd\xe2\xe5\x43\xc8\xa0\x43\x7d\x26\x30\x7c\x1a\xfd\xad\xa2\xaa\xb5\xcf\xa1\x3d\x5e\xba\xdc\x8d\x22\x74\x41\x35\x88\x0e\xfe\x61\x0c\x35\x92\x12\x91\x90\x89\xb3\x39\xc4\x27\xa0\xc8\xc1\xe8\x11\xc9\x3e\x13\x5a\x1d\x7c\x08\x29\xaa\xb1\x74\xc2\xae\x98\xe7\x4a\xf6\x70\xba\x04\xc3\xab\x5a\x6c\x21\xdf\x5b\xfd\x3c\xae\x1e\x34\xca\xef\xc2\xea\xd0\x6c\x1e\x64\x2e\xfa\x57\xaa\x40\x8a\xfa\xa6\x31\x39\xd6\xae\xcb\x4b\x91\xf4\x4f\xdb\xdf\x98\xb4\x5c\x62\x17\x71\x23\xf8\x4e\x8a\x2d\x34\x06\x61\xa9\x34\x14\x98\x35\xab\x95\x4b\x13\x34\xd4\x9a\xaf\x99\xc5\x2e\xcc\x9a\x16\x15\x3d\x28\x06\x95\x0d\xa5\x3c\x62\x90\x81\xfc\x4d\x35\x90\x33\x09\x56\xb3\xfc\xd6\x7b\x4a\xa3\x35\x79\x4a\x8d\xfe\x34\x7d\xa0\xcf\x50\xa8\x8d\x23\xf1\xe7\x5e\x72\x14\x2e\xea\x1b\x44\x28\xd5\x06\xaa\x26\x77\x0e\x49\x51\xdd\x1d\x62\xc3\xb8\x85\x46\x5a\x2e\xbc\x3e\x6d\xa3\x25\xe5\x08\x78\x10\xa5\x4f\x6a\xbf\x04\xab\xc5\xfb\x12\xcf\xa4\x44\x7d\xd5\x06\x1a\xdf\x7a\x72\xa8\xb5\xb2\x98\x93\x41\x81\xad\x18\x97\x86\x2c\xe2\xf2\x00\xac\x3e\xa2\xaa\xeb\x9f\xda\x87\x7d\x87\xd2\x4d\xc7\x31\xfc\x55\xa8\x8c\x09\x58\x13\xd2\x33\x41\xe9\x9c\x82\x52\xd1\xd1\x07\xda\x32\x96\xd9\xc6\x80\x5a\xba\x51\x2f\x39\xad\x5f\x33\x4d\x16\xc4\xaa\xb6\x90\xb6\xfd\x35\x1a\x33\xa8\xd7\x6d\xd7\x90\x5e\xa9\x72\x3f\x98\xef\xb5\x9e\xc2\xcf\xbf\xcc\x9f\xb4\xa2\xfc\x19\x97\x0e\x12\x84\x6f\x7f\x64\x5b\x32\x0b\xb9\x46\x66\xd1\x40\x2e\x94\x69\xb4\x97\xb0\xd0\xaa\x06\x92\xb2\xe3\xd4\x71\xa6\x89\xda\xed\xd6\x31\x19\x97\xcc\x94\x93\xb6\x3d\xa8\xd1\x59\xa9\x9f\xeb\xc6\x2f\x08\x75\x63\x62\xc0\xd3\xe9\x1c\x78\xd2\xf1\x8d\x04\xca\x95\x2d\xe7\xc0\x9f\x3f\xef\x89\x2f\xf8\x12\xc6\x1d\xc5\xcf\xfc\x97\xc8\xde\x45\xb4\x0b\xa4\x29\x0c\x77\x73\x1b\xb6\x7c\x4c\x2d\x78\x8e\x63\x7e\x09\x57\x93\x79\x37\x9b\x69\x64\xb7\xdd\x5b\x6b\x47\xff\x9f\xfb\xbb\x9b\x1f\x6a\xc6\x29\xff\x40\x37\xbe\xf6\x37\xc0\x60\xc5\x8d\x85\x46\x0b\x68\x7d\xd8\x9b\xa0\x37\x88\xa3\x1b\x6a\xe5\x04\x97\xed\x43\x8b\xa9\xee\x08\x9e\x4d\x64\x50\x16\xe3\xff\xf8\xf1\xbb\x6f\x23\x63\x35\x97\x2b\xbe\xdc\x8e\xef\x1b\x2d\x66\xf0\x74\x1c\xfc\x4b\xa3\x45\x30\xf9\x79\xfa\x4b\xb4\x66\xa2\xc1\x4b\x67\xef\x99\xfb\x7b\xb2\xcb\x25\xb4\x8f\x33\x38\xdc\x70\x37\x99\xcc\xcf\xf7\x49\x06\x6d\x1d\x8d\x06\xed\x98\x08\x7b\xe0\x1f\xeb\x88\x41\x85\xb6\x54\xce\x75\x35\xe6\x4a\x4a\xcc\x2d\x34\xb5\x92\xad\x4a\x40\x28\x63\xf6\x40\xec\x28\xd2\x53\x50\xb4\xf4\xa9\x0b\xd6\xff\x8d\xd9\x8f\x2a\xbf\x45\x3b\x1e\x8f\x37\x5c\x16\x6a\x13\x09\xe5\xaf\xda\x88\x9c\x54\xe5\x4a\x40\x9a\xa6\xd0\x46\xd1\x60\x02\x5f\x43\xb0\x31\x14\x4f\x03\x98\xd1\x23\x3d\x4d\xe0\x39\x1c\x2f\x2f\x29\xde\x3f\x87\x20\x66\x35\x0f\x26\xde\x1d\x3a\xc5\x2b\x59\xa1\x31\x6c\x85\x43\x01\x5d\x65\xd4\x83\x8c\xce\x51\x99\x15\xa4\xe0\x0c\x54\x33\x6d\xd0\x93\x44\x54\x8d\x77\x68\x23\xcc\x3a\xb2\x34\x05\xd9\x08\xb1\x07\xa9\x77\x8a\x79\x07\xbf\x03\xf2\xc8\xc7\x9a\xaf\xd2\x14\xa8\x34\x25\x15\x17\xfb\x95\x64\x7c\x5f\x44\x4f\x22\x8a\x0b\xfb\x15\x93\xf9\x10\xcd\x07\xdc\xb0\xf8\x3d\x76\x58\x1c\xf3\xc3\xe2\x01\x86\xae\x67\xf1\x18\x3f\xd
f\xe3\x18\xb0\x73\x03\x0f\x70\x93\x4d\x95\xa1\x7e\x8c\x9d\xef\x59\xb4\xec\x9c\xaa\xdf\x49\x3b\x58\x7b\x09\x57\xaf\x26\x0f\x70\x47\xad\xd5\x83\xcc\xa5\xb2\xdb\xf1\xbd\x60\x5b\xca\x99\x60\x64\x55\xfd\xd6\xb5\x18\x46\x97\x2e\xe2\xce\xa0\xe7\x70\xe9\x9a\xc7\x33\x18\xb9\x37\x9a\xe7\x15\xba\x55\x2f\xa7\xd3\xe9\x25\x74\x5f\x5d\xfe\xc4\xc8\x09\x75\x83\xbb\x07\xe4\x31\x4d\x9e\x53\xdc\xff\x1c\x89\x5a\x1e\xbd\x4c\xed\xfb\x67\x48\xd5\xc7\x86\x03\xb1\xe0\x0f\x7f\x80\x93\xd9\x43\x18\xc7\x31\xfc\x17\xa3\x32\x5c\x08\xd7\x3d\x70\x4d\x83\x9e\xbe\xe2\xc6\xb8\x62\xdc\x40\xa1\x24\xb6\x6b\x3e\xed\xda\x3f\x91\xb1\x25\x83\x05\x4c\x8f\x05\xa4\xeb\x70\x10\x16\xce\x44\x8b\x01\xdf\xc3\x40\x70\xb1\x1b\xee\x77\xb0\x92\x57\x08\x5f\xa5\x10\x04\xc3\xc5\x27\x14\x44\xd0\x33\xbb\x30\x68\xdf\x7b\x5b\x8c\xdb\xe8\x78\x2e\x76\x4d\x2e\xe1\x66\x3a\x9d\x4e\x4e\x84\xd8\xed\xd5\xfb\xa6\xa6\xb4\x09\x98\xdc\xba\x2b\xb1\xd7\xad\x4b\x1c\x29\x05\xa2\x2b\x4d\x40\xae\x84\xf0\x39\x4b\xbb\x94\x14\xdc\x36\x4f\x52\x08\xaf\xe6\x67\xa2\xe8\x40\x93\x83\xa3\x1d\x9b\xe7\x8c\xee\x8f\x4d\x74\xa8\xb3\x23\xe2\xf0\xea\xc0\x28\x07\xf6\x3a\x6f\x98\x8b\x5e\x6e\xbe\xd7\xe8\x91\xb9\xf6\xf6\x3a\xd6\xd9\x40\x7e\xcf\xe7\xf9\xd5\x47\x1e\xa3\x9f\xae\x1b\x53\x8e\x8f\x04\x9d\xcc\x4f\x6d\xf3\xce\xa2\xa6\x2c\x59\x51\xc8\x22\x5b\x50\x29\xa0\xf1\xc4\x24\x2e\x55\xd7\x18\x6a\x94\x05\xea\x2e\xa5\xf0\x99\x3d\x25\x80\x07\x26\xf3\x55\xe5\x10\x4e\x9f\xe8\x30\x2e\x25\x53\x12\x01\x00\x8e\x9c\xc0\x01\xf5\x00\xa9\x44\x8c\x82\xd5\x06\x0b\x48\xc1\x7f\x04\x1f\x4f\xa2\x46\xf2\xbb\xf1\x24\x6c\xdf\x8f\x79\x74\xf3\xf3\xbe\x4c\xec\xc4\x7e\x9e\x42\x90\x58\x0d\xbc\x48\x47\x01\x3c\x3f\xe7\x82\x14\x75\x47\x8b\xbd\x04\xc3\xa5\x00\x89\x2d\x16\xae\x0f\xea\xeb\xb5\xbf\x07\x19\xcb\x6f\x57\xae\x10\x9a\x51\xaa\x35\x3e\x61\xcb\xd6\xcc\x32\xed\xb8\x4e\xe6\xb0\x27\x6f\x0b\xc5\x9c\x8c\x33\x07\x5f\x91\xba\x76\x2b\xf4\x9f\x28\xdc\x5b\xa6\x74\x81\x3a\xd4\xac\xe0\x8d\x99\xc1\x8b\xfa\x6e\xfe\xf7\xee\x13\x8e\x6b\x0a\x3f\x2a\x6a\xad\x71\x71\x22\x51\xdb\x65\x7c\x0e\x41\x12\x13\xc1\xef\xb1\xe9\x0f\x3b\xfc\xf8\x0e\x67\x5a\xdf\xd0\x7f\x1a\x6f\xc7\x2b\x5e\x14\x02\x49\xe0\x3d\x7b\x72\x46\xb2\xff\xd0\xa5\x0e\xb7\x84\xb6\xe7\xbd\x5f\xb3\x03\x14\x06\x1f\x59\xd0\xb7\xcf\x47\x04\x80\x90\x8e\xcc\x9d\xce\xdb\x62\xdb\x0d\xeb\x91\xd3\x45\xfb\x53\x8a\xa2\xd1\x2e\xd7\x1a\x87\x2d\xc0\x2e\x61\x64\x28\xf7\x2b\xcc\x68\x12\x95\x4d\xc5\x24\xff\x0d\xc7\x14\x97\x26\x5e\x57\xae\x1f\x1f\x9c\x5e\xc9\x27\xc2\xec\x1b\xe5\xa3\x2e\xc6\x8d\x5a\x25\x8e\x3a\xeb\xbe\xd8\xd7\xf6\x33\x98\xce\x47\x9f\xa8\xa1\xf3\xbb\x84\x19\xd3\x30\x7c\x09\xbb\xe0\x0b\x5a\xd1\xee\xdd\x5c\xc6\xf4\xc8\x77\x32\x5c\x7e\x2e\xd5\x26\x1d\xdd\x4c\x7b\x21\xbd\xa1\x9d\x9d\x47\x2d\xd6\x4e\x8c\x41\x52\x76\xae\xb9\x80\x9b\xe9\x97\x90\xd6\x77\x43\x8e\x4e\x60\x35\xaf\xb1\x00\x96\x5b\xbe\xc6\xff\x87\x83\x7c\x01\x25\x7f\xb2\x88\x84\xc3\x4e\x79\x0e\xa6\x07\xf2\xd2\x6c\xaf\xdb\x7f\x25\x7f\x83\xd8\x69\xf8\x39\x04\x67\x0f\xf2\x20\x12\x8f\x08\x8f\x5c\xfb\x61\xbf\x77\x1f\x98\x82\xe3\x98\x42\xd9\x6e\xff\x71\x74\x12\x95\xb6\x12\xe3\x20\xb1\xee\x47\x32\x24\x73\xcf\xc1\x31\xf0\xc3\x87\x29\xdd\xee\xb0\x90\xa1\xfa\x1d\x8f\xea\x2c\x18\x24\x27\x7d\x2d\xd6\x65\x22\xb0\xdb\xff\x96\x28\x8e\xe1\x47\xcb\xb4\x05\x06\x3f\xbd\x83\xa6\x2e\x98\xf5\x9f\x72\x28\x3e\xfa\x4f\x25\xdd\x8f\x8d\x32\xa6\x0d\x2c\x95\xde\x30\x5d\xb4\xfd\x19\x5b\xe2\xd6\x7d\xca\xe9\x52\x3f\x83\xf6\x1d\xdd\x62\x6b\x26\xc6\x27\x75\xdf\xd3\xf1\x28\x1a\x9a\x7c\x34\x89\x90\xe5\xe5\x29\xa1\x8b\x58\xfd\xbe\x29\x7c\xeb\x4a\x80\xf1\xd3\xb1\x2d\xb9\x99\x44\xcc\x5a\x3d\x1e\x1d\x80\x61\x34\x21\xbb\x5e\x0d\x4a\xb2\x7e\x79\x72\xe0\x56\x8f\xf1\xd8\x
27\xd3\x7d\x22\xd0\x91\xe7\xc6\x8c\x3d\xae\x46\x97\x03\xde\x87\xb0\x1a\x3d\x1b\xf5\x86\xda\xbb\xf7\xfe\x1c\xe9\x59\x49\x0e\x58\x8f\xc8\xcb\x46\x27\xdb\xb3\xa2\x78\x4b\xfe\x33\x0e\xce\x78\xfa\x31\x3a\x26\xbd\xb2\xfd\x7d\xfd\xa8\x96\xfd\xcf\x32\x1e\x50\x31\x2f\x46\x93\xc8\x34\x99\xef\x4d\x8c\x5f\xf6\x05\x58\x47\xe6\xc0\x7b\x1c\x0a\x4e\x12\x0a\xda\xe2\x30\xa9\x08\x8f\x92\x90\x47\xa2\x46\xbb\xa5\x3f\xd5\xee\x92\x14\x3e\x9d\xf4\xad\xad\x6f\x0c\x25\x57\xbe\xf5\xbf\xc1\xcc\xb8\x4e\x02\xb4\x78\x77\xdd\x1c\xdf\xb5\x79\xf3\xfd\xbb\x41\xe7\xa6\xf7\x88\xb1\xe3\xde\xff\x0e\xf0\x5c\x9f\xe4\xec\x0f\x0f\x37\x9b\x4d\xb4\x52\x6a\x25\xfc\x4f\x0e\xfb\x46\x4a\xcc\x6a\x1e\x7d\x30\x01\x30\xb3\x95\x39\x14\xb8\x44\xbd\x18\xb0\x6f\xbb\x2b\x49\xec\x7f\x12\x97\xc4\xfe\x57\xbf\xff\x17\x00\x00\xff\xff\x31\x9f\x54\x5e\x06\x2c\x00\x00") +var _faucetHtml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x5a\x7b\x93\xdb\x36\x92\xff\x7b\xfc\x29\x3a\x3c\x7b\x25\x9d\x87\xa4\x66\xc6\xf6\xfa\x24\x52\x29\xaf\x37\xbb\xe7\xab\xbb\x24\x95\x38\x75\xb7\x95\x4d\x5d\x81\x64\x4b\x84\x07\x04\x18\x00\x94\x46\x99\xd2\x77\xbf\x6a\x80\xa4\xa8\xc7\x4c\xec\xb5\xaf\x6a\xfd\xc7\x98\xc4\xa3\xd1\x8f\x5f\xa3\x1f\x54\xf2\xd5\x9f\xbf\x7b\xfb\xfe\x6f\xdf\x7f\x03\xa5\xad\xc4\xe2\x49\x42\xff\x81\x60\x72\x95\x06\x28\x83\xc5\x93\x8b\xa4\x44\x56\x2c\x9e\x5c\x5c\x24\x15\x5a\x06\x79\xc9\xb4\x41\x9b\x06\x8d\x5d\x86\xaf\x83\xfd\x44\x69\x6d\x1d\xe2\xaf\x0d\x5f\xa7\xc1\xff\x84\x3f\xbd\x09\xdf\xaa\xaa\x66\x96\x67\x02\x03\xc8\x95\xb4\x28\x6d\x1a\xbc\xfb\x26\xc5\x62\x85\x83\x7d\x92\x55\x98\x06\x6b\x8e\x9b\x5a\x69\x3b\x58\xba\xe1\x85\x2d\xd3\x02\xd7\x3c\xc7\xd0\xbd\x5c\x02\x97\xdc\x72\x26\x42\x93\x33\x81\xe9\x55\xb0\x78\x42\x74\x2c\xb7\x02\x17\xf7\xf7\xd1\xb7\x68\x37\x4a\xdf\xee\x76\x33\x78\xd3\xd8\x12\xa5\xe5\x39\xb3\x58\xc0\x5f\x58\x93\xa3\x4d\x62\xbf\xd2\x6d\x12\x5c\xde\x42\xa9\x71\x99\x06\xc4\xba\x99\xc5\x71\x5e\xc8\x0f\x26\xca\x85\x6a\x8a\xa5\x60\x1a\xa3\x5c\x55\x31\xfb\xc0\xee\x62\xc1\x33\x13\xdb\x0d\xb7\x16\x75\x98\x29\x65\x8d\xd5\xac\x8e\x6f\xa2\x9b\xe8\x8f\x71\x6e\x4c\xdc\x8f\x45\x15\x97\x51\x6e\x4c\x00\x1a\x45\x1a\x18\xbb\x15\x68\x4a\x44\x1b\x40\xbc\xf8\xc7\xce\x5d\x2a\x69\x43\xb6\x41\xa3\x2a\x8c\x5f\x44\x7f\x8c\xa6\xee\xc8\xe1\xf0\xe3\xa7\xd2\xb1\x26\xd7\xbc\xb6\x60\x74\xfe\xd1\xe7\x7e\xf8\xb5\x41\xbd\x8d\x6f\xa2\xab\xe8\xaa\x7d\x71\xe7\x7c\x30\xc1\x22\x89\x3d\xc1\xc5\x67\xd1\x0e\xa5\xb2\xdb\xf8\x3a\x7a\x11\x5d\xc5\x35\xcb\x6f\xd9\x0a\x8b\xee\x24\x9a\x8a\xba\xc1\x2f\x76\xee\x43\x36\xfc\x70\x6c\xc2\x2f\x71\x58\xa5\x2a\x94\x36\xfa\x60\xe2\xeb\xe8\xea\x75\x34\xed\x06\x4e\xe9\xbb\x03\xc8\x68\x74\xd4\x45\xb4\x46\x4d\xc8\x15\x61\x8e\xd2\xa2\x86\x7b\x1a\xbd\xa8\xb8\x0c\x4b\xe4\xab\xd2\xce\xe0\x6a\x3a\x7d\x36\x3f\x37\xba\x2e\xfd\x70\xc1\x4d\x2d\xd8\x76\x06\x4b\x81\x77\x7e\x88\x09\xbe\x92\x21\xb7\x58\x99\x19\x78\xca\x6e\x62\xe7\xce\xac\xb5\x5a\x69\x34\xa6\x3d\xac\x56\x86\x5b\xae\xe4\x8c\x10\xc5\x2c\x5f\xe3\xb9\xb5\xa6\x66\xf2\x64\x03\xcb\x8c\x12\x8d\xc5\x23\x46\x32\xa1\xf2\x5b\x3f\xe6\xbc\x79\x28\x44\xae\x84\xd2\x33\xd8\x94\xbc\xdd\x06\xee\x20\xa8\x35\xb6\xe4\xa1\x66\x45\xc1\xe5\x6a\x06\xaf\xea\x56\x1e\xa8\x98\x5e\x71\x39\x83\xe9\x7e\x4b\x12\x77\x6a\x4c\x62\x7f\x71\x3d\xb9\x48\x32\x55\x6c\x9d\x0d\x0b\xbe\x86\x5c\x30\x63\xd2\xe0\x48\xc5\xee\x42\x3a\x58\x40\xf7\x10\xe3\xb2\x9b\x3a\x98\xd3\x6a\x13\x80\x3b\x28\x0d\x3c\x13\x61\xa6\xac\x55\xd5\x0c\xae\x88\xbd\x76\xcb\x11\x3d\x11\x8a\x55\x78\x75\xdd\x4d\x5e\x24\xe5\x55\x47\xc4\xe2\x9d\x0d\x9d\x7d\x7a\xcb\x04\x8b\x84\x77\x7b\x97\x0c\x96\x2c\xcc\x98\x2d\x03\x60\x9a\xb3\xb0\xe4\x45\x81\x32\x0d\xac\x6e\x90\x70\xc4\x17\x3
0\xbc\xfe\x1e\xb8\xfd\xca\xab\x8e\xaf\xb8\xe0\xeb\x56\xac\xc1\xe3\x91\x84\x0f\x0b\xf1\x1a\xda\x07\xb5\x5c\x1a\xb4\xe1\x40\xa6\xc1\x62\x2e\xeb\xc6\x86\x2b\xad\x9a\xba\x9f\xbf\x48\xdc\x28\xf0\x22\x0d\x1a\x2d\x82\xf6\xfa\x77\x8f\x76\x5b\xb7\xaa\x08\x7a\xc1\x95\xae\x42\xb2\x84\x56\x22\x80\x5a\xb0\x1c\x4b\x25\x0a\xd4\x69\xf0\xa3\xca\x39\x13\x20\xbd\xcc\xf0\xd3\x0f\xff\x09\xad\xc9\xb8\x5c\xc1\x56\x35\x1a\xbe\xb1\x25\x6a\x6c\x2a\x60\x45\x41\x70\x8d\xa2\x28\x88\xf7\x9c\x38\xf0\x9e\xf2\x1a\x66\x56\xee\xf9\xbd\x48\xb2\xc6\x5a\xd5\x2f\xcc\xac\x84\xcc\xca\xb0\xc0\x25\x6b\x84\x85\x42\xab\xba\x50\x1b\x19\x5a\xb5\x5a\x51\xa8\xf3\x52\xf8\x4d\x01\x14\xcc\xb2\x76\x2a\x0d\xba\xb5\x9d\x11\x99\xa9\x55\xdd\xd4\xad\x19\xfd\x20\xde\xd5\x4c\x16\x58\x90\xd1\x85\xc1\x60\xf1\x57\xbe\x46\xa8\xd0\x0b\x73\x71\x8c\x89\x9c\x69\xb4\xe1\x90\xe8\x09\x32\x92\xd8\x33\xe3\x45\x82\xf6\x5f\xd2\x88\x8e\x52\x2f\x42\x85\xb2\x81\x83\xb7\x50\xd3\xc5\x12\x2c\xee\xef\x35\x93\x2b\x84\xa7\xbc\xb8\xbb\x84\xa7\xac\x52\x8d\xb4\x30\x4b\x21\x7a\xe3\x1e\xcd\x6e\x77\x40\x1d\x20\x11\x7c\x91\xb0\xc7\xf0\x0d\x4a\xe6\x82\xe7\xb7\x69\x60\x39\xea\xf4\xfe\x9e\x88\xef\x76\x73\xb8\xbf\xe7\x4b\x78\x1a\xfd\x80\x39\xab\x6d\x5e\xb2\xdd\x6e\xa5\xbb\xe7\x08\xef\x30\x6f\x2c\x8e\x27\xf7\xf7\x28\x0c\xee\x76\xa6\xc9\x2a\x6e\xc7\xdd\x76\x1a\x97\xc5\x6e\x47\x3c\xb7\x7c\xee\x76\x10\x13\x51\x59\xe0\x1d\x3c\x8d\xbe\x47\xcd\x55\x61\xc0\xaf\x4f\x62\xb6\x48\x62\xc1\x17\xed\xbe\x43\x25\xc5\x8d\xd8\xe3\x25\x26\xc0\xf4\x40\x77\x7e\xe3\x58\x1d\x72\x7a\xc6\x0d\x56\x61\xcf\x7d\x8b\x07\xc3\x2d\xde\xe2\x36\x0d\xee\xef\x87\x7b\xdb\xd9\x9c\x09\x91\x31\xd2\x8b\x17\xad\xdf\xf4\x1b\x12\x4e\xd7\xdc\xb8\x9c\x6a\xd1\x71\xb0\x67\xfb\x23\xfd\xfa\xe8\xe6\xb2\xaa\x9e\xc1\xcd\xf5\xe0\xda\x3a\xe7\xf2\xaf\x8e\x5c\xfe\xe6\xec\xe2\x9a\x49\x14\xe0\xfe\x86\xa6\x62\xa2\x7b\x6e\xbd\x65\x70\x0d\x1c\x6f\x0a\xe9\x92\xee\x59\xeb\x2f\xfb\xe9\x1c\xd4\x1a\xf5\x52\xa8\xcd\x0c\x58\x63\xd5\x1c\x2a\x76\xd7\x07\xbc\x9b\xe9\x74\xc8\x37\xe5\x82\x2c\x13\xe8\xae\x17\x8d\xbf\x36\x68\xac\xe9\x2f\x13\x3f\xe5\xfe\xd2\x9d\x52\xa0\x34\x58\x1c\x69\x83\x4e\x24\xd5\xba\x55\x03\xd3\xf7\xca\x3c\xcb\xfb\x52\xa9\x3e\x86\x0c\xd9\x68\x49\x0f\xc2\x5d\xb0\x48\xac\xde\xaf\xbb\x48\x6c\xf1\x49\x31\x40\x53\x8e\xf7\x50\x08\xf0\x37\x1a\xc9\x5e\x23\x6a\x9f\x60\x10\x64\xc1\xbd\x26\xb1\x2d\x3e\xe3\x64\x02\x61\xc6\x0c\x7e\xcc\xf1\x2e\xd4\xef\x8f\x77\xaf\x9f\x7b\x7e\x89\x4c\xdb\x0c\x99\xfd\x18\x06\x96\x8d\x2c\x06\xf2\xbb\xbb\xf3\x73\x19\x68\x24\x5f\xa3\x36\xdc\x6e\x3f\x96\x03\x2c\xf6\x2c\xf8\xf7\x43\x16\x92\xd8\xea\xc7\xb1\x36\x7c\xf9\x42\xce\xfd\x7b\x39\xc9\xcd\xe2\xdf\xd5\x06\x0a\x85\x06\x6c\xc9\x0d\x50\x74\xfd\x3a\x89\xcb\x9b\x7e\x49\xbd\x78\x4f\x13\x4e\xa9\xb0\x74\xb9\x05\x70\x03\xba\x91\x2e\xf4\x2a\x09\xb6\xc4\xc3\x7c\xa4\x8d\xd2\x11\xbc\x57\x94\xd3\xad\x51\x5a\xa8\x98\xe0\x39\x57\x8d\x01\x96\x5b\xa5\x0d\x2c\xb5\xaa\x00\xef\x4a\xd6\x18\x4b\x84\xe8\xfa\x60\x6b\xc6\x85\xf3\x25\x67\x52\x50\x1a\x58\x9e\x37\x55\x43\x39\xa9\x5c\x01\x4a\xd5\xac\xca\x96\x17\xab\xc0\x07\x26\xa1\xe4\xaa\xe7\xc7\xd4\xac\x02\x66\x2d\xcb\x6f\xcd\x25\x74\xb7\x02\x30\x8d\x60\x39\x16\xb4\x2b\x57\x55\xa5\x24\xdc\xe8\x02\x6a\xa6\xed\x16\xcc\x61\x72\xc1\xf2\xdc\x45\xb9\x08\xde\xc8\xad\x92\x08\x25\x5b\x3b\x0e\xe1\xbd\xaf\x27\x88\xaf\xbf\xb0\x1c\x33\xa5\xfa\xd5\x50\xb1\x6d\x77\x5c\xcb\xfd\x86\xdb\x92\x7b\xf5\xd4\xa8\x2b\xda\x5a\x80\xe0\x15\xb7\x26\x4a\xe2\x7a\x7f\xa3\xee\x63\xb3\x08\x4b\xa5\xf9\x6f\x94\xd9\x88\xe1\xf5\x69\x8f\x2e\x97\xee\x6e\x74\x56\x17\xb8\xb4\x33\x78\xe1\xef\xc6\x63\x1c\xb7\x25\xd0\x39\x10\x77\x34\x5d\x69\x49\x01\x67\x06\x37\x3e\x9f\xf5\x89\x44\x61\x07\x1c\x
14\x47\x50\xf3\x87\xbe\x7e\x5d\xdf\xf5\x7c\xf4\x49\xf1\xb4\x27\x42\x08\x38\x54\xca\x9a\xf7\x6a\xbc\x84\x8a\xdd\x22\x30\x48\xd8\x51\x89\xdc\x32\xed\x0a\x2c\xee\x1a\x04\xb1\xdd\x20\xda\xaf\xc9\x75\xd3\x1f\x3c\x41\x2e\x57\xcf\xae\xa7\x1e\x91\xf4\x40\xe4\x9f\x5d\x4f\xb9\xb4\xea\xd9\xf5\x74\x7a\x37\xfd\xc8\x7f\xcf\xae\xa7\x4a\x3e\xbb\x9e\xda\x12\x9f\x5d\x4f\x9f\x5d\xdf\x0c\xb1\xec\x47\xba\xd4\x92\x56\xa1\xa1\xd3\x3a\x88\x07\x60\x99\x5e\xa1\x4d\x83\xff\x65\x99\x6a\xec\x2c\x13\x4c\xde\x06\x0b\xc7\x2e\x65\x1b\x0e\x05\xe7\x13\x54\xa8\x99\x21\x48\x10\xc7\x0e\x25\x6d\x33\xc4\xc0\xd8\x34\x5a\xab\x46\x52\x54\x04\x92\xd9\x79\xa8\x1c\x11\xca\x48\x31\x93\x28\xc9\x74\xbc\x78\xab\xea\x6d\xe8\x88\xb8\xed\x27\x6a\x34\x4d\x5d\x2b\x6d\xa3\xa1\x3a\x19\x15\x42\x02\x4d\xfc\x7a\xfa\xf2\xf5\xab\x47\xd9\x37\x94\x66\x3b\x19\x7a\x0e\x59\xa6\xd6\x08\x3e\xa9\xcf\xd4\x1d\x30\x59\xc0\x92\x6b\x04\xb6\x61\xdb\xaf\x92\xb8\x70\x25\xd8\xe7\xa3\x76\xd9\x7a\xd7\x3f\x15\x6c\x3b\x97\xbf\x84\xba\xc9\x04\x37\x25\x30\x90\xb8\x81\xc4\x58\xad\xe4\x6a\xe1\x46\x73\xaa\x49\xdd\x2b\xd4\xca\xd8\xc7\xcc\x8f\x55\x86\x45\x71\x06\x00\x5f\xca\xfe\x9b\xcd\x26\xea\x34\xe9\x8c\x5f\xa2\xa8\x63\xba\xfe\x1a\xc9\xed\x36\xf6\x6e\xa4\x64\xfc\x35\x2f\xd2\xeb\xd7\xd7\xaf\x5e\x5d\xbf\xf8\xb7\xd7\x2f\x5f\x5e\xbf\x7e\xf1\xf2\x21\x64\x90\x50\x9f\x09\x0c\x9f\x46\x7f\xab\xa8\x6c\xed\x73\x68\x8f\x97\x2e\x77\xa3\x08\x5d\x50\x0d\xa2\x83\x7f\x18\x43\x8d\xa4\x44\x24\x64\xe2\x6c\x0e\xf1\x09\x28\x72\x30\x7a\x84\xb3\xcf\x84\x56\x07\x1f\x42\x8a\x6a\x2c\x49\xd8\x55\xf3\x5c\xc9\x1e\x4e\x97\x60\x78\x55\x8b\x2d\xe4\x7b\xab\x9f\xc7\xd5\x83\x46\xf9\x5d\x58\x1d\x9a\xcd\x83\xcc\x45\xff\x4a\x15\x48\x51\xdf\x34\x26\xc7\xda\xb5\x79\x29\x92\xfe\x69\xfb\x1b\x93\x96\x4b\xec\x22\x6e\x04\xdf\x49\xb1\x85\xc6\x20\x2c\x95\x86\x02\xb3\x66\xb5\x72\x69\x82\x86\x5a\xf3\x35\xb3\xd8\x85\x59\xd3\xa2\xa2\x07\xc5\xa0\xb2\xa1\x94\x47\x0c\x32\x90\xbf\xa9\x06\x72\x26\xc1\x6a\x96\xdf\x7a\x4f\x69\xb4\x26\x4f\xa9\xd1\x4b\xd3\x07\xfa\x0c\x85\xda\xb8\x25\x5e\xee\x25\x47\xe1\xa2\xbe\x41\x84\x52\x6d\xa0\x6a\x72\xe7\x90\x14\xd5\x9d\x10\x1b\xc6\x2d\x34\xd2\x72\xe1\xf5\x69\x1b\x2d\x29\x47\xc0\x83\x28\x7d\x52\xfb\x25\x58\x2d\xde\x97\x78\x26\x25\xea\xab\x36\xd0\xf8\xd6\x2f\x87\x5a\x2b\x8b\x39\x19\x14\xd8\x8a\x71\x69\xc8\x22\x2e\x0f\xc0\xea\x23\xaa\xba\xfe\xa9\x7d\xd8\xb7\x28\xdd\x74\x1c\xc3\x5f\x85\xca\x98\x80\x35\x21\x3d\x13\x94\xce\x29\x28\x15\x89\x3e\xd0\x96\xb1\xcc\x36\x06\xd4\xd2\x8d\x7a\xce\x69\xff\x9a\x69\xb2\x20\x56\xb5\x85\xb4\x6d\xb0\xd1\x98\x41\xbd\x6e\xdb\x86\xf4\x4a\x95\xfb\xc1\x7c\xaf\xf5\x14\x7e\xfe\x65\xfe\xa4\x65\xe5\xcf\xb8\x74\x90\x20\x7c\x7b\x91\x6d\xc9\x2c\xe4\x1a\x99\x45\x03\xb9\x50\xa6\xd1\x9e\xc3\x42\xab\x1a\x88\xcb\x8e\x52\x47\x99\x26\x6a\x77\x5a\x47\x64\x5c\x32\x53\x4e\xda\xfe\xa0\x46\x67\xa5\x7e\xae\x1b\xbf\x20\xd4\x8d\x89\x00\x4f\xa7\x73\xe0\x49\x47\x37\x12\x28\x57\xb6\x9c\x03\x7f\xfe\xbc\x5f\x7c\xc1\x97\x30\xee\x56\xfc\xcc\x7f\x89\xec\x5d\x44\xa7\x40\x9a\xc2\xf0\x34\x77\x60\x4b\xc7\xd4\x82\xe7\x38\xe6\x97\x70\x35\x99\x77\xb3\x99\x46\x76\xdb\xbd\xb5\x76\xf4\xff\xb9\xbf\xbb\xf9\xa1\x66\x9c\xf2\x0f\x74\xe3\x6b\x7f\x03\x0c\x56\xdc\x58\x68\xb4\x80\xd6\x87\xbd\x09\x7a\x83\xb8\x75\x43\xad\x9c\xe0\xb2\x7d\x68\x31\xd5\x89\xe0\xc9\x44\x06\x65\x31\xfe\x8f\x1f\xbf\xfb\x36\x32\x56\x73\xb9\xe2\xcb\xed\xf8\xbe\xd1\x62\x06\x4f\xc7\xc1\xbf\x34\x5a\x04\x93\x9f\xa7\xbf\x44\x6b\x26\x1a\xbc\x74\xf6\x9e\xb9\xbf\x27\xa7\x5c\x42\xfb\x38\x83\xc3\x03\x77\x93\xc9\xfc\x7c\x9f\x64\xd0\xd6\xd1\x68\xd0\x8e\x69\x61\x0f\xfc\x63\x1d\x31\xa8\xd0\x96\xca\xb9\xae\xc6\x5c\x49\x89\xb9\x85\xa6\x56\xb2\x55\x09\x08\x65\xcc\x1e\
x88\xdd\x8a\xf4\x14\x14\xed\xfa\xd4\x05\xeb\xff\xc6\xec\x47\x95\xdf\xa2\x1d\x8f\xc7\x1b\x2e\x0b\xb5\x89\x84\xf2\x57\x6d\x44\x4e\xaa\x72\x25\x20\x4d\x53\x68\xa3\x68\x30\x81\xaf\x21\xd8\x18\x8a\xa7\x01\xcc\xe8\x91\x9e\x26\xf0\x1c\x8e\xb7\x97\x14\xef\x9f\x43\x10\xb3\x9a\x07\x13\xef\x0e\x9d\xe2\x95\xac\xd0\x18\xb6\xc2\x21\x83\xae\x32\xea\x41\x46\x72\x54\x66\x05\x29\x38\x03\xd5\x4c\x1b\xf4\x4b\x22\xaa\xc6\x3b\xb4\x11\x66\xdd\xb2\x34\x05\xd9\x08\xb1\x07\xa9\x77\x8a\x79\x07\xbf\x83\xe5\x91\x8f\x35\x5f\xa5\x29\x50\x69\x4a\x2a\x2e\xf6\x3b\xc9\xf8\xbe\x88\x9e\x44\x14\x17\xf6\x3b\x26\xf3\x21\x9a\x0f\xa8\x61\xf1\x7b\xe4\xb0\x38\xa6\x87\xc5\x03\x04\x5d\xcf\xe2\x31\x7a\xbe\xc7\x31\x20\xe7\x06\x1e\xa0\x26\x9b\x2a\x43\xfd\x18\x39\xdf\xb3\x68\xc9\x39\x55\xbf\x93\x76\xb0\xf7\x12\xae\x5e\x4d\x1e\xa0\x8e\x5a\xab\x07\x89\x4b\x65\xb7\xe3\x7b\xc1\xb6\x94\x33\xc1\xc8\xaa\xfa\xad\x6b\x31\x8c\x2e\x5d\xc4\x9d\x41\x4f\xe1\xd2\x35\x8f\x67\x30\x72\x6f\x34\xcf\x2b\x74\xbb\x5e\x4e\xa7\xd3\x4b\xe8\x3e\xbb\xfc\x89\x91\x13\xea\x06\x77\x0f\xf0\x63\x9a\x3c\xa7\xb8\xff\x39\x1c\xb5\x34\x7a\x9e\xda\xf7\xcf\xe0\xaa\x8f\x0d\x07\x6c\xc1\x1f\xfe\x00\x27\xb3\x87\x30\x8e\x63\xf8\x2f\x46\x65\xb8\x10\xae\x7b\xe0\x9a\x06\xfd\xfa\x8a\x1b\xe3\x8a\x71\x03\x85\x92\xd8\xee\xf9\xb4\x6b\xff\x84\xc7\x76\x19\x2c\x60\x7a\xcc\x20\x5d\x87\x83\xb0\x70\x26\x5a\x0c\xe8\x1e\x06\x82\x8b\xdd\xf0\xbc\x83\x9d\xbc\x42\xf8\x2a\x85\x20\x18\x6e\x3e\x59\x41\x0b\x7a\x62\x17\x06\xed\x7b\x6f\x8b\x71\x1b\x1d\xcf\xc5\xae\xc9\x25\xdc\x4c\xa7\xd3\xc9\x09\x13\xbb\xbd\x7a\xdf\xd4\x94\x36\x01\x93\x5b\x77\x25\xf6\xba\x75\x89\x23\xa5\x40\x74\xa5\x09\xc8\x95\x10\x3e\x67\x69\xb7\x92\x82\xdb\xe6\x49\x0a\xe1\xd5\xfc\x4c\x14\x1d\x68\x72\x20\xda\xb1\x79\xce\xe8\xfe\xd8\x44\x87\x3a\x3b\x5a\x1c\x5e\x1d\x18\xe5\xc0\x5e\xe7\x0d\x73\xd1\xf3\xcd\xf7\x1a\x3d\x32\xd7\xde\x5e\xc7\x3a\x1b\xf0\xef\xe9\x3c\xbf\xfa\x48\x31\xfa\xe9\xba\x31\xe5\xf8\x88\xd1\xc9\xfc\xd4\x36\xef\x2c\x6a\xca\x92\x15\x85\x2c\xb2\x05\x95\x02\x1a\x4f\x4c\xe2\x52\x75\x8d\xa1\x46\x59\xa0\xee\x52\x0a\x9f\xd9\x53\x02\x78\x60\x32\x5f\x55\x0e\xe1\x34\x90\xe8\x44\xb7\x73\xe0\xb0\xa0\x34\x0f\x78\x18\x0e\x64\x71\x79\x99\x92\x08\x00\x70\xe4\x09\x0e\xad\x07\x70\xa5\xc5\x28\x58\x6d\xb0\x80\x14\xfc\xa7\xf0\xf1\x24\x6a\x24\xbf\x1b\x4f\xc2\xf6\xfd\x98\x46\x37\x3f\xef\x6b\xc5\x8e\xf7\xe7\x29\x04\x89\xd5\xc0\x8b\x74\x14\xc0\xf3\x73\x7e\x48\xa1\x77\xb4\xd8\x73\x30\xdc\x0a\x90\xd8\x62\xe1\x9a\xa1\xbe\x68\xfb\x7b\x90\xb1\xfc\x76\xe5\xaa\xa1\x19\xe5\x5b\xe3\x13\xb2\x6c\xcd\x2c\xd3\x8e\xea\x64\x0e\xfb\xe5\x6d\xb5\x98\x93\x85\xe6\xe0\xcb\x52\xd7\x73\x85\xfe\x3b\x85\x7b\xcb\x94\x2e\x50\x87\x9a\x15\xbc\x31\x33\x78\x51\xdf\xcd\xff\xde\x7d\xc7\x71\x9d\xe1\x47\x59\xad\x35\x2e\x4e\x38\x6a\x5b\x8d\xcf\x21\x48\x62\x5a\xf0\x7b\x64\x7a\x61\x87\x9f\xe0\xe1\x4c\xff\x1b\xfa\x0f\xe4\xed\x78\xc5\x8b\x42\x20\x31\xbc\x27\x4f\x1e\x49\xf6\x1f\xfa\xd5\xe1\x91\xd0\x36\xbe\xf7\x7b\x76\x80\xc2\xe0\x23\x1b\xfa\x1e\xfa\x88\x00\x10\x92\xc8\xdc\xe9\xbc\xad\xb8\xdd\xb0\x1e\x39\x5d\xb4\x3f\xa8\x28\x1a\xed\x12\xae\x71\xd8\x02\xec\x12\x46\x86\x12\xc0\xc2\x8c\x26\x51\xd9\x54\x4c\xf2\xdf\x70\x4c\xc1\x69\xe2\x75\xe5\x9a\xf2\xc1\xe9\xbd\x7c\xc2\xcc\xbe\x5b\x3e\xea\x02\xdd\xa8\x55\xe2\xa8\xb3\xee\x8b\x7d\x81\x3f\x83\xe9\x7c\xf4\x89\x1a\x3a\x7f\x4a\x98\x31\x0d\xc3\x97\xb0\x8b\xc0\xa0\x15\x9d\xde\xcd\x65\x4c\x8f\x7c\x3b\xc3\x25\xe9\x52\x6d\xd2\xd1\xcd\xb4\x67\xd2\x1b\xda\xd9\x79\xd4\x62\xed\xc4\x18\xc4\x65\xe7\x9a\x0b\xb8\x99\x7e\x09\x6e\x7d\x4b\xe4\x48\x02\xab\x79\x8d\x05\xb0\xdc\xf2\x35\xfe\x3f\x08\xf2\x05\x94\xfc\xc9\x2c\x12\x0e\x3b\xe5\x39\x98\x1e\xf0\x4b\xb3\xbd\x6e\xff\x95
\xfc\x0d\x62\xa7\xe1\xe7\x10\x9c\x15\xe4\x41\x24\x1e\x2d\x3c\x72\xed\x87\xfd\xde\x7d\x65\x0a\x8e\x03\x0b\xa5\xbc\xfd\x17\xd2\x49\x54\xda\x4a\x8c\x83\xc4\xba\x9f\xca\x10\xcf\x3d\x05\x47\xc0\x0f\x1f\xe6\x75\xbb\xc3\x6a\x86\x8a\x78\x3c\x2a\xb6\x60\x90\xa1\xf4\x05\x59\x97\x8e\xc0\x6e\xff\x8b\xa2\x38\x86\x1f\x2d\xd3\x16\x18\xfc\xf4\x0e\x9a\xba\x60\xd6\x7f\xcf\xa1\x20\xe9\xbf\x97\x74\x3f\x39\xca\x98\x36\xb0\x54\x7a\xc3\x74\xd1\x36\x69\x6c\x89\x5b\xf7\x3d\xa7\xcb\xff\x0c\xda\x77\x74\x8b\xad\x99\x18\x9f\x14\x7f\x4f\xc7\xa3\x68\x68\xf2\xd1\x24\x42\x96\x97\xa7\x0b\x5d\xc4\xea\xcf\x4d\xe1\x5b\x57\x07\x8c\x9f\x8e\x6d\xc9\xcd\x24\x62\xd6\xea\xf1\xe8\x00\x0c\xa3\x09\xd9\xf5\x6a\x50\x97\xf5\xdb\x93\x03\xb7\x7a\x8c\xc6\x3e\xa3\xee\xb3\x81\x6e\x79\x6e\xcc\xd8\xe3\x6a\x74\x39\xa0\x7d\x08\xab\xd1\xb3\x51\x6f\xa8\xbd\x7b\xef\xe5\x48\xcf\x72\x72\x40\x7a\x44\x5e\x36\x3a\x39\x9e\x15\xc5\x5b\xf2\x9f\x71\x70\xc6\xd3\x8f\xd1\x31\xe9\x95\xed\xef\xeb\x47\xb5\xec\x7f\x9b\xf1\x80\x8a\x79\x31\x9a\x44\xa6\xc9\x7c\x83\x62\xfc\xb2\xaf\xc2\xba\x65\x0e\xbc\xc7\xa1\xe0\x24\xa1\xa0\x23\x0e\x93\x8a\xf0\x28\x09\x79\x24\x6a\xb4\x47\x7a\xa9\x76\x97\xa4\xf0\xe9\xa4\xef\x6f\x7d\x63\x28\xc3\xf2\xfd\xff\x0d\x66\xc6\xb5\x13\xa0\xc5\xbb\x6b\xe9\xf8\xd6\xcd\x9b\xef\xdf\x0d\xda\x37\xbd\x47\x8c\x1d\xf5\xfe\xd7\x80\xe7\x9a\x25\x67\x7f\x7e\xb8\xd9\x6c\xa2\x95\x52\x2b\xe1\x7f\x78\xd8\x77\x53\x62\x56\xf3\xe8\x83\x09\x80\x99\xad\xcc\xa1\xc0\x25\xea\xc5\x80\x7c\xdb\x62\x49\x62\xff\xc3\xb8\x24\xf6\xbf\xfd\xfd\xbf\x00\x00\x00\xff\xff\xb2\x1e\x6f\x68\x0c\x2c\x00\x00") func faucetHtmlBytes() ([]byte, error) { return bindataRead( @@ -85,7 +85,7 @@ func faucetHtml() (*asset, error) { } info := bindataFileInfo{name: "faucet.html", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xdb, 0xa2, 0x98, 0x44, 0x4b, 0x50, 0xf8, 0xa1, 0xac, 0x4a, 0x76, 0x2e, 0xcc, 0x3d, 0xcb, 0x81, 0x9e, 0x2a, 0xaa, 0x87, 0xf5, 0x9d, 0x53, 0x4, 0x8a, 0xdd, 0x5a, 0xfe, 0xd3, 0xc3, 0xf, 0x11}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc5, 0x8d, 0xb, 0x7a, 0xfd, 0x70, 0x68, 0x68, 0xd2, 0xd8, 0xf3, 0xf6, 0xac, 0x72, 0xed, 0xc2, 0x76, 0x18, 0x2d, 0x1, 0xe5, 0x3b, 0x55, 0xb, 0xce, 0xfc, 0xb6, 0xd5, 0x59, 0xc3, 0x94, 0x5b}} return a, nil } diff --git a/cmd/geth/accountcmd_test.go b/cmd/geth/accountcmd_test.go index 6213e5195..9455eeda3 100644 --- a/cmd/geth/accountcmd_test.go +++ b/cmd/geth/accountcmd_test.go @@ -102,6 +102,7 @@ func TestAccountImport(t *testing.T) { }, } for _, test := range tests { + test := test t.Run(test.name, func(t *testing.T) { t.Parallel() importAccountWithExpect(t, test.key, test.output) @@ -178,11 +179,8 @@ Fatal: could not decrypt key with given password } func TestUnlockFlag(t *testing.T) { - datadir := tmpDatadirWithKeystore(t) - geth := runGeth(t, - "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", - "js", "testdata/empty.js") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "js", "testdata/empty.js") geth.Expect(` Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 !! Unsupported terminal, password will be echoed. 
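A side note on the `test := test` line added to TestAccountImport above: t.Parallel() defers the subtest body until the surrounding range loop has finished, so without the per-iteration copy every parallel subtest would observe the final table entry. A self-contained illustration of the pattern, not taken from the diff (the test name and table are made up):

```go
package main

import "testing"

// TestRangeCapture shows why table-driven parallel subtests copy the loop
// variable: the closure must not share the range variable across iterations
// (Go versions before 1.22).
func TestRangeCapture(t *testing.T) {
	for _, tc := range []struct{ name, key string }{
		{"first", "a"},
		{"second", "b"},
	} {
		tc := tc // per-iteration copy, as in TestAccountImport above
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if tc.key == "" {
				t.Fatalf("empty key for %s", tc.name)
			}
		})
	}
}
```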
@@ -202,10 +200,9 @@ Password: {{.InputLine "foobar"}} } func TestUnlockFlagWrongPassword(t *testing.T) { - datadir := tmpDatadirWithKeystore(t) - geth := runGeth(t, - "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "js", "testdata/empty.js") + defer geth.ExpectExit() geth.Expect(` Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 @@ -221,11 +218,9 @@ Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could // https://github.com/ethereum/go-ethereum/issues/1785 func TestUnlockFlagMultiIndex(t *testing.T) { - datadir := tmpDatadirWithKeystore(t) - geth := runGeth(t, - "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--unlock", "0,2", - "js", "testdata/empty.js") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--unlock", "0,2", "js", "testdata/empty.js") + geth.Expect(` Unlocking account 0 | Attempt 1/3 !! Unsupported terminal, password will be echoed. @@ -248,11 +243,9 @@ Password: {{.InputLine "foobar"}} } func TestUnlockFlagPasswordFile(t *testing.T) { - datadir := tmpDatadirWithKeystore(t) - geth := runGeth(t, - "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--password", "testdata/passwords.txt", "--unlock", "0,2", - "js", "testdata/empty.js") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", "testdata/passwords.txt", "--unlock", "0,2", "js", "testdata/empty.js") + geth.ExpectExit() wantMessages := []string{ @@ -268,10 +261,9 @@ func TestUnlockFlagPasswordFile(t *testing.T) { } func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) { - datadir := tmpDatadirWithKeystore(t) - geth := runGeth(t, - "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--password", "testdata/wrong-passwords.txt", "--unlock", "0,2") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", + "testdata/wrong-passwords.txt", "--unlock", "0,2") defer geth.ExpectExit() geth.Expect(` Fatal: Failed to unlock account 0 (could not decrypt key with given password) @@ -280,9 +272,9 @@ Fatal: Failed to unlock account 0 (could not decrypt key with given password) func TestUnlockFlagAmbiguous(t *testing.T) { store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes") - geth := runGeth(t, - "--keystore", store, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore", + store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "js", "testdata/empty.js") defer geth.ExpectExit() @@ -318,9 +310,10 @@ In order to avoid this warning, you need to remove the following duplicate key f func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) { store := filepath.Join("..", "..", "accounts", "keystore", "testdata", 
"dupes") - geth := runGeth(t, - "--keystore", store, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore", + store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a") + defer geth.ExpectExit() // Helper for the expect template, returns absolute keystore path. diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index e3a53d44a..e30f661be 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -18,9 +18,9 @@ package main import ( "encoding/json" + "errors" "fmt" "os" - "path/filepath" "runtime" "strconv" "sync/atomic" @@ -28,16 +28,16 @@ import ( "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/console/prompt" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth/downloader" - "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/node" "gopkg.in/urfave/cli.v1" ) @@ -64,7 +64,8 @@ It expects the genesis file as argument.`, Usage: "Dumps genesis block JSON configuration to stdout", ArgsUsage: "", Flags: []cli.Flag{ - utils.DataDirFlag, + utils.MainnetFlag, + utils.TestnetFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -88,11 +89,15 @@ The dumpgenesis command dumps the genesis block configuration in JSON format to utils.MetricsHTTPFlag, utils.MetricsPortFlag, utils.MetricsEnableInfluxDBFlag, + utils.MetricsEnableInfluxDBV2Flag, utils.MetricsInfluxDBEndpointFlag, utils.MetricsInfluxDBDatabaseFlag, utils.MetricsInfluxDBUsernameFlag, utils.MetricsInfluxDBPasswordFlag, utils.MetricsInfluxDBTagsFlag, + utils.MetricsInfluxDBTokenFlag, + utils.MetricsInfluxDBBucketFlag, + utils.MetricsInfluxDBOrganizationFlag, utils.TxLookupLimitFlag, }, Category: "BLOCKCHAIN COMMANDS", @@ -148,68 +153,26 @@ be gzipped.`, Category: "BLOCKCHAIN COMMANDS", Description: ` The export-preimages command export hash preimages to an RLP encoded stream`, - } - copydbCommand = cli.Command{ - Action: utils.MigrateFlags(copyDb), - Name: "copydb", - Usage: "Create a local chain from a target chaindata folder", - ArgsUsage: "", - Flags: []cli.Flag{ - utils.DataDirFlag, - utils.CacheFlag, - utils.SyncModeFlag, - utils.FakePoWFlag, - utils.TxLookupLimitFlag, - utils.TestnetFlag, - }, - Category: "BLOCKCHAIN COMMANDS", - Description: ` -The first argument must be the directory containing the blockchain to download from`, - } - removedbCommand = cli.Command{ - Action: utils.MigrateFlags(removeDB), - Name: "removedb", - Usage: "Remove blockchain and state databases", - ArgsUsage: " ", - Flags: []cli.Flag{ - utils.DataDirFlag, - }, - Category: "BLOCKCHAIN COMMANDS", - Description: ` -Remove blockchain and state databases`, } dumpCommand = cli.Command{ Action: utils.MigrateFlags(dump), Name: "dump", Usage: "Dump a specific block from storage", - ArgsUsage: "[ | ]...", + ArgsUsage: "[? 
| ]", Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.SyncModeFlag, utils.IterativeOutputFlag, utils.ExcludeCodeFlag, utils.ExcludeStorageFlag, utils.IncludeIncompletesFlag, + utils.StartKeyFlag, + utils.DumpLimitFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` -The arguments are interpreted as block numbers or hashes. -Use "ethereum dump 0" to dump the genesis block.`, - } - inspectCommand = cli.Command{ - Action: utils.MigrateFlags(inspect), - Name: "inspect", - Usage: "Inspect the storage size for each type of data in the database", - ArgsUsage: " ", - Flags: []cli.Flag{ - utils.DataDirFlag, - utils.AncientFlag, - utils.CacheFlag, - utils.TestnetFlag, - utils.SyncModeFlag, - }, - Category: "BLOCKCHAIN COMMANDS", +This command dumps out the state for a given block (or latest, if none provided). +`, } ) @@ -236,7 +199,7 @@ func initGenesis(ctx *cli.Context) error { defer stack.Close() for _, name := range []string{"chaindata", "lightchaindata"} { - chaindb, err := stack.OpenDatabase(name, 0, 0, "") + chaindb, err := stack.OpenDatabase(name, 0, 0, "", false) if err != nil { utils.Fatalf("Failed to open database: %v", err) } @@ -251,6 +214,7 @@ func initGenesis(ctx *cli.Context) error { } func dumpGenesis(ctx *cli.Context) error { + // TODO(rjl493456442) support loading from the custom datadir genesis := utils.MakeGenesis(ctx) if genesis == nil { genesis = core.DefaultGenesisBlock() @@ -273,7 +237,7 @@ func importChain(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, db := utils.MakeChain(ctx, stack, false) + chain, db := utils.MakeChain(ctx, stack) defer db.Close() // Start periodically gathering memory profiles @@ -313,17 +277,7 @@ func importChain(ctx *cli.Context) error { fmt.Printf("Import done in %v.\n\n", time.Since(start)) // Output pre-compaction stats mostly to see the import trashing - stats, err := db.Stat("leveldb.stats") - if err != nil { - utils.Fatalf("Failed to read database stats: %v", err) - } - fmt.Println(stats) - - ioStats, err := db.Stat("leveldb.iostats") - if err != nil { - utils.Fatalf("Failed to read database iostats: %v", err) - } - fmt.Println(ioStats) + showLeveldbStats(db) // Print the memory statistics used by the importing mem := new(runtime.MemStats) @@ -341,22 +295,12 @@ func importChain(ctx *cli.Context) error { // Compact the entire database to more accurately measure disk io and print the stats start = time.Now() fmt.Println("Compacting entire database...") - if err = db.Compact(nil, nil); err != nil { + if err := db.Compact(nil, nil); err != nil { utils.Fatalf("Compaction failed: %v", err) } fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) - stats, err = db.Stat("leveldb.stats") - if err != nil { - utils.Fatalf("Failed to read database stats: %v", err) - } - fmt.Println(stats) - - ioStats, err = db.Stat("leveldb.iostats") - if err != nil { - utils.Fatalf("Failed to read database iostats: %v", err) - } - fmt.Println(ioStats) + showLeveldbStats(db) return importErr } @@ -368,7 +312,7 @@ func exportChain(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, _ := utils.MakeChain(ctx, stack, true) + chain, _ := utils.MakeChain(ctx, stack) start := time.Now() var err error @@ -385,6 +329,9 @@ func exportChain(ctx *cli.Context) error { if first < 0 || last < 0 { utils.Fatalf("Export error: block number must be greater than 0\n") } + if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() { + utils.Fatalf("Export error: block number %d larger 
than head block %d\n", uint64(last), head.NumberU64()) + } err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last)) } @@ -404,7 +351,7 @@ func importPreimages(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - db := utils.MakeChainDatabase(ctx, stack) + db := utils.MakeChainDatabase(ctx, stack, false) start := time.Now() if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil { @@ -423,7 +370,7 @@ func exportPreimages(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - db := utils.MakeChainDatabase(ctx, stack) + db := utils.MakeChainDatabase(ctx, stack, true) start := time.Now() if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil { @@ -433,171 +380,89 @@ func exportPreimages(ctx *cli.Context) error { return nil } -func copyDb(ctx *cli.Context) error { - // Ensure we have a source chain directory to copy - if len(ctx.Args()) < 1 { - utils.Fatalf("Source chaindata directory path argument missing") - } - if len(ctx.Args()) < 2 { - utils.Fatalf("Source ancient chain directory path argument missing") +func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) { + db := utils.MakeChainDatabase(ctx, stack, true) + var header *types.Header + if ctx.NArg() > 1 { + return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg()) } - // Initialize a new chain for the running node to sync into + if ctx.NArg() == 1 { + arg := ctx.Args().First() + if hashish(arg) { + hash := common.HexToHash(arg) + if number := rawdb.ReadHeaderNumber(db, hash); number != nil { + header = rawdb.ReadHeader(db, hash, *number) + } else { + return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash) + } + } else { + number, err := strconv.Atoi(arg) + if err != nil { + return nil, nil, common.Hash{}, err + } + if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) { + header = rawdb.ReadHeader(db, hash, uint64(number)) + } else { + return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number) + } + } + } else { + // Use latest + header = rawdb.ReadHeadHeader(db) + } + if header == nil { + return nil, nil, common.Hash{}, errors.New("no head block found") + } + startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name)) + var start common.Hash + switch len(startArg) { + case 0: // common.Hash + case 32: + start = common.BytesToHash(startArg) + case 20: + start = crypto.Keccak256Hash(startArg) + log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex()) + default: + return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 
20 or 32 hex-encoded bytes required", startArg) + } + var conf = &state.DumpConfig{ + SkipCode: ctx.Bool(utils.ExcludeCodeFlag.Name), + SkipStorage: ctx.Bool(utils.ExcludeStorageFlag.Name), + OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name), + Start: start.Bytes(), + Max: ctx.Uint64(utils.DumpLimitFlag.Name), + } + log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(), + "skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage, + "start", hexutil.Encode(conf.Start), "limit", conf.Max) + return conf, db, header.Root, nil +} + +func dump(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, chainDb := utils.MakeChain(ctx, stack, false) - syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode) - - var syncBloom *trie.SyncBloom - if syncMode == downloader.FastSync { - syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb) - } - dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil) - - // Create a source peer to satisfy downloader requests from - db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "") + conf, db, root, err := parseDumpConfig(ctx, stack) if err != nil { return err } - hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false }) + state, err := state.New(root, state.NewDatabase(db), nil) if err != nil { return err } - peer := downloader.NewFakePeer("local", db, hc, dl) - if err = dl.RegisterPeer("local", 63, peer); err != nil { - return err - } - // Synchronise with the simulated peer - start := time.Now() - - currentHeader := hc.CurrentHeader() - if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil { - return err - } - for dl.Synchronising() { - time.Sleep(10 * time.Millisecond) - } - fmt.Printf("Database copy done in %v\n", time.Since(start)) - - // Compact the entire database to remove any sync overhead - start = time.Now() - fmt.Println("Compacting entire database...") - if err = db.Compact(nil, nil); err != nil { - utils.Fatalf("Compaction failed: %v", err) - } - fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) - return nil -} - -func removeDB(ctx *cli.Context) error { - stack, config := makeConfigNode(ctx) - - // Remove the full node state database - path := stack.ResolvePath("chaindata") - if common.FileExist(path) { - confirmAndRemoveDB(path, "full node state database") - } else { - log.Info("Full node state database missing", "path", path) - } - // Remove the full node ancient database - path = config.Eth.DatabaseFreezer - switch { - case path == "": - path = filepath.Join(stack.ResolvePath("chaindata"), "ancient") - case !filepath.IsAbs(path): - path = config.Node.ResolvePath(path) - } - if common.FileExist(path) { - confirmAndRemoveDB(path, "full node ancient database") + if ctx.Bool(utils.IterativeOutputFlag.Name) { + state.IterativeDump(conf, json.NewEncoder(os.Stdout)) } else { - log.Info("Full node ancient database missing", "path", path) - } - // Remove the light node database - path = stack.ResolvePath("lightchaindata") - if common.FileExist(path) { - confirmAndRemoveDB(path, "light node database") - } else { - log.Info("Light node database missing", "path", path) - } - return nil -} - -// confirmAndRemoveDB prompts the user for a last confirmation and removes the -// folder if 
accepted. -func confirmAndRemoveDB(database string, kind string) { - confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database)) - switch { - case err != nil: - utils.Fatalf("%v", err) - case !confirm: - log.Info("Database deletion skipped", "path", database) - default: - start := time.Now() - filepath.Walk(database, func(path string, info os.FileInfo, err error) error { - // If we're at the top level folder, recurse into - if path == database { - return nil - } - // Delete all the files, but not subfolders - if !info.IsDir() { - os.Remove(path) - return nil - } - return filepath.SkipDir - }) - log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start))) - } -} - -func dump(ctx *cli.Context) error { - stack, _ := makeConfigNode(ctx) - defer stack.Close() - - chain, chainDb := utils.MakeChain(ctx, stack, true) - defer chainDb.Close() - for _, arg := range ctx.Args() { - var block *types.Block - if hashish(arg) { - block = chain.GetBlockByHash(common.HexToHash(arg)) - } else { - num, _ := strconv.Atoi(arg) - block = chain.GetBlockByNumber(uint64(num)) - } - if block == nil { - fmt.Println("{}") - utils.Fatalf("block not found") - } else { - state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil) - if err != nil { - utils.Fatalf("could not create new state: %v", err) - } - excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name) - excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name) - includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name) - if ctx.Bool(utils.IterativeOutputFlag.Name) { - state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout)) - } else { - if includeMissing { - fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" + - " otherwise the accounts will overwrite each other in the resulting mapping.") - } - fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false)) - } + if conf.OnlyWithAddresses { + fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+ + " otherwise the accounts will overwrite each other in the resulting mapping.") + return fmt.Errorf("incompatible options") } + fmt.Println(string(state.Dump(conf))) } return nil } -func inspect(ctx *cli.Context) error { - node, _ := makeConfigNode(ctx) - defer node.Close() - - _, chainDb := utils.MakeChain(ctx, node, true) - defer chainDb.Close() - - return rawdb.InspectDatabase(chainDb) -} - // hashish returns true for strings that look like hashes. 
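As context for the reworked dump command above: parseDumpConfig accepts a start key that is either a 32-byte hash or a 20-byte address, and in the address case it derives the trie key with Keccak256 before iterating. A minimal standalone sketch of that conversion (illustration only, not part of the diff; the address is just an example value reused from the test fixtures in this change set):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// A 20-byte account address, standing in for a user-supplied start key.
	addr := common.HexToAddress("0x8605cdbbdb6d264aa742e77020dcbc58fcdce182")
	// parseDumpConfig hashes 20-byte inputs with Keccak256 to obtain the
	// secure-trie key it starts from; 32-byte inputs are used as-is.
	start := crypto.Keccak256Hash(addr.Bytes())
	fmt.Printf("address %s -> start key %s\n", addr.Hex(), start.Hex())
}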
func hashish(x string) bool { _, err := strconv.Atoi(x) diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 772dd63d4..01f651faf 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -20,19 +20,22 @@ import ( "bufio" "errors" "fmt" - "github.com/ethereum/go-ethereum/internal/debug" - "github.com/ethereum/go-ethereum/p2p/enode" + "math/big" "os" "reflect" "unicode" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/catalyst" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" "github.com/naoina/toml" ) @@ -43,7 +46,7 @@ var ( Name: "dumpconfig", Usage: "Show configuration values", ArgsUsage: "", - Flags: append(append(nodeFlags, rpcFlags...), whisperFlags...), + Flags: append(nodeFlags, rpcFlags...), Category: "MISCELLANEOUS COMMANDS", Description: `The dumpconfig command shows configuration values.`, } @@ -63,7 +66,12 @@ var tomlSettings = toml.Config{ return field }, MissingField: func(rt reflect.Type, field string) error { - link := "" + id := fmt.Sprintf("%s.%s", rt.String(), field) + if deprecated(id) { + log.Warn("Config field is deprecated and won't have an effect", "name", id) + return nil + } + var link string if unicode.IsUpper(rune(rt.Name()[0])) && rt.PkgPath() != "main" { link = fmt.Sprintf(", see https://godoc.org/%s#%s for available fields", rt.PkgPath(), rt.Name()) } @@ -75,21 +83,11 @@ type ethstatsConfig struct { URL string `toml:",omitempty"` } -// whisper has been deprecated, but clients out there might still have [Shh] -// in their config, which will crash. Cut them some slack by keeping the -// config, and displaying a message that those config switches are ineffectual. -// To be removed circa Q1 2021 -- @gballet. -type whisperDeprecatedConfig struct { - MaxMessageSize uint32 `toml:",omitempty"` - MinimumAcceptedPOW float64 `toml:",omitempty"` - RestrictConnectionBetweenLightClients bool `toml:",omitempty"` -} - type gethConfig struct { - Eth eth.Config - Shh whisperDeprecatedConfig + Eth ethconfig.Config Node node.Config Ethstats ethstatsConfig + Metrics metrics.Config } func loadConfig(file string, cfg *gethConfig) error { @@ -121,8 +119,9 @@ func defaultNodeConfig() node.Config { func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { // Load defaults. cfg := gethConfig{ - Eth: eth.DefaultConfig, - Node: defaultNodeConfig(), + Eth: ethconfig.Defaults, + Node: defaultNodeConfig(), + Metrics: metrics.DefaultConfig, } // Load config file. @@ -130,10 +129,6 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { if err := loadConfig(file, &cfg); err != nil { utils.Fatalf("%v", err) } - - if cfg.Shh != (whisperDeprecatedConfig{}) { - log.Warn("Deprecated whisper config detected. Whisper has been moved to github.com/ethereum/whisper") - } } // Apply flags. @@ -146,28 +141,30 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { if ctx.GlobalIsSet(utils.EthStatsURLFlag.Name) { cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name) } - utils.SetShhConfig(ctx, stack) + applyMetricConfig(ctx, &cfg) return stack, cfg } -// enableWhisper returns true in case one of the whisper flags is set. 
-func checkWhisper(ctx *cli.Context) { - for _, flag := range whisperFlags { - if ctx.GlobalIsSet(flag.GetName()) { - log.Warn("deprecated whisper flag detected. Whisper has been moved to github.com/ethereum/whisper") - } - } -} - // makeFullNode loads geth configuration and creates the Ethereum backend. func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { stack, cfg := makeConfigNode(ctx) + if ctx.GlobalIsSet(utils.OverrideLondonFlag.Name) { + cfg.Eth.OverrideLondon = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideLondonFlag.Name)) + } + backend, eth := utils.RegisterEthService(stack, &cfg.Eth) debug.ID = enode.PubkeyToIDV4(&cfg.Node.NodeKey().PublicKey).TerminalString() - backend := utils.RegisterEthService(stack, &cfg.Eth) + // Configure catalyst. + if ctx.GlobalBool(utils.CatalystFlag.Name) { + if eth == nil { + utils.Fatalf("Catalyst does not work in light client mode.") + } + if err := catalyst.Register(stack, eth); err != nil { + utils.Fatalf("%v", err) + } + } - checkWhisper(ctx) // Configure GraphQL if requested if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) { utils.RegisterGraphQLService(stack, backend, cfg.Node) @@ -207,3 +204,59 @@ func dumpConfig(ctx *cli.Context) error { return nil } + +func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) { + if ctx.GlobalIsSet(utils.MetricsEnabledFlag.Name) { + cfg.Metrics.Enabled = ctx.GlobalBool(utils.MetricsEnabledFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsEnabledExpensiveFlag.Name) { + cfg.Metrics.EnabledExpensive = ctx.GlobalBool(utils.MetricsEnabledExpensiveFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsHTTPFlag.Name) { + cfg.Metrics.HTTP = ctx.GlobalString(utils.MetricsHTTPFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsPortFlag.Name) { + cfg.Metrics.Port = ctx.GlobalInt(utils.MetricsPortFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsEnableInfluxDBFlag.Name) { + cfg.Metrics.EnableInfluxDB = ctx.GlobalBool(utils.MetricsEnableInfluxDBFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBEndpointFlag.Name) { + cfg.Metrics.InfluxDBEndpoint = ctx.GlobalString(utils.MetricsInfluxDBEndpointFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBDatabaseFlag.Name) { + cfg.Metrics.InfluxDBDatabase = ctx.GlobalString(utils.MetricsInfluxDBDatabaseFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBUsernameFlag.Name) { + cfg.Metrics.InfluxDBUsername = ctx.GlobalString(utils.MetricsInfluxDBUsernameFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBPasswordFlag.Name) { + cfg.Metrics.InfluxDBPassword = ctx.GlobalString(utils.MetricsInfluxDBPasswordFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBTagsFlag.Name) { + cfg.Metrics.InfluxDBTags = ctx.GlobalString(utils.MetricsInfluxDBTagsFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsEnableInfluxDBV2Flag.Name) { + cfg.Metrics.EnableInfluxDBV2 = ctx.GlobalBool(utils.MetricsEnableInfluxDBV2Flag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBTokenFlag.Name) { + cfg.Metrics.InfluxDBToken = ctx.GlobalString(utils.MetricsInfluxDBTokenFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBBucketFlag.Name) { + cfg.Metrics.InfluxDBBucket = ctx.GlobalString(utils.MetricsInfluxDBBucketFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBOrganizationFlag.Name) { + cfg.Metrics.InfluxDBOrganization = ctx.GlobalString(utils.MetricsInfluxDBOrganizationFlag.Name) + } +} + +func deprecated(field string) bool { + switch field { + case "ethconfig.Config.EVMInterpreter": + return true + case "ethconfig.Config.EWASMInterpreter": + return true + 
default: + return false + } +} diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index e936ddb0c..95218f17f 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -18,11 +18,8 @@ package main import ( "fmt" - "os" - "os/signal" "path/filepath" "strings" - "syscall" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/console" @@ -38,12 +35,12 @@ var ( Action: utils.MigrateFlags(localConsole), Name: "console", Usage: "Start an interactive JavaScript environment", - Flags: append(append(append(nodeFlags, rpcFlags...), consoleFlags...), whisperFlags...), + Flags: append(append(nodeFlags, rpcFlags...), consoleFlags...), Category: "CONSOLE COMMANDS", Description: ` The Geth console is an interactive shell for the JavaScript runtime environment which exposes a node admin interface as well as the Ðapp JavaScript API. -See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console.`, +See https://geth.ethereum.org/docs/interface/javascript-console.`, } attachCommand = cli.Command{ @@ -56,7 +53,7 @@ See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console.`, Description: ` The Geth console is an interactive shell for the JavaScript runtime environment which exposes a node admin interface as well as the Ðapp JavaScript API. -See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console. +See https://geth.ethereum.org/docs/interface/javascript-console. This command allows to open a console on a running geth node.`, } @@ -69,7 +66,7 @@ This command allows to open a console on a running geth node.`, Category: "CONSOLE COMMANDS", Description: ` The JavaScript VM exposes a node admin interface as well as the Ðapp -JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console`, +JavaScript API. See https://geth.ethereum.org/docs/interface/javascript-console`, } ) @@ -160,7 +157,7 @@ func remoteConsole(ctx *cli.Context) error { // dialRPC returns a RPC client which connects to the given endpoint. // The check for empty endpoint implements the defaulting logic -// for "geth attach" and "geth monitor" with no argument. +// for "geth attach" with no argument. func dialRPC(endpoint string) (*rpc.Client, error) { if endpoint == "" { endpoint = node.DefaultIPCEndpoint(clientIdentifier) @@ -205,13 +202,10 @@ func ephemeralConsole(ctx *cli.Context) error { utils.Fatalf("Failed to execute %s: %v", file, err) } } - // Wait for pending callbacks, but stop for Ctrl-C. - abort := make(chan os.Signal, 1) - signal.Notify(abort, syscall.SIGINT, syscall.SIGTERM) go func() { - <-abort - os.Exit(0) + stack.Wait() + console.Stop(false) }() console.Stop(true) diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index 6c100e18d..c3f41b187 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -35,16 +35,25 @@ const ( httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0" ) +// spawns geth with the given command line args, using a set of flags to minimise +// memory and disk IO. If the args don't set --datadir, the +// child g gets a temporary data directory. +func runMinimalGeth(t *testing.T, args ...string) *testgeth { + // --ropsten to make the 'writing genesis to disk' faster (no accounts) + // --networkid=1337 to avoid cache bump + // --syncmode=full to avoid allocating fast sync bloom + allArgs := []string{"--ropsten", "--networkid", "1337", "--syncmode=full", "--port", "0", + "--nat", "none", "--nodiscover", "--maxpeers", "0", "--cache", "64"} + return runGeth(t, append(allArgs, args...)...) 
+} + // Tests that a node embedded within a console can be started up properly and // then terminated by closing the input stream. func TestConsoleWelcome(t *testing.T) { coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" // Start a geth console, make sure it's cleaned up and terminate the console - geth := runGeth(t, - "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", - "--etherbase", coinbase, - "console") + geth := runMinimalGeth(t, "--miner.etherbase", coinbase, "console") // Gather all the infos the welcome message needs to contain geth.SetTemplateFunc("goos", func() string { return runtime.GOOS }) @@ -66,16 +75,20 @@ at block: 0 ({{niltime}}) datadir: {{.Datadir}} modules: {{apis}} +To exit, press ctrl-d > {{.InputLine "exit"}} `) geth.ExpectExit() } // Tests that a console can be attached to a running node via various means. -func TestIPCAttachWelcome(t *testing.T) { +func TestAttachWelcome(t *testing.T) { + var ( + ipc string + httpPort string + wsPort string + ) // Configure the instance for IPC attachment - coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - var ipc string if runtime.GOOS == "windows" { ipc = `\\.\pipe\geth` + strconv.Itoa(trulyRandInt(100000, 999999)) } else { @@ -83,51 +96,28 @@ func TestIPCAttachWelcome(t *testing.T) { defer os.RemoveAll(ws) ipc = filepath.Join(ws, "geth.ipc") } - geth := runGeth(t, - "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", - "--etherbase", coinbase, "--ipcpath", ipc) - - defer func() { - geth.Interrupt() - geth.ExpectExit() - }() - - waitForEndpoint(t, ipc, 3*time.Second) - testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs) - -} - -func TestHTTPAttachWelcome(t *testing.T) { - coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P - geth := runGeth(t, - "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", - "--etherbase", coinbase, "--http", "--http.port", port) - defer func() { - geth.Interrupt() - geth.ExpectExit() - }() - - endpoint := "http://127.0.0.1:" + port - waitForEndpoint(t, endpoint, 3*time.Second) - testAttachWelcome(t, geth, endpoint, httpAPIs) -} - -func TestWSAttachWelcome(t *testing.T) { - coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P - - geth := runGeth(t, - "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", - "--etherbase", coinbase, "--ws", "--ws.port", port) - defer func() { - geth.Interrupt() - geth.ExpectExit() - }() - - endpoint := "ws://127.0.0.1:" + port - waitForEndpoint(t, endpoint, 3*time.Second) - testAttachWelcome(t, geth, endpoint, httpAPIs) + // And HTTP + WS attachment + p := trulyRandInt(1024, 65533) // Yeah, sometimes this will fail, sorry :P + httpPort = strconv.Itoa(p) + wsPort = strconv.Itoa(p + 1) + geth := runMinimalGeth(t, "--miner.etherbase", "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182", + "--ipcpath", ipc, + "--http", "--http.port", httpPort, + "--ws", "--ws.port", wsPort) + t.Run("ipc", func(t *testing.T) { + waitForEndpoint(t, ipc, 3*time.Second) + testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs) + }) + t.Run("http", func(t *testing.T) { + endpoint := "http://127.0.0.1:" + httpPort + waitForEndpoint(t, endpoint, 3*time.Second) + testAttachWelcome(t, geth, endpoint, httpAPIs) + }) + t.Run("ws", func(t *testing.T) { + endpoint := "ws://127.0.0.1:" + wsPort + waitForEndpoint(t, endpoint, 3*time.Second) + 
testAttachWelcome(t, geth, endpoint, httpAPIs) + }) } func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) { @@ -159,6 +149,7 @@ at block: 0 ({{niltime}}){{if ipc}} datadir: {{datadir}}{{end}} modules: {{apis}} +To exit, press ctrl-d > {{.InputLine "exit" }} `) attach.ExpectExit() diff --git a/cmd/geth/dao_test.go b/cmd/geth/dao_test.go index 6c36771e9..b7f26b365 100644 --- a/cmd/geth/dao_test.go +++ b/cmd/geth/dao_test.go @@ -115,15 +115,15 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc if err := ioutil.WriteFile(json, []byte(genesis), 0600); err != nil { t.Fatalf("test %d: failed to write genesis file: %v", test, err) } - runGeth(t, "--datadir", datadir, "init", json).WaitExit() + runGeth(t, "--datadir", datadir, "--networkid", "1337", "init", json).WaitExit() } else { // Force chain initialization - args := []string{"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir} + args := []string{"--port", "0", "--networkid", "1337", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir} runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...).WaitExit() } // Retrieve the DAO config flag from the database path := filepath.Join(datadir, "geth", "chaindata") - db, err := rawdb.NewLevelDBDatabase(path, 0, 0, "") + db, err := rawdb.NewLevelDBDatabase(path, 0, 0, "", false) if err != nil { t.Fatalf("test %d: failed to open test database: %v", test, err) } diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go new file mode 100644 index 000000000..7c7b358fc --- /dev/null +++ b/cmd/geth/dbcmd.go @@ -0,0 +1,485 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
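The new cmd/geth/dbcmd.go that follows gathers removedb and a db command family (inspect, stats, compact, get, delete, put, dumptrie, freezer-index) under DATABASE COMMANDS. As a rough standalone sketch (not part of the diff; the key and value are hypothetical) of the key handling these subcommands share, hex-decoding a user-supplied key before a raw Get, with an in-memory database standing in for chaindata:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// An in-memory database stands in for the node's chaindata directory.
	db := rawdb.NewMemoryDatabase()
	if err := db.Put([]byte("demo-key"), []byte{0xca, 0xfe}); err != nil {
		panic(err)
	}
	// Keys arrive hex-encoded on the command line ("0x...") and are decoded
	// before the raw Get, mirroring what dbGet below does.
	keyHex := hexutil.Encode([]byte("demo-key"))
	key, err := hexutil.Decode(keyHex)
	if err != nil {
		panic(err)
	}
	val, err := db.Get(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("key %#x: %#x\n", key, val)
}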
+ +package main + +import ( + "fmt" + "os" + "path/filepath" + "sort" + "strconv" + "time" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/console/prompt" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie" + "gopkg.in/urfave/cli.v1" +) + +var ( + removedbCommand = cli.Command{ + Action: utils.MigrateFlags(removeDB), + Name: "removedb", + Usage: "Remove blockchain and state databases", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + }, + Category: "DATABASE COMMANDS", + Description: ` +Remove blockchain and state databases`, + } + dbCommand = cli.Command{ + Name: "db", + Usage: "Low level database operations", + ArgsUsage: "", + Category: "DATABASE COMMANDS", + Subcommands: []cli.Command{ + dbInspectCmd, + dbStatCmd, + dbCompactCmd, + dbGetCmd, + dbDeleteCmd, + dbPutCmd, + dbGetSlotsCmd, + dbDumpFreezerIndex, + }, + } + dbInspectCmd = cli.Command{ + Action: utils.MigrateFlags(inspect), + Name: "inspect", + ArgsUsage: " ", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.TestnetFlag, + }, + Usage: "Inspect the storage size for each type of data in the database", + Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`, + } + dbStatCmd = cli.Command{ + Action: utils.MigrateFlags(dbStats), + Name: "stats", + Usage: "Print leveldb statistics", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.TestnetFlag, + }, + } + dbCompactCmd = cli.Command{ + Action: utils.MigrateFlags(dbCompact), + Name: "compact", + Usage: "Compact leveldb database. WARNING: May take a very long time", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.TestnetFlag, + utils.CacheFlag, + utils.CacheDatabaseFlag, + }, + Description: `This command performs a database compaction. +WARNING: This operation may take a very long time to finish, and may cause database +corruption if it is aborted during execution'!`, + } + dbGetCmd = cli.Command{ + Action: utils.MigrateFlags(dbGet), + Name: "get", + Usage: "Show the value of a database key", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.TestnetFlag, + }, + Description: "This command looks up the specified database key from the database.", + } + dbDeleteCmd = cli.Command{ + Action: utils.MigrateFlags(dbDelete), + Name: "delete", + Usage: "Delete a database key (WARNING: may corrupt your database)", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.TestnetFlag, + }, + Description: `This command deletes the specified database key from the database. +WARNING: This is a low-level operation which may cause database corruption!`, + } + dbPutCmd = cli.Command{ + Action: utils.MigrateFlags(dbPut), + Name: "put", + Usage: "Set the value of a database key (WARNING: may corrupt your database)", + ArgsUsage: " ", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.TestnetFlag, + }, + Description: `This command sets a given database key to the given value. 
+WARNING: This is a low-level operation which may cause database corruption!`, + } + dbGetSlotsCmd = cli.Command{ + Action: utils.MigrateFlags(dbDumpTrie), + Name: "dumptrie", + Usage: "Show the storage key/values of a given storage trie", + ArgsUsage: " ", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.TestnetFlag, + }, + Description: "This command looks up the specified database key from the database.", + } + dbDumpFreezerIndex = cli.Command{ + Action: utils.MigrateFlags(freezerInspect), + Name: "freezer-index", + Usage: "Dump out the index of a given freezer type", + ArgsUsage: " ", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.TestnetFlag, + }, + Description: "This command displays information about the freezer index.", + } +) + +func removeDB(ctx *cli.Context) error { + stack, config := makeConfigNode(ctx) + + // Remove the full node state database + path := stack.ResolvePath("chaindata") + if common.FileExist(path) { + confirmAndRemoveDB(path, "full node state database") + } else { + log.Info("Full node state database missing", "path", path) + } + // Remove the full node ancient database + path = config.Eth.DatabaseFreezer + switch { + case path == "": + path = filepath.Join(stack.ResolvePath("chaindata"), "ancient") + case !filepath.IsAbs(path): + path = config.Node.ResolvePath(path) + } + if common.FileExist(path) { + confirmAndRemoveDB(path, "full node ancient database") + } else { + log.Info("Full node ancient database missing", "path", path) + } + // Remove the light node database + path = stack.ResolvePath("lightchaindata") + if common.FileExist(path) { + confirmAndRemoveDB(path, "light node database") + } else { + log.Info("Light node database missing", "path", path) + } + return nil +} + +// confirmAndRemoveDB prompts the user for a last confirmation and removes the +// folder if accepted. 
+func confirmAndRemoveDB(database string, kind string) { + confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database)) + switch { + case err != nil: + utils.Fatalf("%v", err) + case !confirm: + log.Info("Database deletion skipped", "path", database) + default: + start := time.Now() + filepath.Walk(database, func(path string, info os.FileInfo, err error) error { + // If we're at the top level folder, recurse into + if path == database { + return nil + } + // Delete all the files, but not subfolders + if !info.IsDir() { + os.Remove(path) + return nil + } + return filepath.SkipDir + }) + log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start))) + } +} + +func inspect(ctx *cli.Context) error { + var ( + prefix []byte + start []byte + ) + if ctx.NArg() > 2 { + return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage) + } + if ctx.NArg() >= 1 { + if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil { + return fmt.Errorf("failed to hex-decode 'prefix': %v", err) + } else { + prefix = d + } + } + if ctx.NArg() >= 2 { + if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil { + return fmt.Errorf("failed to hex-decode 'start': %v", err) + } else { + start = d + } + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + + return rawdb.InspectDatabase(db, prefix, start) +} + +func showLeveldbStats(db ethdb.Stater) { + if stats, err := db.Stat("leveldb.stats"); err != nil { + log.Warn("Failed to read database stats", "error", err) + } else { + fmt.Println(stats) + } + if ioStats, err := db.Stat("leveldb.iostats"); err != nil { + log.Warn("Failed to read database iostats", "error", err) + } else { + fmt.Println(ioStats) + } +} + +func dbStats(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + + showLeveldbStats(db) + return nil +} + +func dbCompact(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack, false) + defer db.Close() + + log.Info("Stats before compaction") + showLeveldbStats(db) + + log.Info("Triggering compaction") + if err := db.Compact(nil, nil); err != nil { + log.Info("Compact err", "error", err) + return err + } + log.Info("Stats after compaction") + showLeveldbStats(db) + return nil +} + +// dbGet shows the value of a given database key +func dbGet(ctx *cli.Context) error { + if ctx.NArg() != 1 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + + key, err := hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the key", "error", err) + return err + } + data, err := db.Get(key) + if err != nil { + log.Info("Get operation failed", "error", err) + return err + } + fmt.Printf("key %#x: %#x\n", key, data) + return nil +} + +// dbDelete deletes a key from the database +func dbDelete(ctx *cli.Context) error { + if ctx.NArg() != 1 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack, false) + defer db.Close() + + key, err := hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the key", "error", err) + return err + } + 
data, err := db.Get(key) + if err == nil { + fmt.Printf("Previous value: %#x\n", data) + } + if err = db.Delete(key); err != nil { + log.Info("Delete operation returned an error", "error", err) + return err + } + return nil +} + +// dbPut overwrite a value in the database +func dbPut(ctx *cli.Context) error { + if ctx.NArg() != 2 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack, false) + defer db.Close() + + var ( + key []byte + value []byte + data []byte + err error + ) + key, err = hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the key", "error", err) + return err + } + value, err = hexutil.Decode(ctx.Args().Get(1)) + if err != nil { + log.Info("Could not decode the value", "error", err) + return err + } + data, err = db.Get(key) + if err == nil { + fmt.Printf("Previous value: %#x\n", data) + } + return db.Put(key, value) +} + +// dbDumpTrie shows the key-value slots of a given storage trie +func dbDumpTrie(ctx *cli.Context) error { + if ctx.NArg() < 1 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + var ( + root []byte + start []byte + max = int64(-1) + err error + ) + if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil { + log.Info("Could not decode the root", "error", err) + return err + } + stRoot := common.BytesToHash(root) + if ctx.NArg() >= 2 { + if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil { + log.Info("Could not decode the seek position", "error", err) + return err + } + } + if ctx.NArg() >= 3 { + if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil { + log.Info("Could not decode the max count", "error", err) + return err + } + } + theTrie, err := trie.New(stRoot, trie.NewDatabase(db)) + if err != nil { + return err + } + var count int64 + it := trie.NewIterator(theTrie.NodeIterator(start)) + for it.Next() { + if max > 0 && count == max { + fmt.Printf("Exiting after %d values\n", count) + break + } + fmt.Printf(" %d. key %#x: %#x\n", count, it.Key, it.Value) + count++ + } + return it.Err +} + +func freezerInspect(ctx *cli.Context) error { + var ( + start, end int64 + disableSnappy bool + err error + ) + if ctx.NArg() < 3 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + kind := ctx.Args().Get(0) + if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok { + var options []string + for opt := range rawdb.FreezerNoSnappy { + options = append(options, opt) + } + sort.Strings(options) + return fmt.Errorf("Could read freezer-type '%v'. 
Available options: %v", kind, options) + } else { + disableSnappy = noSnap + } + if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil { + log.Info("Could read start-param", "error", err) + return err + } + if end, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil { + log.Info("Could read count param", "error", err) + return err + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + path := filepath.Join(stack.ResolvePath("chaindata"), "ancient") + log.Info("Opening freezer", "location", path, "name", kind) + if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy); err != nil { + return err + } else { + f.DumpIndex(start, end) + } + return nil +} diff --git a/cmd/geth/genesis_test.go b/cmd/geth/genesis_test.go index ee3991acd..0563ef3c4 100644 --- a/cmd/geth/genesis_test.go +++ b/cmd/geth/genesis_test.go @@ -81,10 +81,10 @@ func TestCustomGenesis(t *testing.T) { if err := ioutil.WriteFile(json, []byte(tt.genesis), 0600); err != nil { t.Fatalf("test %d: failed to write genesis file: %v", i, err) } - runGeth(t, "--nousb", "--datadir", datadir, "init", json).WaitExit() + runGeth(t, "--datadir", datadir, "init", json).WaitExit() // Query the custom genesis block - geth := runGeth(t, "--nousb", + geth := runGeth(t, "--networkid", "1337", "--syncmode=full", "--cache", "16", "--datadir", datadir, "--maxpeers", "0", "--port", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--exec", tt.query, "console") diff --git a/cmd/geth/les_test.go b/cmd/geth/les_test.go index 259d4a806..151c12c68 100644 --- a/cmd/geth/les_test.go +++ b/cmd/geth/les_test.go @@ -2,7 +2,12 @@ package main import ( "context" + "fmt" + "os" "path/filepath" + "runtime" + "strings" + "sync/atomic" "testing" "time" @@ -95,24 +100,61 @@ func (g *gethrpc) waitSynced() { } } +// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into +// account the set data folders as well as the designated platform we're currently +// running on. +func ipcEndpoint(ipcPath, datadir string) string { + // On windows we can only use plain top-level pipes + if runtime.GOOS == "windows" { + if strings.HasPrefix(ipcPath, `\\.\pipe\`) { + return ipcPath + } + return `\\.\pipe\` + ipcPath + } + // Resolve names into the data directory full paths otherwise + if filepath.Base(ipcPath) == ipcPath { + if datadir == "" { + return filepath.Join(os.TempDir(), ipcPath) + } + return filepath.Join(datadir, ipcPath) + } + return ipcPath +} + +// nextIPC ensures that each ipc pipe gets a unique name. +// On linux, it works well to use ipc pipes all over the filesystem (in datadirs), +// but windows require pipes to sit in "\\.\pipe\". Therefore, to run several +// nodes simultaneously, we need to distinguish between them, which we do by +// the pipe filename instead of folder. +var nextIPC = uint32(0) + func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc { - g := &gethrpc{name: name} - args = append([]string{"--networkid=42", "--port=0", "--nousb"}, args...) + ipcName := fmt.Sprintf("geth-%d.ipc", atomic.AddUint32(&nextIPC, 1)) + args = append([]string{"--networkid=42", "--port=0", "--ipcpath", ipcName}, args...) t.Logf("Starting %v with rpc: %v", name, args) - g.geth = runGeth(t, args...) - // wait before we can attach to it. 
TODO: probe for it properly - time.Sleep(1 * time.Second) + + g := &gethrpc{ + name: name, + geth: runGeth(t, args...), + } + ipcpath := ipcEndpoint(ipcName, g.geth.Datadir) + // We can't know exactly how long geth will take to start, so we try 10 + // times over a 5 second period. var err error - ipcpath := filepath.Join(g.geth.Datadir, "geth.ipc") - g.rpc, err = rpc.Dial(ipcpath) - if err != nil { - t.Fatalf("%v rpc connect: %v", name, err) + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if g.rpc, err = rpc.Dial(ipcpath); err == nil { + return g + } } - return g + t.Fatalf("%v rpc connect to %v: %v", name, ipcpath, err) + return nil } func initGeth(t *testing.T) string { - g := runGeth(t, "--nousb", "--networkid=42", "init", "./testdata/clique.json") + args := []string{"--networkid=42", "init", "./testdata/clique.json"} + t.Logf("Initializing geth: %v ", args) + g := runGeth(t, args...) datadir := g.Datadir g.WaitExit() return datadir @@ -120,15 +162,16 @@ func initGeth(t *testing.T) string { func startLightServer(t *testing.T) *gethrpc { datadir := initGeth(t) - runGeth(t, "--nousb", "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv").WaitExit() + t.Logf("Importing keys to geth") + runGeth(t, "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv", "--lightkdf").WaitExit() account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105" - server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1") + server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1", "--verbosity=4") return server } func startClient(t *testing.T, name string) *gethrpc { datadir := initGeth(t) - return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1") + return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4") } func TestPriorityClient(t *testing.T) { @@ -151,7 +194,7 @@ func TestPriorityClient(t *testing.T) { prioCli := startClient(t, "prioCli") defer prioCli.killAndWait() // 3_000_000_000 once we move to Go 1.13 - tokens := 3000000000 + tokens := uint64(3000000000) lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens) prioCli.addPeer(lightServer) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 176f5374a..8ed58caf5 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -19,9 +19,7 @@ package main import ( "fmt" - "math" "os" - godebug "runtime/debug" "sort" "strconv" "strings" @@ -41,8 +39,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" - gopsutil "github.com/shirou/gopsutil/mem" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) const ( @@ -61,14 +58,15 @@ var ( utils.UnlockedAccountFlag, utils.PasswordFileFlag, utils.BootnodesFlag, - utils.LegacyBootnodesV4Flag, - utils.LegacyBootnodesV5Flag, utils.DataDirFlag, utils.AncientFlag, + utils.MinFreeDiskSpaceFlag, utils.KeyStoreDirFlag, utils.ExternalSignerFlag, utils.NoUSBFlag, + utils.USBFlag, utils.SmartCardDaemonPathFlag, + 
utils.OverrideLondonFlag, utils.EthashCacheDirFlag, utils.EthashCachesInMemoryFlag, utils.EthashCachesOnDiskFlag, @@ -94,17 +92,17 @@ var ( utils.SnapshotFlag, utils.TxLookupLimitFlag, utils.LightServeFlag, - utils.LegacyLightServFlag, utils.LightIngressFlag, utils.LightEgressFlag, utils.LightMaxPeersFlag, - utils.LegacyLightPeersFlag, utils.LightNoPruneFlag, utils.LightKDFFlag, utils.UltraLightServersFlag, utils.UltraLightFractionFlag, utils.UltraLightOnlyAnnounceFlag, + utils.LightNoSyncServeFlag, utils.WhitelistFlag, + utils.BloomFilterSizeFlag, utils.CacheFlag, utils.CacheDatabaseFlag, utils.CacheTrieFlag, @@ -113,22 +111,18 @@ var ( utils.CacheGCFlag, utils.CacheSnapshotFlag, utils.CacheNoPrefetchFlag, + utils.CachePreimagesFlag, utils.ListenPortFlag, utils.MaxPeersFlag, utils.MaxPendingPeersFlag, utils.MiningEnabledFlag, utils.MinerThreadsFlag, - utils.LegacyMinerThreadsFlag, utils.MinerNotifyFlag, - utils.MinerGasTargetFlag, utils.LegacyMinerGasTargetFlag, utils.MinerGasLimitFlag, utils.MinerGasPriceFlag, - utils.LegacyMinerGasPriceFlag, utils.MinerEtherbaseFlag, - utils.LegacyMinerEtherbaseFlag, utils.MinerExtraDataFlag, - utils.LegacyMinerExtraDataFlag, utils.MinerRecommitIntervalFlag, utils.MinerNoVerfiyFlag, utils.NATFlag, @@ -138,6 +132,7 @@ var ( utils.NodeKeyFileFlag, utils.NodeKeyHexFlag, utils.DNSDiscoveryFlag, + utils.MainnetFlag, utils.DeveloperFlag, utils.DeveloperPeriodFlag, utils.TestnetFlag, @@ -147,13 +142,12 @@ var ( utils.FakePoWFlag, utils.NoCompactionFlag, utils.GpoBlocksFlag, - utils.LegacyGpoBlocksFlag, utils.GpoPercentileFlag, - utils.LegacyGpoPercentileFlag, utils.GpoMaxGasPriceFlag, - utils.EWASMInterpreterFlag, - utils.EVMInterpreterFlag, + utils.GpoIgnoreGasPriceFlag, + utils.MinerNotifyFullFlag, configFileFlag, + utils.CatalystFlag, } rpcFlags = []cli.Flag{ @@ -167,32 +161,24 @@ var ( utils.LegacyRPCPortFlag, utils.LegacyRPCCORSDomainFlag, utils.LegacyRPCVirtualHostsFlag, + utils.LegacyRPCApiFlag, utils.GraphQLEnabledFlag, utils.GraphQLCORSDomainFlag, utils.GraphQLVirtualHostsFlag, utils.HTTPApiFlag, - utils.LegacyRPCApiFlag, + utils.HTTPPathPrefixFlag, utils.WSEnabledFlag, utils.WSListenAddrFlag, - utils.LegacyWSListenAddrFlag, utils.WSPortFlag, - utils.LegacyWSPortFlag, utils.WSApiFlag, - utils.LegacyWSApiFlag, utils.WSAllowedOriginsFlag, - utils.LegacyWSAllowedOriginsFlag, + utils.WSPathPrefixFlag, utils.IPCDisabledFlag, utils.IPCPathFlag, utils.InsecureUnlockAllowedFlag, utils.RPCGlobalGasCapFlag, utils.RPCGlobalTxFeeCapFlag, - } - - whisperFlags = []cli.Flag{ - utils.WhisperEnabledFlag, - utils.WhisperMaxMessageSizeFlag, - utils.WhisperMinPOWFlag, - utils.WhisperRestrictConnectionBetweenLightClientsFlag, + utils.AllowUnprotectedTxs, } metricsFlags = []cli.Flag{ @@ -206,6 +192,10 @@ var ( utils.MetricsInfluxDBUsernameFlag, utils.MetricsInfluxDBPasswordFlag, utils.MetricsInfluxDBTagsFlag, + utils.MetricsEnableInfluxDBV2Flag, + utils.MetricsInfluxDBTokenFlag, + utils.MetricsInfluxDBBucketFlag, + utils.MetricsInfluxDBOrganizationFlag, } ) @@ -213,7 +203,7 @@ func init() { // Initialize the CLI app and start Geth app.Action = geth app.HideVersion = true // we have a command to print the version - app.Copyright = "Copyright 2013-2020 The go-ethereum Authors" + app.Copyright = "Copyright 2013-2021 The go-ethereum Authors" app.Commands = []cli.Command{ // See chaincmd.go: initCommand, @@ -221,11 +211,9 @@ func init() { exportCommand, importPreimagesCommand, exportPreimagesCommand, - copydbCommand, removedbCommand, dumpCommand, dumpGenesisCommand, - 
inspectCommand, // See accountcmd.go: accountCommand, walletCommand, @@ -237,13 +225,16 @@ func init() { makecacheCommand, makedagCommand, versionCommand, + versionCheckCommand, licenseCommand, // See config.go dumpConfigCommand, - // See retesteth.go - retestethCommand, + // see dbcmd.go + dbCommand, // See cmd/utils/flags_legacy.go utils.ShowDeprecated, + // See snapshot.go + snapshotCommand, } sort.Sort(cli.CommandsByName(app.Commands)) @@ -251,8 +242,6 @@ func init() { app.Flags = append(app.Flags, rpcFlags...) app.Flags = append(app.Flags, consoleFlags...) app.Flags = append(app.Flags, debug.Flags...) - app.Flags = append(app.Flags, debug.DeprecatedFlags...) - app.Flags = append(app.Flags, whisperFlags...) app.Flags = append(app.Flags, metricsFlags...) app.Before = func(ctx *cli.Context) error { @@ -298,25 +287,6 @@ func prepare(ctx *cli.Context) { log.Info("Dropping default light client cache", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 128) ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(128)) } - // Cap the cache allowance and tune the garbage collector - mem, err := gopsutil.VirtualMemory() - if err == nil { - if 32<<(^uintptr(0)>>63) == 32 && mem.Total > 2*1024*1024*1024 { - log.Warn("Lowering memory allowance on 32bit arch", "available", mem.Total/1024/1024, "addressable", 2*1024) - mem.Total = 2 * 1024 * 1024 * 1024 - } - allowance := int(mem.Total / 1024 / 1024 / 3) - if cache := ctx.GlobalInt(utils.CacheFlag.Name); cache > allowance { - log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance) - ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(allowance)) - } - } - // Ensure Go's GC ignores the database cache for trigger percentage - cache := ctx.GlobalInt(utils.CacheFlag.Name) - gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024))) - - log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) - godebug.SetGCPercent(int(gogc)) // Start metrics export if enabled utils.SetupMetrics(ctx) @@ -349,7 +319,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) { debug.Memsize.Add("node", stack) // Start up the node itself - utils.StartNode(stack) + utils.StartNode(ctx, stack) // Unlock any account specifically requested unlockAccounts(ctx, stack) @@ -432,19 +402,11 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) { if !ok { utils.Fatalf("Ethereum service not running: %v", err) } - // Set the gas price to the limits from the CLI and start mining gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) - if ctx.GlobalIsSet(utils.LegacyMinerGasPriceFlag.Name) && !ctx.GlobalIsSet(utils.MinerGasPriceFlag.Name) { - gasprice = utils.GlobalBig(ctx, utils.LegacyMinerGasPriceFlag.Name) - } ethBackend.TxPool().SetGasPrice(gasprice) // start mining threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name) - if ctx.GlobalIsSet(utils.LegacyMinerThreadsFlag.Name) && !ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) { - threads = ctx.GlobalInt(utils.LegacyMinerThreadsFlag.Name) - log.Warn("The flag --minerthreads is deprecated and will be removed in the future, please use --miner.threads") - } if err := ethBackend.StartMining(threads); err != nil { utils.Fatalf("Failed to start mining: %v", err) } diff --git a/cmd/geth/misccmd.go b/cmd/geth/misccmd.go index 0e7ee9651..b347d31d9 100644 --- a/cmd/geth/misccmd.go +++ b/cmd/geth/misccmd.go @@ -25,12 +25,23 @@ import ( "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/consensus/ethash" - 
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/params" "gopkg.in/urfave/cli.v1" ) var ( + VersionCheckUrlFlag = cli.StringFlag{ + Name: "check.url", + Usage: "URL to use when checking vulnerabilities", + Value: "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json", + } + VersionCheckVersionFlag = cli.StringFlag{ + Name: "check.version", + Usage: "Version to check", + Value: fmt.Sprintf("Geth/v%v/%v-%v/%v", + params.VersionWithCommit(gitCommit, gitDate), + runtime.GOOS, runtime.GOARCH, runtime.Version()), + } makecacheCommand = cli.Command{ Action: utils.MigrateFlags(makecache), Name: "makecache", @@ -65,6 +76,21 @@ Regular users do not need to execute it. Category: "MISCELLANEOUS COMMANDS", Description: ` The output of this command is supposed to be machine-readable. +`, + } + versionCheckCommand = cli.Command{ + Action: utils.MigrateFlags(versionCheck), + Flags: []cli.Flag{ + VersionCheckUrlFlag, + VersionCheckVersionFlag, + }, + Name: "version-check", + Usage: "Checks (online) whether the current version suffers from any known security vulnerabilities", + ArgsUsage: "", + Category: "MISCELLANEOUS COMMANDS", + Description: ` +The version-check command fetches vulnerability-information from https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json, +and displays information about any security vulnerabilities that affect the currently executing version. `, } licenseCommand = cli.Command{ @@ -116,7 +142,6 @@ func version(ctx *cli.Context) error { fmt.Println("Git Commit Date:", gitDate) } fmt.Println("Architecture:", runtime.GOARCH) - fmt.Println("Protocol Versions:", eth.ProtocolVersions) fmt.Println("Go Version:", runtime.Version()) fmt.Println("Operating System:", runtime.GOOS) fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH")) diff --git a/cmd/geth/retesteth.go b/cmd/geth/retesteth.go deleted file mode 100644 index db26821dc..000000000 --- a/cmd/geth/retesteth.go +++ /dev/null @@ -1,919 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
- -package main - -import ( - "bytes" - "context" - "fmt" - "math/big" - "os" - "os/signal" - "time" - - "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/consensus/misc" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/trie" - - cli "gopkg.in/urfave/cli.v1" -) - -var ( - rpcPortFlag = cli.IntFlag{ - Name: "rpcport", - Usage: "HTTP-RPC server listening port", - Value: node.DefaultHTTPPort, - } - retestethCommand = cli.Command{ - Action: utils.MigrateFlags(retesteth), - Name: "retesteth", - Usage: "Launches geth in retesteth mode", - ArgsUsage: "", - Flags: []cli.Flag{rpcPortFlag}, - Category: "MISCELLANEOUS COMMANDS", - Description: `Launches geth in retesteth mode (no database, no network, only retesteth RPC interface)`, - } -) - -type RetestethTestAPI interface { - SetChainParams(ctx context.Context, chainParams ChainParams) (bool, error) - MineBlocks(ctx context.Context, number uint64) (bool, error) - ModifyTimestamp(ctx context.Context, interval uint64) (bool, error) - ImportRawBlock(ctx context.Context, rawBlock hexutil.Bytes) (common.Hash, error) - RewindToBlock(ctx context.Context, number uint64) (bool, error) - GetLogHash(ctx context.Context, txHash common.Hash) (common.Hash, error) -} - -type RetestethEthAPI interface { - SendRawTransaction(ctx context.Context, rawTx hexutil.Bytes) (common.Hash, error) - BlockNumber(ctx context.Context) (uint64, error) - GetBlockByNumber(ctx context.Context, blockNr math.HexOrDecimal64, fullTx bool) (map[string]interface{}, error) - GetBlockByHash(ctx context.Context, blockHash common.Hash, fullTx bool) (map[string]interface{}, error) - GetBalance(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (*math.HexOrDecimal256, error) - GetCode(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (hexutil.Bytes, error) - GetTransactionCount(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (uint64, error) -} - -type RetestethDebugAPI interface { - AccountRange(ctx context.Context, - blockHashOrNumber *math.HexOrDecimal256, txIndex uint64, - addressHash *math.HexOrDecimal256, maxResults uint64, - ) (AccountRangeResult, error) - StorageRangeAt(ctx context.Context, - blockHashOrNumber *math.HexOrDecimal256, txIndex uint64, - address common.Address, - begin *math.HexOrDecimal256, maxResults uint64, - ) (StorageRangeResult, error) -} - -type RetestWeb3API interface { - ClientVersion(ctx context.Context) (string, error) -} - -type RetestethAPI struct { - ethDb ethdb.Database - db state.Database - chainConfig *params.ChainConfig - author common.Address - extraData []byte - genesisHash common.Hash - engine *NoRewardEngine - blockchain *core.BlockChain - txMap map[common.Address]map[uint64]*types.Transaction // Sender -> Nonce -> 
Transaction - txSenders map[common.Address]struct{} // Set of transaction senders - blockInterval uint64 -} - -type ChainParams struct { - SealEngine string `json:"sealEngine"` - Params CParamsParams `json:"params"` - Genesis CParamsGenesis `json:"genesis"` - Accounts map[common.Address]CParamsAccount `json:"accounts"` -} - -type CParamsParams struct { - AccountStartNonce math.HexOrDecimal64 `json:"accountStartNonce"` - HomesteadForkBlock *math.HexOrDecimal64 `json:"homesteadForkBlock"` - EIP150ForkBlock *math.HexOrDecimal64 `json:"EIP150ForkBlock"` - EIP158ForkBlock *math.HexOrDecimal64 `json:"EIP158ForkBlock"` - DaoHardforkBlock *math.HexOrDecimal64 `json:"daoHardforkBlock"` - ByzantiumForkBlock *math.HexOrDecimal64 `json:"byzantiumForkBlock"` - ConstantinopleForkBlock *math.HexOrDecimal64 `json:"constantinopleForkBlock"` - ConstantinopleFixForkBlock *math.HexOrDecimal64 `json:"constantinopleFixForkBlock"` - IstanbulBlock *math.HexOrDecimal64 `json:"istanbulForkBlock"` - ChainID *math.HexOrDecimal256 `json:"chainID"` - MaximumExtraDataSize math.HexOrDecimal64 `json:"maximumExtraDataSize"` - TieBreakingGas bool `json:"tieBreakingGas"` - MinGasLimit math.HexOrDecimal64 `json:"minGasLimit"` - MaxGasLimit math.HexOrDecimal64 `json:"maxGasLimit"` - GasLimitBoundDivisor math.HexOrDecimal64 `json:"gasLimitBoundDivisor"` - MinimumDifficulty math.HexOrDecimal256 `json:"minimumDifficulty"` - DifficultyBoundDivisor math.HexOrDecimal256 `json:"difficultyBoundDivisor"` - DurationLimit math.HexOrDecimal256 `json:"durationLimit"` - BlockReward math.HexOrDecimal256 `json:"blockReward"` - NetworkID math.HexOrDecimal256 `json:"networkID"` -} - -type CParamsGenesis struct { - Nonce math.HexOrDecimal64 `json:"nonce"` - Difficulty *math.HexOrDecimal256 `json:"difficulty"` - MixHash *math.HexOrDecimal256 `json:"mixHash"` - Author common.Address `json:"author"` - Timestamp math.HexOrDecimal64 `json:"timestamp"` - ParentHash common.Hash `json:"parentHash"` - ExtraData hexutil.Bytes `json:"extraData"` - GasLimit math.HexOrDecimal64 `json:"gasLimit"` -} - -type CParamsAccount struct { - Balance *math.HexOrDecimal256 `json:"balance"` - Precompiled *CPAccountPrecompiled `json:"precompiled"` - Code hexutil.Bytes `json:"code"` - Storage map[string]string `json:"storage"` - Nonce *math.HexOrDecimal64 `json:"nonce"` -} - -type CPAccountPrecompiled struct { - Name string `json:"name"` - StartingBlock math.HexOrDecimal64 `json:"startingBlock"` - Linear *CPAPrecompiledLinear `json:"linear"` -} - -type CPAPrecompiledLinear struct { - Base uint64 `json:"base"` - Word uint64 `json:"word"` -} - -type AccountRangeResult struct { - AddressMap map[common.Hash]common.Address `json:"addressMap"` - NextKey common.Hash `json:"nextKey"` -} - -type StorageRangeResult struct { - Complete bool `json:"complete"` - Storage map[common.Hash]SRItem `json:"storage"` -} - -type SRItem struct { - Key string `json:"key"` - Value string `json:"value"` -} - -type NoRewardEngine struct { - inner consensus.Engine - rewardsOn bool -} - -func (e *NoRewardEngine) Author(header *types.Header) (common.Address, error) { - return e.inner.Author(header) -} - -func (e *NoRewardEngine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { - return e.inner.VerifyHeader(chain, header, seal) -} - -func (e *NoRewardEngine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { - return e.inner.VerifyHeaders(chain, headers, seals) -} - -func (e *NoRewardEngine) 
VerifyUncles(chain consensus.ChainReader, block *types.Block) error { - return e.inner.VerifyUncles(chain, block) -} - -func (e *NoRewardEngine) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error { - return e.inner.VerifySeal(chain, header) -} - -func (e *NoRewardEngine) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { - return e.inner.Prepare(chain, header) -} - -func (e *NoRewardEngine) accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) { - // Simply touch miner and uncle coinbase accounts - reward := big.NewInt(0) - for _, uncle := range uncles { - state.AddBalance(uncle.Coinbase, reward) - } - state.AddBalance(header.Coinbase, reward) -} - -func (e *NoRewardEngine) Finalize(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, txs *[]*types.Transaction, - uncles []*types.Header, receipts *[]*types.Receipt, systemTxs []*types.Transaction) error { - if e.rewardsOn { - e.inner.Finalize(chain, header, statedb, txs, uncles, receipts, systemTxs) - } else { - e.accumulateRewards(chain.Config(), statedb, header, uncles) - header.Root = statedb.IntermediateRoot(chain.Config().IsEIP158(header.Number)) - } - - return nil -} - -func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction, - uncles []*types.Header, receipts []*types.Receipt) (*types.Block, []*types.Receipt, error) { - if e.rewardsOn { - return e.inner.FinalizeAndAssemble(chain, header, statedb, txs, uncles, receipts) - } else { - e.accumulateRewards(chain.Config(), statedb, header, uncles) - header.Root = statedb.IntermediateRoot(chain.Config().IsEIP158(header.Number)) - - // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), receipts, nil - } -} - -func (e *NoRewardEngine) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { - return e.inner.Seal(chain, block, results, stop) -} - -func (e *NoRewardEngine) SealHash(header *types.Header) common.Hash { - return e.inner.SealHash(header) -} - -func (e *NoRewardEngine) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { - return e.inner.CalcDifficulty(chain, time, parent) -} - -func (e *NoRewardEngine) APIs(chain consensus.ChainHeaderReader) []rpc.API { - return e.inner.APIs(chain) -} - -func (e *NoRewardEngine) Close() error { - return e.inner.Close() -} - -func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainParams) (bool, error) { - // Clean up - if api.blockchain != nil { - api.blockchain.Stop() - } - if api.engine != nil { - api.engine.Close() - } - if api.ethDb != nil { - api.ethDb.Close() - } - ethDb := rawdb.NewMemoryDatabase() - accounts := make(core.GenesisAlloc) - for address, account := range chainParams.Accounts { - balance := big.NewInt(0) - if account.Balance != nil { - balance.Set((*big.Int)(account.Balance)) - } - var nonce uint64 - if account.Nonce != nil { - nonce = uint64(*account.Nonce) - } - if account.Precompiled == nil || account.Balance != nil { - storage := make(map[common.Hash]common.Hash) - for k, v := range account.Storage { - storage[common.HexToHash(k)] = common.HexToHash(v) - } - accounts[address] = core.GenesisAccount{ - Balance: balance, - Code: account.Code, - Nonce: nonce, - Storage: storage, - } - } - } - chainId := 
big.NewInt(1) - if chainParams.Params.ChainID != nil { - chainId.Set((*big.Int)(chainParams.Params.ChainID)) - } - var ( - homesteadBlock *big.Int - daoForkBlock *big.Int - eip150Block *big.Int - eip155Block *big.Int - eip158Block *big.Int - byzantiumBlock *big.Int - constantinopleBlock *big.Int - petersburgBlock *big.Int - istanbulBlock *big.Int - ) - if chainParams.Params.HomesteadForkBlock != nil { - homesteadBlock = big.NewInt(int64(*chainParams.Params.HomesteadForkBlock)) - } - if chainParams.Params.DaoHardforkBlock != nil { - daoForkBlock = big.NewInt(int64(*chainParams.Params.DaoHardforkBlock)) - } - if chainParams.Params.EIP150ForkBlock != nil { - eip150Block = big.NewInt(int64(*chainParams.Params.EIP150ForkBlock)) - } - if chainParams.Params.EIP158ForkBlock != nil { - eip158Block = big.NewInt(int64(*chainParams.Params.EIP158ForkBlock)) - eip155Block = eip158Block - } - if chainParams.Params.ByzantiumForkBlock != nil { - byzantiumBlock = big.NewInt(int64(*chainParams.Params.ByzantiumForkBlock)) - } - if chainParams.Params.ConstantinopleForkBlock != nil { - constantinopleBlock = big.NewInt(int64(*chainParams.Params.ConstantinopleForkBlock)) - } - if chainParams.Params.ConstantinopleFixForkBlock != nil { - petersburgBlock = big.NewInt(int64(*chainParams.Params.ConstantinopleFixForkBlock)) - } - if constantinopleBlock != nil && petersburgBlock == nil { - petersburgBlock = big.NewInt(100000000000) - } - if chainParams.Params.IstanbulBlock != nil { - istanbulBlock = big.NewInt(int64(*chainParams.Params.IstanbulBlock)) - } - - genesis := &core.Genesis{ - Config: ¶ms.ChainConfig{ - ChainID: chainId, - HomesteadBlock: homesteadBlock, - DAOForkBlock: daoForkBlock, - DAOForkSupport: true, - EIP150Block: eip150Block, - EIP155Block: eip155Block, - EIP158Block: eip158Block, - ByzantiumBlock: byzantiumBlock, - ConstantinopleBlock: constantinopleBlock, - PetersburgBlock: petersburgBlock, - IstanbulBlock: istanbulBlock, - }, - Nonce: uint64(chainParams.Genesis.Nonce), - Timestamp: uint64(chainParams.Genesis.Timestamp), - ExtraData: chainParams.Genesis.ExtraData, - GasLimit: uint64(chainParams.Genesis.GasLimit), - Difficulty: big.NewInt(0).Set((*big.Int)(chainParams.Genesis.Difficulty)), - Mixhash: common.BigToHash((*big.Int)(chainParams.Genesis.MixHash)), - Coinbase: chainParams.Genesis.Author, - ParentHash: chainParams.Genesis.ParentHash, - Alloc: accounts, - } - chainConfig, genesisHash, err := core.SetupGenesisBlock(ethDb, genesis) - if err != nil { - return false, err - } - fmt.Printf("Chain config: %v\n", chainConfig) - - var inner consensus.Engine - switch chainParams.SealEngine { - case "NoProof", "NoReward": - inner = ethash.NewFaker() - case "Ethash": - inner = ethash.New(ethash.Config{ - CacheDir: "ethash", - CachesInMem: 2, - CachesOnDisk: 3, - CachesLockMmap: false, - DatasetsInMem: 1, - DatasetsOnDisk: 2, - DatasetsLockMmap: false, - }, nil, false) - default: - return false, fmt.Errorf("unrecognised seal engine: %s", chainParams.SealEngine) - } - engine := &NoRewardEngine{inner: inner, rewardsOn: chainParams.SealEngine != "NoReward"} - - blockchain, err := core.NewBlockChain(ethDb, nil, chainConfig, engine, vm.Config{}, nil, nil) - if err != nil { - return false, err - } - - api.chainConfig = chainConfig - api.genesisHash = genesisHash - api.author = chainParams.Genesis.Author - api.extraData = chainParams.Genesis.ExtraData - api.ethDb = ethDb - api.engine = engine - api.blockchain = blockchain - api.db = state.NewDatabase(api.ethDb) - api.txMap = 
make(map[common.Address]map[uint64]*types.Transaction) - api.txSenders = make(map[common.Address]struct{}) - api.blockInterval = 0 - return true, nil -} - -func (api *RetestethAPI) SendRawTransaction(ctx context.Context, rawTx hexutil.Bytes) (common.Hash, error) { - tx := new(types.Transaction) - if err := rlp.DecodeBytes(rawTx, tx); err != nil { - // Return nil is not by mistake - some tests include sending transaction where gasLimit overflows uint64 - return common.Hash{}, nil - } - signer := types.MakeSigner(api.chainConfig, big.NewInt(int64(api.currentNumber()))) - sender, err := types.Sender(signer, tx) - if err != nil { - return common.Hash{}, err - } - if nonceMap, ok := api.txMap[sender]; ok { - nonceMap[tx.Nonce()] = tx - } else { - nonceMap = make(map[uint64]*types.Transaction) - nonceMap[tx.Nonce()] = tx - api.txMap[sender] = nonceMap - } - api.txSenders[sender] = struct{}{} - return tx.Hash(), nil -} - -func (api *RetestethAPI) MineBlocks(ctx context.Context, number uint64) (bool, error) { - for i := 0; i < int(number); i++ { - if err := api.mineBlock(); err != nil { - return false, err - } - } - fmt.Printf("Mined %d blocks\n", number) - return true, nil -} - -func (api *RetestethAPI) currentNumber() uint64 { - if current := api.blockchain.CurrentBlock(); current != nil { - return current.NumberU64() - } - return 0 -} - -func (api *RetestethAPI) mineBlock() error { - number := api.currentNumber() - parentHash := rawdb.ReadCanonicalHash(api.ethDb, number) - parent := rawdb.ReadBlock(api.ethDb, parentHash, number) - var timestamp uint64 - if api.blockInterval == 0 { - timestamp = uint64(time.Now().Unix()) - } else { - timestamp = parent.Time() + api.blockInterval - } - gasLimit := core.CalcGasLimit(parent, 9223372036854775807, 9223372036854775807) - header := &types.Header{ - ParentHash: parent.Hash(), - Number: big.NewInt(int64(number + 1)), - GasLimit: gasLimit, - Extra: api.extraData, - Time: timestamp, - } - header.Coinbase = api.author - if api.engine != nil { - api.engine.Prepare(api.blockchain, header) - } - // If we are care about TheDAO hard-fork check whether to override the extra-data or not - if daoBlock := api.chainConfig.DAOForkBlock; daoBlock != nil { - // Check whether the block is among the fork extra-override range - limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) - if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 { - // Depending whether we support or oppose the fork, override differently - if api.chainConfig.DAOForkSupport { - header.Extra = common.CopyBytes(params.DAOForkBlockExtra) - } else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) { - header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data - } - } - } - statedb, err := api.blockchain.StateAt(parent.Root()) - if err != nil { - return err - } - if api.chainConfig.DAOForkSupport && api.chainConfig.DAOForkBlock != nil && api.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 { - misc.ApplyDAOHardFork(statedb) - } - - gasPool := new(core.GasPool).AddGas(header.GasLimit) - txCount := 0 - var txs []*types.Transaction - var receipts []*types.Receipt - var blockFull = gasPool.Gas() < params.TxGas - for address := range api.txSenders { - if blockFull { - break - } - m := api.txMap[address] - for nonce := statedb.GetNonce(address); ; nonce++ { - if tx, ok := m[nonce]; ok { - // Try to apply transactions to the state - statedb.Prepare(tx.Hash(), common.Hash{}, txCount) - snap := statedb.Snapshot() - - receipt, err := core.ApplyTransaction( - 
api.chainConfig, - api.blockchain, - &api.author, - gasPool, - statedb, - header, tx, &header.GasUsed, *api.blockchain.GetVMConfig(), - ) - if err != nil { - statedb.RevertToSnapshot(snap) - break - } - txs = append(txs, tx) - receipts = append(receipts, receipt) - delete(m, nonce) - if len(m) == 0 { - // Last tx for the sender - delete(api.txMap, address) - delete(api.txSenders, address) - } - txCount++ - if gasPool.Gas() < params.TxGas { - blockFull = true - break - } - } else { - break // Gap in the nonces - } - } - } - block, _, err := api.engine.FinalizeAndAssemble(api.blockchain, header, statedb, txs, []*types.Header{}, receipts) - if err != nil { - return err - } - return api.importBlock(block) -} - -func (api *RetestethAPI) importBlock(block *types.Block) error { - if _, err := api.blockchain.InsertChain([]*types.Block{block}); err != nil { - return err - } - fmt.Printf("Imported block %d, head is %d\n", block.NumberU64(), api.currentNumber()) - return nil -} - -func (api *RetestethAPI) ModifyTimestamp(ctx context.Context, interval uint64) (bool, error) { - api.blockInterval = interval - return true, nil -} - -func (api *RetestethAPI) ImportRawBlock(ctx context.Context, rawBlock hexutil.Bytes) (common.Hash, error) { - block := new(types.Block) - if err := rlp.DecodeBytes(rawBlock, block); err != nil { - return common.Hash{}, err - } - fmt.Printf("Importing block %d with parent hash: %x, genesisHash: %x\n", block.NumberU64(), block.ParentHash(), api.genesisHash) - if err := api.importBlock(block); err != nil { - return common.Hash{}, err - } - return block.Hash(), nil -} - -func (api *RetestethAPI) RewindToBlock(ctx context.Context, newHead uint64) (bool, error) { - if err := api.blockchain.SetHead(newHead); err != nil { - return false, err - } - // When we rewind, the transaction pool should be cleaned out. 
- api.txMap = make(map[common.Address]map[uint64]*types.Transaction) - api.txSenders = make(map[common.Address]struct{}) - return true, nil -} - -var emptyListHash common.Hash = common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") - -func (api *RetestethAPI) GetLogHash(ctx context.Context, txHash common.Hash) (common.Hash, error) { - receipt, _, _, _ := rawdb.ReadReceipt(api.ethDb, txHash, api.chainConfig) - if receipt == nil { - return emptyListHash, nil - } else { - if logListRlp, err := rlp.EncodeToBytes(receipt.Logs); err != nil { - return common.Hash{}, err - } else { - return common.BytesToHash(crypto.Keccak256(logListRlp)), nil - } - } -} - -func (api *RetestethAPI) BlockNumber(ctx context.Context) (uint64, error) { - return api.currentNumber(), nil -} - -func (api *RetestethAPI) GetBlockByNumber(ctx context.Context, blockNr math.HexOrDecimal64, fullTx bool) (map[string]interface{}, error) { - block := api.blockchain.GetBlockByNumber(uint64(blockNr)) - if block != nil { - response, err := RPCMarshalBlock(block, true, fullTx) - if err != nil { - return nil, err - } - response["author"] = response["miner"] - response["totalDifficulty"] = (*hexutil.Big)(api.blockchain.GetTd(block.Hash(), uint64(blockNr))) - return response, err - } - return nil, fmt.Errorf("block %d not found", blockNr) -} - -func (api *RetestethAPI) GetBlockByHash(ctx context.Context, blockHash common.Hash, fullTx bool) (map[string]interface{}, error) { - block := api.blockchain.GetBlockByHash(blockHash) - if block != nil { - response, err := RPCMarshalBlock(block, true, fullTx) - if err != nil { - return nil, err - } - response["author"] = response["miner"] - response["totalDifficulty"] = (*hexutil.Big)(api.blockchain.GetTd(block.Hash(), block.Number().Uint64())) - return response, err - } - return nil, fmt.Errorf("block 0x%x not found", blockHash) -} - -func (api *RetestethAPI) AccountRange(ctx context.Context, - blockHashOrNumber *math.HexOrDecimal256, txIndex uint64, - addressHash *math.HexOrDecimal256, maxResults uint64, -) (AccountRangeResult, error) { - var ( - header *types.Header - block *types.Block - ) - if (*big.Int)(blockHashOrNumber).Cmp(big.NewInt(math.MaxInt64)) > 0 { - blockHash := common.BigToHash((*big.Int)(blockHashOrNumber)) - header = api.blockchain.GetHeaderByHash(blockHash) - block = api.blockchain.GetBlockByHash(blockHash) - //fmt.Printf("Account range: %x, txIndex %d, start: %x, maxResults: %d\n", blockHash, txIndex, common.BigToHash((*big.Int)(addressHash)), maxResults) - } else { - blockNumber := (*big.Int)(blockHashOrNumber).Uint64() - header = api.blockchain.GetHeaderByNumber(blockNumber) - block = api.blockchain.GetBlockByNumber(blockNumber) - //fmt.Printf("Account range: %d, txIndex %d, start: %x, maxResults: %d\n", blockNumber, txIndex, common.BigToHash((*big.Int)(addressHash)), maxResults) - } - parentHeader := api.blockchain.GetHeaderByHash(header.ParentHash) - var root common.Hash - var statedb *state.StateDB - var err error - if parentHeader == nil || int(txIndex) >= len(block.Transactions()) { - root = header.Root - statedb, err = api.blockchain.StateAt(root) - if err != nil { - return AccountRangeResult{}, err - } - } else { - root = parentHeader.Root - statedb, err = api.blockchain.StateAt(root) - if err != nil { - return AccountRangeResult{}, err - } - // Recompute transactions up to the target index. 
- signer := types.MakeSigner(api.blockchain.Config(), block.Number()) - for idx, tx := range block.Transactions() { - // Assemble the transaction call message and return if the requested offset - msg, _ := tx.AsMessage(signer) - context := core.NewEVMContext(msg, block.Header(), api.blockchain, nil) - // Not yet the searched for transaction, execute on top of the current state - vmenv := vm.NewEVM(context, statedb, api.blockchain.Config(), vm.Config{}) - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return AccountRangeResult{}, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) - } - // Ensure any modifications are committed to the state - // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect - root = statedb.IntermediateRoot(vmenv.ChainConfig().IsEIP158(block.Number())) - if idx == int(txIndex) { - // This is to make sure root can be opened by OpenTrie - root, err = statedb.Commit(api.chainConfig.IsEIP158(block.Number())) - if err != nil { - return AccountRangeResult{}, err - } - break - } - } - } - accountTrie, err := statedb.Database().OpenTrie(root) - if err != nil { - return AccountRangeResult{}, err - } - it := trie.NewIterator(accountTrie.NodeIterator(common.BigToHash((*big.Int)(addressHash)).Bytes())) - result := AccountRangeResult{AddressMap: make(map[common.Hash]common.Address)} - for i := 0; i < int(maxResults) && it.Next(); i++ { - if preimage := accountTrie.GetKey(it.Key); preimage != nil { - result.AddressMap[common.BytesToHash(it.Key)] = common.BytesToAddress(preimage) - } - } - //fmt.Printf("Number of entries returned: %d\n", len(result.AddressMap)) - // Add the 'next key' so clients can continue downloading. - if it.Next() { - next := common.BytesToHash(it.Key) - result.NextKey = next - } - return result, nil -} - -func (api *RetestethAPI) GetBalance(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (*math.HexOrDecimal256, error) { - //fmt.Printf("GetBalance %x, block %d\n", address, blockNr) - header := api.blockchain.GetHeaderByNumber(uint64(blockNr)) - statedb, err := api.blockchain.StateAt(header.Root) - if err != nil { - return nil, err - } - return (*math.HexOrDecimal256)(statedb.GetBalance(address)), nil -} - -func (api *RetestethAPI) GetCode(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (hexutil.Bytes, error) { - header := api.blockchain.GetHeaderByNumber(uint64(blockNr)) - statedb, err := api.blockchain.StateAt(header.Root) - if err != nil { - return nil, err - } - return statedb.GetCode(address), nil -} - -func (api *RetestethAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (uint64, error) { - header := api.blockchain.GetHeaderByNumber(uint64(blockNr)) - statedb, err := api.blockchain.StateAt(header.Root) - if err != nil { - return 0, err - } - return statedb.GetNonce(address), nil -} - -func (api *RetestethAPI) StorageRangeAt(ctx context.Context, - blockHashOrNumber *math.HexOrDecimal256, txIndex uint64, - address common.Address, - begin *math.HexOrDecimal256, maxResults uint64, -) (StorageRangeResult, error) { - var ( - header *types.Header - block *types.Block - ) - if (*big.Int)(blockHashOrNumber).Cmp(big.NewInt(math.MaxInt64)) > 0 { - blockHash := common.BigToHash((*big.Int)(blockHashOrNumber)) - header = api.blockchain.GetHeaderByHash(blockHash) - block = api.blockchain.GetBlockByHash(blockHash) - //fmt.Printf("Storage range: %x, txIndex %d, addr: %x, start: %x, maxResults: 
%d\n", - // blockHash, txIndex, address, common.BigToHash((*big.Int)(begin)), maxResults) - } else { - blockNumber := (*big.Int)(blockHashOrNumber).Uint64() - header = api.blockchain.GetHeaderByNumber(blockNumber) - block = api.blockchain.GetBlockByNumber(blockNumber) - //fmt.Printf("Storage range: %d, txIndex %d, addr: %x, start: %x, maxResults: %d\n", - // blockNumber, txIndex, address, common.BigToHash((*big.Int)(begin)), maxResults) - } - parentHeader := api.blockchain.GetHeaderByHash(header.ParentHash) - var root common.Hash - var statedb *state.StateDB - var err error - if parentHeader == nil || int(txIndex) >= len(block.Transactions()) { - root = header.Root - statedb, err = api.blockchain.StateAt(root) - if err != nil { - return StorageRangeResult{}, err - } - } else { - root = parentHeader.Root - statedb, err = api.blockchain.StateAt(root) - if err != nil { - return StorageRangeResult{}, err - } - // Recompute transactions up to the target index. - signer := types.MakeSigner(api.blockchain.Config(), block.Number()) - for idx, tx := range block.Transactions() { - // Assemble the transaction call message and return if the requested offset - msg, _ := tx.AsMessage(signer) - context := core.NewEVMContext(msg, block.Header(), api.blockchain, nil) - // Not yet the searched for transaction, execute on top of the current state - vmenv := vm.NewEVM(context, statedb, api.blockchain.Config(), vm.Config{}) - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return StorageRangeResult{}, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) - } - // Ensure any modifications are committed to the state - // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect - _ = statedb.IntermediateRoot(vmenv.ChainConfig().IsEIP158(block.Number())) - if idx == int(txIndex) { - // This is to make sure root can be opened by OpenTrie - _, err = statedb.Commit(vmenv.ChainConfig().IsEIP158(block.Number())) - if err != nil { - return StorageRangeResult{}, err - } - } - } - } - storageTrie := statedb.StorageTrie(address) - it := trie.NewIterator(storageTrie.NodeIterator(common.BigToHash((*big.Int)(begin)).Bytes())) - result := StorageRangeResult{Storage: make(map[common.Hash]SRItem)} - for i := 0; /*i < int(maxResults) && */ it.Next(); i++ { - if preimage := storageTrie.GetKey(it.Key); preimage != nil { - key := (*math.HexOrDecimal256)(big.NewInt(0).SetBytes(preimage)) - v, _, err := rlp.SplitString(it.Value) - if err != nil { - return StorageRangeResult{}, err - } - value := (*math.HexOrDecimal256)(big.NewInt(0).SetBytes(v)) - ks, _ := key.MarshalText() - vs, _ := value.MarshalText() - if len(ks)%2 != 0 { - ks = append(append(append([]byte{}, ks[:2]...), byte('0')), ks[2:]...) - } - if len(vs)%2 != 0 { - vs = append(append(append([]byte{}, vs[:2]...), byte('0')), vs[2:]...) 
- } - result.Storage[common.BytesToHash(it.Key)] = SRItem{ - Key: string(ks), - Value: string(vs), - } - } - } - if it.Next() { - result.Complete = false - } else { - result.Complete = true - } - return result, nil -} - -func (api *RetestethAPI) ClientVersion(ctx context.Context) (string, error) { - return "Geth-" + params.VersionWithCommit(gitCommit, gitDate), nil -} - -func retesteth(ctx *cli.Context) error { - log.Info("Welcome to retesteth!") - // register signer API with server - var ( - extapiURL string - ) - apiImpl := &RetestethAPI{} - var testApi RetestethTestAPI = apiImpl - var ethApi RetestethEthAPI = apiImpl - var debugApi RetestethDebugAPI = apiImpl - var web3Api RetestWeb3API = apiImpl - rpcAPI := []rpc.API{ - { - Namespace: "test", - Public: true, - Service: testApi, - Version: "1.0", - }, - { - Namespace: "eth", - Public: true, - Service: ethApi, - Version: "1.0", - }, - { - Namespace: "debug", - Public: true, - Service: debugApi, - Version: "1.0", - }, - { - Namespace: "web3", - Public: true, - Service: web3Api, - Version: "1.0", - }, - } - vhosts := utils.SplitAndTrim(ctx.GlobalString(utils.HTTPVirtualHostsFlag.Name)) - cors := utils.SplitAndTrim(ctx.GlobalString(utils.HTTPCORSDomainFlag.Name)) - - // register apis and create handler stack - srv := rpc.NewServer() - err := node.RegisterApisFromWhitelist(rpcAPI, []string{"test", "eth", "debug", "web3"}, srv, false) - if err != nil { - utils.Fatalf("Could not register RPC apis: %w", err) - } - handler := node.NewHTTPHandlerStack(srv, cors, vhosts) - - // start http server - var RetestethHTTPTimeouts = rpc.HTTPTimeouts{ - ReadTimeout: 120 * time.Second, - WriteTimeout: 120 * time.Second, - IdleTimeout: 120 * time.Second, - } - httpEndpoint := fmt.Sprintf("%s:%d", ctx.GlobalString(utils.HTTPListenAddrFlag.Name), ctx.Int(rpcPortFlag.Name)) - httpServer, _, err := node.StartHTTPEndpoint(httpEndpoint, RetestethHTTPTimeouts, handler) - if err != nil { - utils.Fatalf("Could not start RPC api: %v", err) - } - extapiURL = fmt.Sprintf("http://%s", httpEndpoint) - log.Info("HTTP endpoint opened", "url", extapiURL) - - defer func() { - // Don't bother imposing a timeout here. - httpServer.Shutdown(context.Background()) - log.Info("HTTP endpoint closed", "url", httpEndpoint) - }() - - abortChan := make(chan os.Signal, 11) - signal.Notify(abortChan, os.Interrupt) - - sig := <-abortChan - log.Info("Exiting...", "signal", sig) - return nil -} diff --git a/cmd/geth/retesteth_copypaste.go b/cmd/geth/retesteth_copypaste.go deleted file mode 100644 index e2795af7f..000000000 --- a/cmd/geth/retesteth_copypaste.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
- -package main - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" -) - -// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction -type RPCTransaction struct { - BlockHash common.Hash `json:"blockHash"` - BlockNumber *hexutil.Big `json:"blockNumber"` - From common.Address `json:"from"` - Gas hexutil.Uint64 `json:"gas"` - GasPrice *hexutil.Big `json:"gasPrice"` - Hash common.Hash `json:"hash"` - Input hexutil.Bytes `json:"input"` - Nonce hexutil.Uint64 `json:"nonce"` - To *common.Address `json:"to"` - TransactionIndex hexutil.Uint `json:"transactionIndex"` - Value *hexutil.Big `json:"value"` - V *hexutil.Big `json:"v"` - R *hexutil.Big `json:"r"` - S *hexutil.Big `json:"s"` -} - -// newRPCTransaction returns a transaction that will serialize to the RPC -// representation, with the given location metadata set (if available). -func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction { - var signer types.Signer = types.FrontierSigner{} - if tx.Protected() { - signer = types.NewEIP155Signer(tx.ChainId()) - } - from, _ := types.Sender(signer, tx) - v, r, s := tx.RawSignatureValues() - - result := &RPCTransaction{ - From: from, - Gas: hexutil.Uint64(tx.Gas()), - GasPrice: (*hexutil.Big)(tx.GasPrice()), - Hash: tx.Hash(), - Input: hexutil.Bytes(tx.Data()), - Nonce: hexutil.Uint64(tx.Nonce()), - To: tx.To(), - Value: (*hexutil.Big)(tx.Value()), - V: (*hexutil.Big)(v), - R: (*hexutil.Big)(r), - S: (*hexutil.Big)(s), - } - if blockHash != (common.Hash{}) { - result.BlockHash = blockHash - result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) - result.TransactionIndex = hexutil.Uint(index) - } - return result -} - -// newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation. -func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransaction { - txs := b.Transactions() - if index >= uint64(len(txs)) { - return nil - } - return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index) -} - -// newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation. -func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransaction { - for idx, tx := range b.Transactions() { - if tx.Hash() == hash { - return newRPCTransactionFromBlockIndex(b, uint64(idx)) - } - } - return nil -} - -// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are -// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain -// transaction hashes. 
-func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { - head := b.Header() // copies the header once - fields := map[string]interface{}{ - "number": (*hexutil.Big)(head.Number), - "hash": b.Hash(), - "parentHash": head.ParentHash, - "nonce": head.Nonce, - "mixHash": head.MixDigest, - "sha3Uncles": head.UncleHash, - "logsBloom": head.Bloom, - "stateRoot": head.Root, - "miner": head.Coinbase, - "difficulty": (*hexutil.Big)(head.Difficulty), - "extraData": hexutil.Bytes(head.Extra), - "size": hexutil.Uint64(b.Size()), - "gasLimit": hexutil.Uint64(head.GasLimit), - "gasUsed": hexutil.Uint64(head.GasUsed), - "timestamp": hexutil.Uint64(head.Time), - "transactionsRoot": head.TxHash, - "receiptsRoot": head.ReceiptHash, - } - - if inclTx { - formatTx := func(tx *types.Transaction) (interface{}, error) { - return tx.Hash(), nil - } - if fullTx { - formatTx = func(tx *types.Transaction) (interface{}, error) { - return newRPCTransactionFromBlockHash(b, tx.Hash()), nil - } - } - txs := b.Transactions() - transactions := make([]interface{}, len(txs)) - var err error - for i, tx := range txs { - if transactions[i], err = formatTx(tx); err != nil { - return nil, err - } - } - fields["transactions"] = transactions - } - - uncles := b.Uncles() - uncleHashes := make([]common.Hash, len(uncles)) - for i, uncle := range uncles { - uncleHashes[i] = uncle.Hash() - } - fields["uncles"] = uncleHashes - - return fields, nil -} diff --git a/cmd/geth/run_test.go b/cmd/geth/run_test.go index f7b735b84..527c38a65 100644 --- a/cmd/geth/run_test.go +++ b/cmd/geth/run_test.go @@ -70,12 +70,12 @@ func runGeth(t *testing.T, args ...string) *testgeth { tt := &testgeth{} tt.TestCmd = cmdtest.NewTestCmd(t, tt) for i, arg := range args { - switch { - case arg == "-datadir" || arg == "--datadir": + switch arg { + case "--datadir": if i < len(args)-1 { tt.Datadir = args[i+1] } - case arg == "-etherbase" || arg == "--etherbase": + case "--miner.etherbase": if i < len(args)-1 { tt.Etherbase = args[i+1] } @@ -84,7 +84,7 @@ func runGeth(t *testing.T, args ...string) *testgeth { if tt.Datadir == "" { tt.Datadir = tmpdir(t) tt.Cleanup = func() { os.RemoveAll(tt.Datadir) } - args = append([]string{"-datadir", tt.Datadir}, args...) + args = append([]string{"--datadir", tt.Datadir}, args...) // Remove the temporary datadir if something fails below. defer func() { if t.Failed() { diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go new file mode 100644 index 000000000..5e72ff448 --- /dev/null +++ b/cmd/geth/snapshot.go @@ -0,0 +1,519 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package main + +import ( + "bytes" + "encoding/json" + "errors" + "os" + "time" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/pruner" + "github.com/ethereum/go-ethereum/core/state/snapshot" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + cli "gopkg.in/urfave/cli.v1" +) + +var ( + // emptyRoot is the known root hash of an empty trie. + emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // emptyCode is the known hash of the empty EVM bytecode. + emptyCode = crypto.Keccak256(nil) +) + +var ( + snapshotCommand = cli.Command{ + Name: "snapshot", + Usage: "A set of commands based on the snapshot", + Category: "MISCELLANEOUS COMMANDS", + Description: "", + Subcommands: []cli.Command{ + { + Name: "prune-state", + Usage: "Prune stale ethereum state data based on the snapshot", + ArgsUsage: "", + Action: utils.MigrateFlags(pruneState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.AncientFlag, + utils.TestnetFlag, + utils.CacheTrieJournalFlag, + utils.BloomFilterSizeFlag, + }, + Description: ` +geth snapshot prune-state +will prune historical state data with the help of the state snapshot. +All trie nodes and contract codes that do not belong to the specified +version state will be deleted from the database. After pruning, only +two version states are available: genesis and the specific one. + +The default pruning target is the HEAD-127 state. + +WARNING: It's necessary to delete the trie clean cache after the pruning. +If you specify another directory for the trie clean cache via "--cache.trie.journal" +during the use of Geth, please also specify it here for correct deletion. Otherwise +the trie clean cache with default directory will be deleted. +`, + }, + { + Name: "verify-state", + Usage: "Recalculate state hash based on the snapshot for verification", + ArgsUsage: "", + Action: utils.MigrateFlags(verifyState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.AncientFlag, + utils.TestnetFlag, + }, + Description: ` +geth snapshot verify-state +will traverse the whole accounts and storages set based on the specified +snapshot and recalculate the root hash of state for verification. +In other words, this command does the snapshot to trie conversion. +`, + }, + { + Name: "traverse-state", + Usage: "Traverse the state with given root hash for verification", + ArgsUsage: "", + Action: utils.MigrateFlags(traverseState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.AncientFlag, + utils.TestnetFlag, + }, + Description: ` +geth snapshot traverse-state +will traverse the whole state from the given state root and will abort if any +referenced trie node or contract code is missing. This command can be used for +state integrity verification. The default checking target is the HEAD state. + +It's also usable without snapshot enabled. 
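[Editor's illustrative aside, not part of the patch: the prune-state description above states that the default pruning target is the HEAD-127 state. As a purely illustrative sketch of what that means, and not the pruner's actual internals, the corresponding state root could be resolved with the rawdb helpers already used elsewhere in this change.]

package snapshotexample // hypothetical package, for illustration only

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// resolvePruneTarget is a hypothetical helper showing how a HEAD-127 state
// root could be looked up from the chain database. The real pruner resolves
// its default target internally.
func resolvePruneTarget(chaindb ethdb.Database) common.Hash {
	head := rawdb.ReadHeadBlock(chaindb)
	if head == nil || head.NumberU64() < 127 {
		return common.Hash{} // fall back to the pruner's own default
	}
	number := head.NumberU64() - 127
	hash := rawdb.ReadCanonicalHash(chaindb, number)
	block := rawdb.ReadBlock(chaindb, hash, number)
	if block == nil {
		return common.Hash{}
	}
	return block.Root()
}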
+`, + }, + { + Name: "traverse-rawstate", + Usage: "Traverse the state with given root hash for verification", + ArgsUsage: "", + Action: utils.MigrateFlags(traverseRawState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.AncientFlag, + utils.TestnetFlag, + }, + Description: ` +geth snapshot traverse-rawstate +will traverse the whole state from the given root and will abort if any referenced +trie node or contract code is missing. This command can be used for state integrity +verification. The default checking target is the HEAD state. It's basically identical +to traverse-state, but the check granularity is smaller. + +It's also usable without snapshot enabled. +`, + }, + { + Name: "dump", + Usage: "Dump a specific block from storage (same as 'geth dump' but using snapshots)", + ArgsUsage: "[? | ]", + Action: utils.MigrateFlags(dumpState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.AncientFlag, + utils.TestnetFlag, + utils.ExcludeCodeFlag, + utils.ExcludeStorageFlag, + utils.StartKeyFlag, + utils.DumpLimitFlag, + }, + Description: ` +This command is semantically equivalent to 'geth dump', but uses the snapshots +as the backend data source, making this command a lot faster. + +The argument is interpreted as block number or hash. If none is provided, the latest +block is used. +`, + }, + }, + } +) + +func pruneState(ctx *cli.Context) error { + stack, config := makeConfigNode(ctx) + defer stack.Close() + + chaindb := utils.MakeChainDatabase(ctx, stack, false) + pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name)) + if err != nil { + log.Error("Failed to open snapshot tree", "err", err) + return err + } + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + var targetRoot common.Hash + if ctx.NArg() == 1 { + targetRoot, err = parseRoot(ctx.Args()[0]) + if err != nil { + log.Error("Failed to resolve state root", "err", err) + return err + } + } + if err = pruner.Prune(targetRoot); err != nil { + log.Error("Failed to prune state", "err", err) + return err + } + return nil +} + +func verifyState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chaindb := utils.MakeChainDatabase(ctx, stack, true) + headBlock := rawdb.ReadHeadBlock(chaindb) + if headBlock == nil { + log.Error("Failed to load head block") + return errors.New("no head block") + } + snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false) + if err != nil { + log.Error("Failed to open snapshot tree", "err", err) + return err + } + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + var root = headBlock.Root() + if ctx.NArg() == 1 { + root, err = parseRoot(ctx.Args()[0]) + if err != nil { + log.Error("Failed to resolve state root", "err", err) + return err + } + } + if err := snaptree.Verify(root); err != nil { + log.Error("Failed to verfiy state", "root", root, "err", err) + return err + } + log.Info("Verified the state", "root", root) + return nil +} + +// traverseState is a helper function used for pruning verification. +// Basically it just iterates the trie, ensure all nodes and associated +// contract codes are present. 
+func traverseState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chaindb := utils.MakeChainDatabase(ctx, stack, true) + headBlock := rawdb.ReadHeadBlock(chaindb) + if headBlock == nil { + log.Error("Failed to load head block") + return errors.New("no head block") + } + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + var ( + root common.Hash + err error + ) + if ctx.NArg() == 1 { + root, err = parseRoot(ctx.Args()[0]) + if err != nil { + log.Error("Failed to resolve state root", "err", err) + return err + } + log.Info("Start traversing the state", "root", root) + } else { + root = headBlock.Root() + log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) + } + triedb := trie.NewDatabase(chaindb) + t, err := trie.NewSecure(root, triedb) + if err != nil { + log.Error("Failed to open trie", "root", root, "err", err) + return err + } + var ( + accounts int + slots int + codes int + lastReport time.Time + start = time.Now() + ) + accIter := trie.NewIterator(t.NodeIterator(nil)) + for accIter.Next() { + accounts += 1 + var acc state.Account + if err := rlp.DecodeBytes(accIter.Value, &acc); err != nil { + log.Error("Invalid account encountered during traversal", "err", err) + return err + } + if acc.Root != emptyRoot { + storageTrie, err := trie.NewSecure(acc.Root, triedb) + if err != nil { + log.Error("Failed to open storage trie", "root", acc.Root, "err", err) + return err + } + storageIter := trie.NewIterator(storageTrie.NodeIterator(nil)) + for storageIter.Next() { + slots += 1 + } + if storageIter.Err != nil { + log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Err) + return storageIter.Err + } + } + if !bytes.Equal(acc.CodeHash, emptyCode) { + code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash)) + if len(code) == 0 { + log.Error("Code is missing", "hash", common.BytesToHash(acc.CodeHash)) + return errors.New("missing code") + } + codes += 1 + } + if time.Since(lastReport) > time.Second*8 { + log.Info("Traversing state", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + lastReport = time.Now() + } + } + if accIter.Err != nil { + log.Error("Failed to traverse state trie", "root", root, "err", accIter.Err) + return accIter.Err + } + log.Info("State is complete", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +// traverseRawState is a helper function used for pruning verification. +// Basically it just iterates the trie, ensure all nodes and associated +// contract codes are present. It's basically identical to traverseState +// but it will check each trie node. 
+func traverseRawState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chaindb := utils.MakeChainDatabase(ctx, stack, true) + headBlock := rawdb.ReadHeadBlock(chaindb) + if headBlock == nil { + log.Error("Failed to load head block") + return errors.New("no head block") + } + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + var ( + root common.Hash + err error + ) + if ctx.NArg() == 1 { + root, err = parseRoot(ctx.Args()[0]) + if err != nil { + log.Error("Failed to resolve state root", "err", err) + return err + } + log.Info("Start traversing the state", "root", root) + } else { + root = headBlock.Root() + log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) + } + triedb := trie.NewDatabase(chaindb) + t, err := trie.NewSecure(root, triedb) + if err != nil { + log.Error("Failed to open trie", "root", root, "err", err) + return err + } + var ( + nodes int + accounts int + slots int + codes int + lastReport time.Time + start = time.Now() + ) + accIter := t.NodeIterator(nil) + for accIter.Next(true) { + nodes += 1 + node := accIter.Hash() + + if node != (common.Hash{}) { + // Check the present for non-empty hash node(embedded node doesn't + // have their own hash). + blob := rawdb.ReadTrieNode(chaindb, node) + if len(blob) == 0 { + log.Error("Missing trie node(account)", "hash", node) + return errors.New("missing account") + } + } + // If it's a leaf node, yes we are touching an account, + // dig into the storage trie further. + if accIter.Leaf() { + accounts += 1 + var acc state.Account + if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil { + log.Error("Invalid account encountered during traversal", "err", err) + return errors.New("invalid account") + } + if acc.Root != emptyRoot { + storageTrie, err := trie.NewSecure(acc.Root, triedb) + if err != nil { + log.Error("Failed to open storage trie", "root", acc.Root, "err", err) + return errors.New("missing storage trie") + } + storageIter := storageTrie.NodeIterator(nil) + for storageIter.Next(true) { + nodes += 1 + node := storageIter.Hash() + + // Check the present for non-empty hash node(embedded node doesn't + // have their own hash). + if node != (common.Hash{}) { + blob := rawdb.ReadTrieNode(chaindb, node) + if len(blob) == 0 { + log.Error("Missing trie node(storage)", "hash", node) + return errors.New("missing storage") + } + } + // Bump the counter if it's leaf node. 
+ if storageIter.Leaf() { + slots += 1 + } + } + if storageIter.Error() != nil { + log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Error()) + return storageIter.Error() + } + } + if !bytes.Equal(acc.CodeHash, emptyCode) { + code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash)) + if len(code) == 0 { + log.Error("Code is missing", "account", common.BytesToHash(accIter.LeafKey())) + return errors.New("missing code") + } + codes += 1 + } + if time.Since(lastReport) > time.Second*8 { + log.Info("Traversing state", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + lastReport = time.Now() + } + } + } + if accIter.Error() != nil { + log.Error("Failed to traverse state trie", "root", root, "err", accIter.Error()) + return accIter.Error() + } + log.Info("State is complete", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +func parseRoot(input string) (common.Hash, error) { + var h common.Hash + if err := h.UnmarshalText([]byte(input)); err != nil { + return h, err + } + return h, nil +} + +func dumpState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + conf, db, root, err := parseDumpConfig(ctx, stack) + if err != nil { + return err + } + snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false) + if err != nil { + return err + } + accIt, err := snaptree.AccountIterator(root, common.BytesToHash(conf.Start)) + if err != nil { + return err + } + defer accIt.Release() + + log.Info("Snapshot dumping started", "root", root) + var ( + start = time.Now() + logged = time.Now() + accounts uint64 + ) + enc := json.NewEncoder(os.Stdout) + enc.Encode(struct { + Root common.Hash `json:"root"` + }{root}) + for accIt.Next() { + account, err := snapshot.FullAccount(accIt.Account()) + if err != nil { + return err + } + da := &state.DumpAccount{ + Balance: account.Balance.String(), + Nonce: account.Nonce, + Root: account.Root, + CodeHash: account.CodeHash, + SecureKey: accIt.Hash().Bytes(), + } + if !conf.SkipCode && !bytes.Equal(account.CodeHash, emptyCode) { + da.Code = rawdb.ReadCode(db, common.BytesToHash(account.CodeHash)) + } + if !conf.SkipStorage { + da.Storage = make(map[common.Hash]string) + + stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{}) + if err != nil { + return err + } + for stIt.Next() { + da.Storage[stIt.Hash()] = common.Bytes2Hex(stIt.Slot()) + } + } + enc.Encode(da) + accounts++ + if time.Since(logged) > 8*time.Second { + log.Info("Snapshot dumping in progress", "at", accIt.Hash(), "accounts", accounts, + "elapsed", common.PrettyDuration(time.Since(start))) + logged = time.Now() + } + if conf.Max > 0 && accounts >= conf.Max { + break + } + } + log.Info("Snapshot dumping complete", "accounts", accounts, + "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} diff --git a/cmd/geth/testdata/vcheck/data.json b/cmd/geth/testdata/vcheck/data.json new file mode 100644 index 000000000..e7ee2bf7e --- /dev/null +++ b/cmd/geth/testdata/vcheck/data.json @@ -0,0 +1,61 @@ +[ + { + "name": "CorruptedDAG", + "uid": "GETH-2020-01", + "summary": "Mining nodes will generate erroneous PoW on epochs > `385`.", + "description": "A mining flaw could cause miners to erroneously calculate PoW, due to an index overflow, if DAG size is exceeding the maximum 32 bit unsigned value.\n\nThis occurred on the ETC 
chain on 2020-11-06. This is likely to trigger for ETH mainnet around block `11550000`/epoch `385`, slated to occur early January 2021.\n\nThis issue is relevant only for miners, non-mining nodes are unaffected, since non-mining nodes use a smaller verification cache instead of a full DAG.", + "links": [ + "https://github.com/ethereum/go-ethereum/pull/21793", + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://github.com/ethereum/go-ethereum/commit/567d41d9363706b4b13ce0903804e8acf214af49" + ], + "introduced": "v1.6.0", + "fixed": "v1.9.24", + "published": "2020-11-12", + "severity": "Medium", + "check": "Geth\\/v1\\.(6|7|8)\\..*|Geth\\/v1\\.9\\.2(1|2|3)-.*" + }, + { + "name": "GoCrash", + "uid": "GETH-2020-02", + "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing, due to an underlying bug in Go (CVE-2020-28362) versions < `1.15.5`, or `<1.14.12`", + "description": "The DoS issue can be used to crash all Geth nodes during block processing, the effects of which would be that a major part of the Ethereum network went offline.\n\nOutside of Go-Ethereum, the issue is most likely relevant for all forks of Geth (such as TurboGeth or ETC’s core-geth) which is built with versions of Go which contains the vulnerability.", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://groups.google.com/g/golang-announce/c/NpBGTTmKzpM", + "https://github.com/golang/go/issues/42552" + ], + "fixed": "v1.9.24", + "published": "2020-11-12", + "severity": "Critical", + "check": "Geth.*\\/go1\\.(11(.*)|12(.*)|13(.*)|14|14\\.(\\d|10|11|)|15|15\\.[0-4])$" + }, + { + "name": "ShallowCopy", + "uid": "GETH-2020-03", + "summary": "A consensus flaw in Geth, related to `datacopy` precompile", + "description": "Geth erroneously performed a 'shallow' copy when the precompiled `datacopy` (at `0x00...04`) was invoked. 
An attacker could deploy a contract that uses the shallow copy to corrupt the contents of the `RETURNDATA`, thus causing a consensus failure.", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/" + ], + "introduced": "v1.9.7", + "fixed": "v1.9.17", + "published": "2020-11-12", + "severity": "Critical", + "check": "Geth\\/v1\\.9\\.(7|8|9|10|11|12|13|14|15|16).*$" + }, + { + "name": "GethCrash", + "uid": "GETH-2020-04", + "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing", + "description": "Full details to be disclosed at a later date", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/" + ], + "introduced": "v1.9.16", + "fixed": "v1.9.18", + "published": "2020-11-12", + "severity": "Critical", + "check": "Geth\\/v1\\.9.(16|17).*$" + } +] diff --git a/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 new file mode 100644 index 000000000..f9066d4fe --- /dev/null +++ b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 @@ -0,0 +1,4 @@ +untrusted comment: signature from minisign secret key +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: timestamp:1605618622 file:vulnerabilities.json +osAPs4QPdDkmiWQxqeMIzYv/b+ZGxJ+19Sbrk1Cpq4t2gHBT+lqFtwL3OCzKWWyjGRTmHfsVGBYpzEdPRQ0/BQ== diff --git a/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 new file mode 100644 index 000000000..a89a83d21 --- /dev/null +++ b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 @@ -0,0 +1,4 @@ +untrusted comment: Here's a comment +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: Here's a trusted comment +3CnkIuz9MEDa7uNyGZAbKZhuirwfiqm7E1uQHrd2SiO4Y8+Akw9vs052AyKw0s5nhbYHCZE2IMQdHNjKwxEGAQ== diff --git a/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 new file mode 100644 index 000000000..6fd33b19a --- /dev/null +++ b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 @@ -0,0 +1,4 @@ +untrusted comment: One more (untrusted) comment +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: Here's a trusted comment +3CnkIuz9MEDa7uNyGZAbKZhuirwfiqm7E1uQHrd2SiO4Y8+Akw9vs052AyKw0s5nhbYHCZE2IMQdHNjKwxEGAQ== diff --git a/cmd/geth/testdata/vcheck/minisign.pub b/cmd/geth/testdata/vcheck/minisign.pub new file mode 100644 index 000000000..183dce5f6 --- /dev/null +++ b/cmd/geth/testdata/vcheck/minisign.pub @@ -0,0 +1,2 @@ +untrusted comment: minisign public key 284E00B52C269624 +RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp diff --git a/cmd/geth/testdata/vcheck/minisign.sec b/cmd/geth/testdata/vcheck/minisign.sec new file mode 100644 index 000000000..5c50715b2 --- /dev/null +++ b/cmd/geth/testdata/vcheck/minisign.sec @@ -0,0 +1,2 @@ +untrusted comment: minisign encrypted secret key +RWRTY0Iyz8kmPMKrqk6DCtlO9a33akKiaOQG1aLolqDxs52qvPoAAAACAAAAAAAAAEAAAAAArEiggdvyn6+WzTprirLtgiYQoU+ihz/HyGgjhuF+Pz2ddMduyCO+xjCHeq+vgVVW039fbsI8hW6LRGJZLBKV5/jdxCXAVVQE7qTQ6xpEdO0z8Z731/pV1hlspQXG2PNd16NMtwd9dWw= diff --git a/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig b/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig new 
file mode 100644 index 000000000..3d5fcacf9 --- /dev/null +++ b/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig @@ -0,0 +1,2 @@ +untrusted comment: verify with ./signifykey.pub +RWSKLNhZb0KdAbhRUhW2LQZXdnwttu2SYhM9EuC4mMgOJB85h7/YIPupf8/ldTs4N8e9Y/fhgdY40q5LQpt5IFC62fq0v8U1/w8= diff --git a/cmd/geth/testdata/vcheck/signifykey.pub b/cmd/geth/testdata/vcheck/signifykey.pub new file mode 100644 index 000000000..328f973ab --- /dev/null +++ b/cmd/geth/testdata/vcheck/signifykey.pub @@ -0,0 +1,2 @@ +untrusted comment: signify public key +RWSKLNhZb0KdATtRT7mZC/bybI3t3+Hv/O2i3ye04Dq9fnT9slpZ1a2/ diff --git a/cmd/geth/testdata/vcheck/signifykey.sec b/cmd/geth/testdata/vcheck/signifykey.sec new file mode 100644 index 000000000..3279a2e58 --- /dev/null +++ b/cmd/geth/testdata/vcheck/signifykey.sec @@ -0,0 +1,2 @@ +untrusted comment: signify secret key +RWRCSwAAACpLQDLawSQCtI7eAVIvaiHzjTsTyJsfV5aKLNhZb0KdAWeICXJGa93/bHAcsY6jUh9I8RdEcDWEoGxmaXZC+IdVBPxDpkix9fBRGEUdKWHi3dOfqME0YRzErWI5AVg3cRw= diff --git a/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.1 b/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.1 new file mode 100644 index 000000000..f9066d4fe --- /dev/null +++ b/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.1 @@ -0,0 +1,4 @@ +untrusted comment: signature from minisign secret key +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: timestamp:1605618622 file:vulnerabilities.json +osAPs4QPdDkmiWQxqeMIzYv/b+ZGxJ+19Sbrk1Cpq4t2gHBT+lqFtwL3OCzKWWyjGRTmHfsVGBYpzEdPRQ0/BQ== diff --git a/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.2 b/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.2 new file mode 100644 index 000000000..a89a83d21 --- /dev/null +++ b/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.2 @@ -0,0 +1,4 @@ +untrusted comment: Here's a comment +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: Here's a trusted comment +3CnkIuz9MEDa7uNyGZAbKZhuirwfiqm7E1uQHrd2SiO4Y8+Akw9vs052AyKw0s5nhbYHCZE2IMQdHNjKwxEGAQ== diff --git a/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.3 b/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.3 new file mode 100644 index 000000000..6fd33b19a --- /dev/null +++ b/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.3 @@ -0,0 +1,4 @@ +untrusted comment: One more (untrusted) comment +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: Here's a trusted comment +3CnkIuz9MEDa7uNyGZAbKZhuirwfiqm7E1uQHrd2SiO4Y8+Akw9vs052AyKw0s5nhbYHCZE2IMQdHNjKwxEGAQ== diff --git a/cmd/geth/testdata/vcheck/vulnerabilities.json b/cmd/geth/testdata/vcheck/vulnerabilities.json new file mode 100644 index 000000000..92de0c9cc --- /dev/null +++ b/cmd/geth/testdata/vcheck/vulnerabilities.json @@ -0,0 +1,119 @@ +[ + { + "name": "CorruptedDAG", + "uid": "GETH-2020-01", + "summary": "Mining nodes will generate erroneous PoW on epochs > `385`.", + "description": "A mining flaw could cause miners to erroneously calculate PoW, due to an index overflow, if DAG size is exceeding the maximum 32 bit unsigned value.\n\nThis occurred on the ETC chain on 2020-11-06. 
This is likely to trigger for ETH mainnet around block `11550000`/epoch `385`, slated to occur early January 2021.\n\nThis issue is relevant only for miners; non-mining nodes are unaffected, since they use a smaller verification cache instead of a full DAG.", + "links": [ + "https://github.com/ethereum/go-ethereum/pull/21793", + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://github.com/ethereum/go-ethereum/commit/567d41d9363706b4b13ce0903804e8acf214af49", + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-v592-xf75-856p" + ], + "introduced": "v1.6.0", + "fixed": "v1.9.24", + "published": "2020-11-12", + "severity": "Medium", + "CVE": "CVE-2020-26240", + "check": "Geth\\/v1\\.(6|7|8)\\..*|Geth\\/v1\\.9\\.\\d-.*|Geth\\/v1\\.9\\.1.*|Geth\\/v1\\.9\\.2(0|1|2|3)-.*" + }, + { + "name": "Denial of service due to Go CVE-2020-28362", + "uid": "GETH-2020-02", + "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing, due to an underlying bug in Go (CVE-2020-28362) versions < `1.15.5`, or `<1.14.12`", + "description": "The DoS issue can be used to crash all Geth nodes during block processing, the effect of which would be that a major part of the Ethereum network goes offline.\n\nOutside of Go-Ethereum, the issue is most likely relevant for all forks of Geth (such as TurboGeth or ETC’s core-geth) which are built with versions of Go that contain the vulnerability.", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://groups.google.com/g/golang-announce/c/NpBGTTmKzpM", + "https://github.com/golang/go/issues/42552", + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-m6gx-rhvj-fh52" + ], + "introduced": "v0.0.0", + "fixed": "v1.9.24", + "published": "2020-11-12", + "severity": "Critical", + "CVE": "CVE-2020-28362", + "check": "Geth.*\\/go1\\.(11(.*)|12(.*)|13(.*)|14|14\\.(\\d|10|11|)|15|15\\.[0-4])$" + }, + { + "name": "ShallowCopy", + "uid": "GETH-2020-03", + "summary": "A consensus flaw in Geth, related to `datacopy` precompile", + "description": "Geth erroneously performed a 'shallow' copy when the precompiled `datacopy` (at `0x00...04`) was invoked. An attacker could deploy a contract that uses the shallow copy to corrupt the contents of the `RETURNDATA`, thus causing a consensus failure.", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-69v6-xc2j-r2jf" + ], + "introduced": "v1.9.7", + "fixed": "v1.9.17", + "published": "2020-11-12", + "severity": "Critical", + "CVE": "CVE-2020-26241", + "check": "Geth\\/v1\\.9\\.(7|8|9|10|11|12|13|14|15|16).*$" + }, + { + "name": "Geth DoS via MULMOD", + "uid": "GETH-2020-04", + "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing", + "description": "Affected versions suffer from a vulnerability which can be exploited through the `MULMOD` operation, by specifying a modulo of `0`: `mulmod(a,b,0)`, causing a `panic` in the underlying library. 
\nThe crash was in the `uint256` library, where a buffer [underflowed](https://github.com/holiman/uint256/blob/4ce82e695c10ddad57215bdbeafb68b8c5df2c30/uint256.go#L442).\n\n\tif `d == 0`, `dLen` remains `0`\n\nand https://github.com/holiman/uint256/blob/4ce82e695c10ddad57215bdbeafb68b8c5df2c30/uint256.go#L451 will try to access index `[-1]`.\n\nThe `uint256` library was first merged in this [commit](https://github.com/ethereum/go-ethereum/commit/cf6674539c589f80031f3371a71c6a80addbe454), on 2020-06-08. \nExploiting this vulnerability would cause all vulnerable nodes to drop off the network. \n\nThe issue was brought to our attention through a [bug report](https://github.com/ethereum/go-ethereum/issues/21367), showing a `panic` occurring on sync from genesis on the Ropsten network.\n \nIt was estimated that the least obvious way to fix this would be to merge the fix into `uint256`, make a new release of that library and then update the geth-dependency.\n", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-jm5c-rv3w-w83m", + "https://github.com/holiman/uint256/releases/tag/v1.1.1", + "https://github.com/holiman/uint256/pull/80", + "https://github.com/ethereum/go-ethereum/pull/21368" + ], + "introduced": "v1.9.16", + "fixed": "v1.9.18", + "published": "2020-11-12", + "severity": "Critical", + "CVE": "CVE-2020-26242", + "check": "Geth\\/v1\\.9.(16|17).*$" + }, + { + "name": "LES Server DoS via GetProofsV2", + "uid": "GETH-2020-05", + "summary": "A DoS vulnerability can make a LES server crash.", + "description": "A DoS vulnerability can make a LES server crash via a malicious GetProofsV2 request from a connected LES client.\n\nThe vulnerability was patched in #21896.\n\nThis vulnerability only concerns users explicitly running geth as a light server.", + "links": [ + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-r33q-22hv-j29q", + "https://github.com/ethereum/go-ethereum/pull/21896" + ], + "introduced": "v1.8.0", + "fixed": "v1.9.25", + "published": "2020-12-10", + "severity": "Medium", + "CVE": "CVE-2020-26264", + "check": "(Geth\\/v1\\.8\\.*)|(Geth\\/v1\\.9\\.\\d-.*)|(Geth\\/v1\\.9\\.1\\d-.*)|(Geth\\/v1\\.9\\.(20|21|22|23|24)-.*)$" + }, + { + "name": "SELFDESTRUCT-recreate consensus flaw", + "uid": "GETH-2020-06", + "introduced": "v1.9.4", + "fixed": "v1.9.20", + "summary": "A consensus-vulnerability in Geth could cause a chain split, where vulnerable versions refuse to accept the canonical chain.", + "description": "A flaw was reported on 2020-08-11 by John Youngseok Yang (Software Platform Lab), where a particular sequence of transactions could cause a consensus failure.\n\n- Tx 1:\n - `sender` invokes `caller`.\n - `caller` invokes `0xaa`. `0xaa` has 3 wei, does a self-destruct-to-self\n - `caller` does a `1 wei`-call to `0xaa`, who thereby has 1 wei (the code in `0xaa` still executed, since the tx is still ongoing, but doesn't redo the selfdestruct, it takes a different path if callvalue is non-zero)\n\n- Tx 2:\n - `sender` does a 5-wei call to 0xaa. No exec (since no code). \n\nIn geth, the result would be that `0xaa` had `6 wei`, whereas OE reported (correctly) `5` wei. Furthermore, in geth, if the second tx was not executed, the `0xaa` would be destructed, resulting in `0 wei`. Thus obviously wrong. 
\n\nIt was determined that the root cause was this [commit](https://github.com/ethereum/go-ethereum/commit/223b950944f494a5b4e0957fd9f92c48b09037ad) from [this PR](https://github.com/ethereum/go-ethereum/pull/19953). The semantics of `createObject` were subtly changed to return a non-nil object (with `deleted=true`) where it previously did not if the account had been destructed. This return value caused the new object to inherit the old `balance`.\n", + "links": [ + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-xw37-57qp-9mm4" + ], + "published": "2020-12-10", + "severity": "High", + "CVE": "CVE-2020-26265", + "check": "(Geth\\/v1\\.9\\.(4|5|6|7|8|9)-.*)|(Geth\\/v1\\.9\\.1\\d-.*)$" + }, + { + "name": "Not ready for London upgrade", + "uid": "GETH-2021-01", + "summary": "The client is not ready for the 'London' technical upgrade, and will deviate from the canonical chain when the London upgrade occurs (at block '12965000', around August 4, 2021).", + "description": "On (or around) August 4, 2021, Ethereum will undergo a technical upgrade called 'London'. Clients not upgraded will fail to progress on the canonical chain.", + "links": [ + "https://github.com/ethereum/eth1.0-specs/blob/master/network-upgrades/mainnet-upgrades/london.md", + "https://notes.ethereum.org/@timbeiko/ropsten-postmortem" + ], + "introduced": "v1.10.1", + "fixed": "v1.10.6", + "published": "2020-12-10", + "severity": "High", + "check": "(Geth\\/v1\\.10\\.(1|2|3|4|5)-.*)$" + } +] diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 023103936..b3adeb632 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -25,7 +25,7 @@ import ( "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/flags" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) // AppHelpFlagGroups is the application flags, grouped by functionality. 
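A note on the data format above: each entry's `check` field is a regular expression that is matched against the full client version string, which is how `checkCurrent` in `cmd/geth/version_check.go` (added further below) decides whether the running client is affected. Here is a minimal standalone sketch of that matching step; the pattern is copied from the GETH-2020-04 entry, while the concrete version string is only an illustrative example and not taken from this PR:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// `check` pattern copied from the GETH-2020-04 entry in vulnerabilities.json above.
	check := `Geth\/v1\.9.(16|17).*$`
	// Illustrative version string in the user-agent style geth reports (made up for this sketch).
	version := "Geth/v1.9.17-stable-15339cf1/linux-amd64/go1.15.4"

	// MatchString performs an unanchored search, so the pattern may match anywhere in the string.
	if regexp.MustCompile(check).MatchString(version) {
		fmt.Println("flagged as vulnerable to GETH-2020-04")
	} else {
		fmt.Println("not flagged")
	}
}
```

In this example the pattern matches, since v1.9.17 falls inside the vulnerable range (introduced in v1.9.16, fixed in v1.9.18).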
@@ -36,10 +36,12 @@ var AppHelpFlagGroups = []flags.FlagGroup{ configFileFlag, utils.DataDirFlag, utils.AncientFlag, + utils.MinFreeDiskSpaceFlag, utils.KeyStoreDirFlag, - utils.NoUSBFlag, + utils.USBFlag, utils.SmartCardDaemonPathFlag, utils.NetworkIdFlag, + utils.MainnetFlag, utils.TestnetFlag, utils.SyncModeFlag, utils.ExitWhenSyncedFlag, @@ -62,6 +64,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.UltraLightFractionFlag, utils.UltraLightOnlyAnnounceFlag, utils.LightNoPruneFlag, + utils.LightNoSyncServeFlag, }, }, { @@ -111,6 +114,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.CacheGCFlag, utils.CacheSnapshotFlag, utils.CacheNoPrefetchFlag, + utils.CachePreimagesFlag, }, }, { @@ -131,18 +135,21 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.HTTPListenAddrFlag, utils.HTTPPortFlag, utils.HTTPApiFlag, + utils.HTTPPathPrefixFlag, utils.HTTPCORSDomainFlag, utils.HTTPVirtualHostsFlag, utils.WSEnabledFlag, utils.WSListenAddrFlag, utils.WSPortFlag, utils.WSApiFlag, + utils.WSPathPrefixFlag, utils.WSAllowedOriginsFlag, utils.GraphQLEnabledFlag, utils.GraphQLCORSDomainFlag, utils.GraphQLVirtualHostsFlag, utils.RPCGlobalGasCapFlag, utils.RPCGlobalTxFeeCapFlag, + utils.AllowUnprotectedTxs, utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag, @@ -152,8 +159,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{ Name: "NETWORKING", Flags: []cli.Flag{ utils.BootnodesFlag, - utils.LegacyBootnodesV4Flag, - utils.LegacyBootnodesV5Flag, utils.DNSDiscoveryFlag, utils.ListenPortFlag, utils.MaxPeersFlag, @@ -172,8 +177,8 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.MiningEnabledFlag, utils.MinerThreadsFlag, utils.MinerNotifyFlag, + utils.MinerNotifyFullFlag, utils.MinerGasPriceFlag, - utils.MinerGasTargetFlag, utils.MinerGasLimitFlag, utils.MinerEtherbaseFlag, utils.MinerExtraDataFlag, @@ -187,14 +192,13 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.GpoBlocksFlag, utils.GpoPercentileFlag, utils.GpoMaxGasPriceFlag, + utils.GpoIgnoreGasPriceFlag, }, }, { Name: "VIRTUAL MACHINE", Flags: []cli.Flag{ utils.VMEnableDebugFlag, - utils.EVMInterpreterFlag, - utils.EWASMInterpreterFlag, }, }, { @@ -208,34 +212,26 @@ var AppHelpFlagGroups = []flags.FlagGroup{ Name: "METRICS AND STATS", Flags: metricsFlags, }, - { - Name: "WHISPER (deprecated)", - Flags: whisperFlags, - }, { Name: "ALIASED (deprecated)", - Flags: append([]cli.Flag{ + Flags: []cli.Flag{ + utils.NoUSBFlag, utils.LegacyRPCEnabledFlag, utils.LegacyRPCListenAddrFlag, utils.LegacyRPCPortFlag, utils.LegacyRPCCORSDomainFlag, utils.LegacyRPCVirtualHostsFlag, utils.LegacyRPCApiFlag, - utils.LegacyWSListenAddrFlag, - utils.LegacyWSPortFlag, - utils.LegacyWSAllowedOriginsFlag, - utils.LegacyWSApiFlag, - utils.LegacyGpoBlocksFlag, - utils.LegacyGpoPercentileFlag, - utils.LegacyGraphQLListenAddrFlag, - utils.LegacyGraphQLPortFlag, - }, debug.DeprecatedFlags...), + utils.LegacyMinerGasTargetFlag, + }, }, { Name: "MISC", Flags: []cli.Flag{ utils.SnapshotFlag, + utils.BloomFilterSizeFlag, cli.HelpFlag, + utils.CatalystFlag, }, }, } diff --git a/cmd/geth/version_check.go b/cmd/geth/version_check.go new file mode 100644 index 000000000..2101a69e9 --- /dev/null +++ b/cmd/geth/version_check.go @@ -0,0 +1,169 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strings" + + "github.com/ethereum/go-ethereum/log" + "github.com/jedisct1/go-minisign" + "gopkg.in/urfave/cli.v1" +) + +var gethPubKeys []string = []string{ + //@holiman, minisign public key FB1D084D39BAEC24 + "RWQk7Lo5TQgd+wxBNZM+Zoy+7UhhMHaWKzqoes9tvSbFLJYZhNTbrIjx", + //minisign public key 138B1CA303E51687 + "RWSHFuUDoxyLEzjszuWZI1xStS66QTyXFFZG18uDfO26CuCsbckX1e9J", + //minisign public key FD9813B2D2098484 + "RWSEhAnSshOY/b+GmaiDkObbCWefsAoavjoLcPjBo1xn71yuOH5I+Lts", +} + +type vulnJson struct { + Name string + Uid string + Summary string + Description string + Links []string + Introduced string + Fixed string + Published string + Severity string + Check string + CVE string +} + +func versionCheck(ctx *cli.Context) error { + url := ctx.String(VersionCheckUrlFlag.Name) + version := ctx.String(VersionCheckVersionFlag.Name) + log.Info("Checking vulnerabilities", "version", version, "url", url) + return checkCurrent(url, version) +} + +func checkCurrent(url, current string) error { + var ( + data []byte + sig []byte + err error + ) + if data, err = fetch(url); err != nil { + return fmt.Errorf("could not retrieve data: %w", err) + } + if sig, err = fetch(fmt.Sprintf("%v.minisig", url)); err != nil { + return fmt.Errorf("could not retrieve signature: %w", err) + } + if err = verifySignature(gethPubKeys, data, sig); err != nil { + return err + } + var vulns []vulnJson + if err = json.Unmarshal(data, &vulns); err != nil { + return err + } + allOk := true + for _, vuln := range vulns { + r, err := regexp.Compile(vuln.Check) + if err != nil { + return err + } + if r.MatchString(current) { + allOk = false + fmt.Printf("## Vulnerable to %v (%v)\n\n", vuln.Uid, vuln.Name) + fmt.Printf("Severity: %v\n", vuln.Severity) + fmt.Printf("Summary : %v\n", vuln.Summary) + fmt.Printf("Fixed in: %v\n", vuln.Fixed) + if len(vuln.CVE) > 0 { + fmt.Printf("CVE: %v\n", vuln.CVE) + } + if len(vuln.Links) > 0 { + fmt.Printf("References:\n") + for _, ref := range vuln.Links { + fmt.Printf("\t- %v\n", ref) + } + } + fmt.Println() + } + } + if allOk { + fmt.Println("No vulnerabilities found") + } + return nil +} + +// fetch makes an HTTP request to the given url and returns the response body +func fetch(url string) ([]byte, error) { + if filep := strings.TrimPrefix(url, "file://"); filep != url { + return ioutil.ReadFile(filep) + } + res, err := http.Get(url) + if err != nil { + return nil, err + } + defer res.Body.Close() + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + return body, nil +} + +// verifySignature checks that the sigData is a valid signature of the given +// data, for pubkey GethPubkey +func verifySignature(pubkeys []string, data, sigdata []byte) error { + sig, err := minisign.DecodeSignature(string(sigdata)) + if err != nil { + return err + } + // find the used key + var key *minisign.PublicKey + for _, pubkey := range pubkeys { + pub, err := minisign.NewPublicKey(pubkey) + if err != nil { + // our pubkeys should be parseable + return err + } + if pub.KeyId != sig.KeyId { + continue + } + key = &pub + 
break + } + if key == nil { + log.Info("Signing key not trusted", "keyid", keyID(sig.KeyId), "error", err) + return errors.New("signature could not be verified") + } + if ok, err := key.Verify(data, sig); !ok || err != nil { + log.Info("Verification failed error", "keyid", keyID(key.KeyId), "error", err) + return errors.New("signature could not be verified") + } + return nil +} + +// keyID turns a binary minisign key ID into a hex string. +// Note: key IDs are printed in reverse byte order. +func keyID(id [8]byte) string { + var rev [8]byte + for i := range id { + rev[len(rev)-1-i] = id[i] + } + return fmt.Sprintf("%X", rev) +} diff --git a/cmd/geth/version_check_test.go b/cmd/geth/version_check_test.go new file mode 100644 index 000000000..0f056d196 --- /dev/null +++ b/cmd/geth/version_check_test.go @@ -0,0 +1,130 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" +) + +func TestVerification(t *testing.T) { + // Signatures generated with `minisign` + t.Run("minisig", func(t *testing.T) { + // For this test, the pubkey is in testdata/minisign.pub + // (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' ) + pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp" + testVerification(t, pub, "./testdata/vcheck/minisig-sigs/") + }) + // Signatures generated with `signify-openbsd` + t.Run("signify-openbsd", func(t *testing.T) { + t.Skip("This currently fails, minisign expects 4 lines of data, signify provides only 2") + // For this test, the pubkey is in testdata/signifykey.pub + // (the privkey is `signifykey.sec`, if we want to expand this test. 
Password 'test' ) + pub := "RWSKLNhZb0KdATtRT7mZC/bybI3t3+Hv/O2i3ye04Dq9fnT9slpZ1a2/" + testVerification(t, pub, "./testdata/vcheck/signify-sigs/") + }) +} + +func testVerification(t *testing.T, pubkey, sigdir string) { + // Data to verify + data, err := ioutil.ReadFile("./testdata/vcheck/data.json") + if err != nil { + t.Fatal(err) + } + // Signatures, with and without comments, both trusted and untrusted + files, err := ioutil.ReadDir(sigdir) + if err != nil { + t.Fatal(err) + } + for _, f := range files { + sig, err := ioutil.ReadFile(filepath.Join(sigdir, f.Name())) + if err != nil { + t.Fatal(err) + } + err = verifySignature([]string{pubkey}, data, sig) + if err != nil { + t.Fatal(err) + } + } +} + +func versionUint(v string) int { + mustInt := func(s string) int { + a, err := strconv.Atoi(s) + if err != nil { + panic(v) + } + return a + } + components := strings.Split(strings.TrimPrefix(v, "v"), ".") + a := mustInt(components[0]) + b := mustInt(components[1]) + c := mustInt(components[2]) + return a*100*100 + b*100 + c +} + +// TestMatching can be used to check that the regexps are correct +func TestMatching(t *testing.T) { + data, _ := ioutil.ReadFile("./testdata/vcheck/vulnerabilities.json") + var vulns []vulnJson + if err := json.Unmarshal(data, &vulns); err != nil { + t.Fatal(err) + } + check := func(version string) { + vFull := fmt.Sprintf("Geth/%v-unstable-15339cf1-20201204/linux-amd64/go1.15.4", version) + for _, vuln := range vulns { + r, err := regexp.Compile(vuln.Check) + vulnIntro := versionUint(vuln.Introduced) + vulnFixed := versionUint(vuln.Fixed) + current := versionUint(version) + if err != nil { + t.Fatal(err) + } + if vuln.Name == "Denial of service due to Go CVE-2020-28362" { + // this one is not tied to geth-versions + continue + } + if vulnIntro <= current && vulnFixed > current { + // Should be vulnerable + if !r.MatchString(vFull) { + t.Errorf("Should be vulnerable, version %v, intro: %v, fixed: %v %v %v", + version, vuln.Introduced, vuln.Fixed, vuln.Name, vuln.Check) + } + } else { + if r.MatchString(vFull) { + t.Errorf("Should not be flagged vulnerable, version %v, intro: %v, fixed: %v %v %d %d %d", + version, vuln.Introduced, vuln.Fixed, vuln.Name, vulnIntro, current, vulnFixed) + } + } + + } + } + for major := 1; major < 2; major++ { + for minor := 0; minor < 30; minor++ { + for patch := 0; patch < 30; patch++ { + vShort := fmt.Sprintf("v%d.%d.%d", major, minor, patch) + check(vShort) + } + } + } +} diff --git a/cmd/puppeth/genesis.go b/cmd/puppeth/genesis.go index b3e1709db..ef1f977bf 100644 --- a/cmd/puppeth/genesis.go +++ b/cmd/puppeth/genesis.go @@ -152,7 +152,7 @@ func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSp spec.Genesis.Author = genesis.Coinbase spec.Genesis.Timestamp = (hexutil.Uint64)(genesis.Timestamp) spec.Genesis.ParentHash = genesis.ParentHash - spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData) + spec.Genesis.ExtraData = genesis.ExtraData spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit) for address, account := range genesis.Alloc { @@ -425,12 +425,12 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin spec.Params.EIP98Transition = math.MaxInt64 spec.Genesis.Seal.Ethereum.Nonce = types.EncodeNonce(genesis.Nonce) - spec.Genesis.Seal.Ethereum.MixHash = (genesis.Mixhash[:]) + spec.Genesis.Seal.Ethereum.MixHash = genesis.Mixhash[:] spec.Genesis.Difficulty = (*hexutil.Big)(genesis.Difficulty) spec.Genesis.Author = genesis.Coinbase spec.Genesis.Timestamp = 
(hexutil.Uint64)(genesis.Timestamp) spec.Genesis.ParentHash = genesis.ParentHash - spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData) + spec.Genesis.ExtraData = genesis.ExtraData spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit) spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount) diff --git a/cmd/puppeth/module_dashboard.go b/cmd/puppeth/module_dashboard.go index be8b6ec60..35cfada66 100644 --- a/cmd/puppeth/module_dashboard.go +++ b/cmd/puppeth/module_dashboard.go @@ -80,12 +80,10 @@ var dashboardContent = `
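One more reference point for the tests above: `TestMatching` compares versions numerically via `versionUint`, which packs `vMAJOR.MINOR.PATCH` into a single integer so the `introduced` (inclusive) and `fixed` (exclusive) bounds reduce to plain integer comparisons. A minimal sketch mirroring just that arithmetic (not part of the PR):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// versionUint mirrors the test helper: "v1.9.16" becomes 1*10000 + 9*100 + 16 = 10916,
// so version-range checks reduce to integer comparisons.
func versionUint(v string) int {
	parts := strings.Split(strings.TrimPrefix(v, "v"), ".")
	major, _ := strconv.Atoi(parts[0])
	minor, _ := strconv.Atoi(parts[1])
	patch, _ := strconv.Atoi(parts[2])
	return major*100*100 + minor*100 + patch
}

func main() {
	fmt.Println(versionUint("v1.9.16"))                           // 10916
	fmt.Println(versionUint("v1.9.16") >= versionUint("v1.9.16")) // true: "introduced" bound is inclusive
	fmt.Println(versionUint("v1.9.17") < versionUint("v1.9.18"))  // true: "fixed" bound is exclusive
}
```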